source | c
|---|---|
sldtestunit.c | #include <stdio.h> // printf
#include <inttypes.h> // format macros for printing uint64_t and int64_t with printf
// currently unused
#include <stdlib.h> // atoi atof
#include <math.h>
#include "sldouble.h"
#ifdef _WIN32
#include <Windows.h>
int gettimeofday(struct timeval * tp, struct timezone * tzp)
{
static const uint64_t epoch = (uint64_t) 116444736000000000;
uint64_t time;
static SYSTEMTIME st;
static FILETIME ft;
GetSystemTime(&st);
SystemTimeToFileTime(&st, &ft);
time = (uint64_t) ft.dwLowDateTime;
time += ((uint64_t) ft.dwHighDateTime) << 32;
tp->tv_sec = (long) ((time - epoch) / 10000000);
/* Although time in the FILETIME struct is measured
* in 1/10 microsecond units, the 4 least significant
* decimal digits are always returned as zero, so we
* use milliseconds multiplied by 1000
* only for conformity with the corresponding POSIX function */
tp->tv_usec = (long) (st.wMilliseconds * 1000);
return 0;
}
#else
#include <sys/time.h>
#endif
void test_mult(int acc);
void test_sqrt(int acc);
void test_pow(int acc);
void test_speed_soft(void);
void test_speed_fpu(void);
double get_d_pseudo_random(int i);
int strcmp_max(char *s1, char *s2, int max);
int main(int argc, char **argv)
{
char argserror[] = "Usage:\n"
"-------\n"
"./sldmult mult | sqrt | fp | ip | div | pow OPTIONS\n\n"
"OPTIONS:\n"
"div: dividend(double) divisor(double)\n"
" dividend divisor - returns quotient of input\n\n"
"fp: number(double) power(double)\n"
" number power - returns number raised to fraction part of given power\n\n"
"ip: number(double) power(double)\n"
" number power - returns number raised to integer part of given power\n\n"
"mult: -b [precision] | factor1(double) factor2(double) | -f | -s\n"
" -b - big accuracy test of multiplication with default precision\n"
" 12 digits after decimal point or with\n"
" selected precision but not greater than 16\n"
" factor1 factor2 - multiplication of 2 selected numbers\n"
" -f - fpu multiplication speed test (it's here for historical reasons)\n"
" -s - soft multiplication speed test (it's here for historical reasons)\n\n"
"pow: -b [precision] | number(double) power(double)\n"
" -b - big accuracy test of raising number to given power \n"
" with default precision 12 digits after decimal point\n"
" or with selected precision but not greater than 16\n"
" number power - returns number raised to the given power\n\n"
"sqrt: -b [precision] | number(double)\n"
" -b - big accuracy test of square root with default precision 12 digits\n"
" after decimal point or with selected precision\n"
" but not greater than 16\n"
" number - selected number for square root test\n\n"
"P.S.: All big tests are include 64,000,000 operations for fpu "
"and just as much for soft. Also, if you wish to see where result "
"between fpu and sldouble calculations would be different you need "
"to run one of the big tests with precision 16 - that is now "
"the only precision by which you realy see that. And even with it, "
"the total accuracy is greater that 99,99% in all tests.\n\n"
"P.P.S.: Currently the only functions that have checkings on input "
"values are mult, sqrt and pow. They properly hold any legal "
"double value in contrast to fp, ip and div";
double a, b;
int acc;
struct timeval tim1, tim2;
gettimeofday(&tim1,NULL);
if (argc < 2) {
printf("%s", argserror);
return 1;
}
else {
if (!strcmp_max("mult", *++argv, 5)) {
if (argc > 2) {
if (!strcmp_max("-f", *++argv, 3)) test_speed_fpu();
else if (!strcmp_max("-s", *argv, 3)) test_speed_soft();
else if (!strcmp_max("-b", *argv, 3)){
if (argc > 3 && (acc = atoi(*++argv)))
test_mult(acc);
else
test_mult(12);
}
else if (argc > 3) {
a = atof(*argv);
b = atof(*++argv);
printf("sldout: %.16e\nfpuout: %.16e\n", mult_by_sd(a, b), a*b);
return 0;
}
else {
printf("%s", argserror);
return 1;
}
} else {
printf("%s", argserror);
return 1;
}
} else if (!strcmp_max("sqrt", *argv, 5)) {
if (argc > 2) {
if (!strcmp_max("-b", *++argv, 3)){
if (argc > 3 && (acc = atoi(*++argv)))
test_sqrt(acc);
else
test_sqrt(12);
}
else {
a = atof(*argv);
printf("sldout: %.16e\nfpuout: %.16e\n", sqrt_by_sd(a), sqrt(a));
return 0;
}
} else {
printf("%s", argserror);
return 1;
}
} else if (!strcmp_max("fp", *argv, 3)) {
if (argc > 2) {
if (argc > 3) {
a = atof(*++argv);
b = atof(*++argv);
printf("sldout: %.16e\nfpuout: %.16e\n", fract_power_by_sd(a, b), pow(a,b));
return 0;
}
else {
printf("%s", argserror);
return 1;
}
} else {
printf("%s", argserror);
return 1;
}
} else if (!strcmp_max("ip", *argv, 3)) {
if (argc > 2) {
if (argc > 3) {
a = atof(*++argv);
b = atof(*++argv);
printf("sldout: %.16e\nfpuout: %.16e\n", int_power_by_sd(a, b), pow(a,b));
return 0;
}
else {
printf("%s", argserror);
return 1;
}
} else {
printf("%s", argserror);
return 1;
}
} else if (!strcmp_max("div", *argv, 3)) {
if (argc > 2) {
if (argc > 3) {
a = atof(*++argv);
b = atof(*++argv);
printf("sldout: %.16e\nfpuout: %.16e\n", division_by_sd(a, b), a/b);
return 0;
}
else {
printf("%s", argserror);
return 1;
}
} else {
printf("%s", argserror);
return 1;
}
} else if (!strcmp_max("pow", *argv, 3)) {
if (argc > 2) {
if (!strcmp_max("-b", *++argv, 3)){
if (argc > 3 && (acc = atoi(*++argv)))
test_pow(acc);
else
test_pow(12);
}
else if (argc > 3) {
a = atof(*argv);
b = atof(*++argv);
printf("sldout: %.16e\nfpuout: %.16e\n", pow_by_sd(a, b), pow(a,b));
return 0;
}
else {
printf("%s", argserror);
return 1;
}
} else {
printf("%s", argserror);
return 1;
}
}
}
gettimeofday(&tim2,NULL);
printf("Time elapsed: %ldns\n", tim2.tv_usec - tim1.tv_usec + (tim2.tv_sec - tim1.tv_sec) * 1000000);
}
void test_mult(int acc)
{
if (acc > 16) acc = 16;
double accuracy = 1.0;
for (int z = 0; z < acc; z++, accuracy /= 10.0);
int counter = 0, r = 0;
double factor1 = 1.0e+200,
factor2;
double d1 = 0.0, d2 = 0.0, r1 = 0.0, r2 = 0.0;
for (int i = 0; i < 400; ++i) {
factor1 /= 10;
printf("***\n%.2e first random factor\n\n", factor1);
factor2 = 1.0e+200;
for (int j = 0; j < 400; ++j) {
#pragma omp parallel for \
reduction(+:counter) \
firstprivate(d1) firstprivate(d2) \
firstprivate(r1) firstprivate(r2) firstprivate(r)
for (int z = 0; z < 400; ++z) {
d1 = get_d_pseudo_random(z % 16) * factor1;
d2 = get_d_pseudo_random((j+z+31) % 16) * factor2;
if (z % 2 == 0) d1 = -d1;
if (z % 3 == 0) d2 = -d2;
r1 = mult_by_sd(d1, d2);
r2 = d1*d2;
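/* Count a miss when the relative difference between the sldouble
 * result (r1) and the fpu result (r2) exceeds the chosen accuracy. */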
if (((r1 > 0 && r1 < r2) && ((r1 + r1 * accuracy) < r2))
|| ((r1 < 0 && r1 > r2) && ((r1 + r1 * accuracy) > r2))
|| ((r1 > 0 && r1 > r2) && ((r2 + r2 * accuracy) < r1))
|| ((r1 < 0 && r1 < r2) && ((r2 + r2 * accuracy) > r1)))
r = 1;
else
r = 0;
if (r) {
printf("factor1: %.20e factor2: %.20e\nsld: %.20e\nfpu: %.20e\n"
"\n------------------------------\n",
d1, d2, r1, r2);
counter++;
}
}
factor2 /= 10;
}
}
printf("SUMMARY:\n--------\n"
"Tests of multiplication two random numbers was made: 64,000,000\n"
"Used precision: +-%.2e\n"
"Outputs that had missed accuracy: %d\n"
"Total accuracy: %.8g%%\n",
accuracy, counter, 100 - (counter/64000000.0) * 100);
}
void test_pow(int acc)
{
if (acc > 16) acc = 16;
double accuracy = 1.0;
for (int z = 0; z < acc; z++, accuracy /= 10.0);
int counter = 0, r = 0;
double factor1 = 1.0e+200,
factor2;
double d1 = 0.0, d2 = 0.0, r1 = 0.0, r2 = 0.0;
for (int i = 0; i < 400; ++i) {
factor1 /= 10;
printf("***\n%.2e number random factor\n\n", factor1);
factor2 = 1.0e+200;
for (int j = 0; j < 400; ++j) {
#pragma omp parallel for \
reduction(+:counter) \
firstprivate(d1) firstprivate(d2) \
firstprivate(r1) firstprivate(r2) firstprivate(r)
for (int z = 0; z < 400; ++z) {
d1 = get_d_pseudo_random(z % 16) * factor1;
d2 = get_d_pseudo_random((j+z+31) % 16) * factor2;
if (z % 2 == 0) d1 = -d1;
if (z % 3 == 0) d2 = -d2;
r1 = pow_by_sd(d1, d2);
r2 = pow(d1,d2);
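/* Same relative-accuracy check as in test_mult(), plus NaN handling:
 * a miss is also counted when exactly one of the two results is NaN. */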
if (((r1 > 0 && r1 < r2) && ((r1 + r1 * accuracy) < r2))
|| ((r1 < 0 && r1 > r2) && ((r1 + r1 * accuracy) > r2))
|| ((r1 > 0 && r1 > r2) && ((r2 + r2 * accuracy) < r1))
|| ((r1 < 0 && r1 < r2) && ((r2 + r2 * accuracy) > r1))
|| (r1 != r1 && r2 == r2) || (r1 == r1 && r2 != r2))
r = 1;
else
r = 0;
if (r) {
printf("number: %.20e power: %.20e\nsld: %.20e\nfpu: %.20e\n"
"\n------------------------------\n",
d1, d2, r1, r2);
counter++;
}
}
factor2 /= 10;
}
}
printf("SUMMARY:\n--------\n"
"Tests of raising random number to random power was made: 64,000,000\n"
"Used precision: +-%.2e\n"
"Outputs that had missed accuracy: %d\n"
"Total accuracy: %.8g%%\n",
accuracy, counter, 100 - (counter/64000000.0) * 100);
}
void test_sqrt(int acc)
{
if (acc > 16) acc = 16;
double accuracy = 1.0;
for (int z = 0; z < acc; z++, accuracy /= 10.0);
int counter = 0, r = 0;
double factor1 = 1.0e+200,
factor2;
double d = 0.0, r1= 0.0, r2 = 0.0;
for (int i = 0; i < 400; ++i) {
factor1 /= 10;
printf("***\n%.2e first random factor\n\n", factor1);
factor2 = 1.0e+200;
for (int j = 0; j < 400; ++j) {
#pragma omp parallel for \
reduction(+:counter) \
firstprivate(d) firstprivate(r1) \
firstprivate(r2) firstprivate(r)
for (int z = 0; z < 400; ++z) {
d = get_d_pseudo_random(z % 16) * factor1 * factor2;
r1 = sqrt_by_sd(d);
r2 = sqrt(d);
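/* Same relative-accuracy check as in test_mult(). */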
if (((r1 > 0 && r1 < r2) && ((r1 + r1 * accuracy) < r2))
|| ((r1 < 0 && r1 > r2) && ((r1 + r1 * accuracy) > r2))
|| ((r1 > 0 && r1 > r2) && ((r2 + r2 * accuracy) < r1))
|| ((r1 < 0 && r1 < r2) && ((r2 + r2 * accuracy) > r1)))
r = 1;
else
r = 0;
if (r) {
printf("in: %.20e\nsld: %.20e\nfpu: %.20e\n"
"\n------------------------------\n",
d, r1, r2);
counter++;
}
}
factor2 /= 10;
}
}
printf("SUMMARY:\n--------\n"
"Tests of extracting square root from random number was made: 64,000,000\n"
"Used precision: +-%.2e\n"
"Outputs that had missed accuracy: %d\n"
"Total accuracy: %.8g%%\n",
accuracy, counter, 100 - (counter/64000000.0) * 100);
}
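/* Return a pseudo-random double in [0, 1), derived from the current
 * microsecond timestamp, a static call counter and the index I. */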
double get_d_pseudo_random(int i)
{
static int s = 0;
struct timeval tim;
gettimeofday(&tim,NULL);
s++;
double d = tim.tv_usec/1000000.0 * ((s % 100000)+1) * ((i % 65536) + 1) / 6553600000.0;
if (d >= 1) return d/2;
else return d;
}
int strcmp_max(char *s1, char *s2, int max)
{
while (max--) {
if (*s1 > *s2) return 1;
else if (*s1 < *s2) return -1;
else if (*s1 == '\0') return 0;
s1++;
s2++;
}
return 0;
}
/* When profiling with gprof, the get_d_pseudo_random()
 * calls inside test_speed_fpu() take 66.6% of the total run time
 * of 100ms, so without that expense about
 * 33ms remain to compute 800000 products on the fpu
 * (on my test stand, of course) */
void test_speed_fpu(void)
{
int i, j;
double r;
double factor1 = 1.0e+200,
factor2,
d1, d2;
/* Calculation of res is a workaround to keep the optimizer from eliminating the loop */
double res = 0.0;
for (i = 0; i < 400; i++) {
factor1 = factor1 / 10;
factor2 = 1.0e+200;
for (j = 0; j < 2000; j++) {
if (j % 50 == 0) factor2 = factor2 / 10;
d1 = get_d_pseudo_random(j % 16) * factor1;
d2 = get_d_pseudo_random((i+j+31) % 16) * factor2;
if (j % 2 == 0) d1 = -d1;
if (j % 3 == 0) d2 = -d2;
r = d1*d2;
res += r / (i * j + 1);
}
}
printf("%e\n", res);
}
/* When profiling with gprof, the get_d_pseudo_random()
 * calls inside test_speed_soft() take 4.2% of the total run time
 * of 600ms, so without that expense about
 * 575ms remain to compute 800000 products through sldouble
 * (on my test stand, of course) */
/* What is interesting is that here the computation of
 * pseudorandom numbers was 2.5 times faster than
 * it was in the test_speed_fpu() case. Probably some of the
 * executions were natively running in parallel. */
void test_speed_soft(void)
{
int i, j;
double r;
double factor1 = 1.0e+200,
factor2,
d1, d2;
/* Calculation of res is a workaround to keep the optimizer from eliminating the loop */
double res = 0.0;
for (i = 0; i < 400; i++) {
factor1 = factor1 / 10;
factor2 = 1.0e+200;
for (j = 0; j < 2000; j++) {
if (j % 50 == 0) factor2 = factor2 / 10;
d1 = get_d_pseudo_random(j % 16) * factor1;
d2 = get_d_pseudo_random((i+j+31) % 16) * factor2;
if (j % 2 == 0) d1 = -d1;
if (j % 3 == 0) d2 = -d2;
r = mult_by_sd(d1, d2);
res += r / (i * j + 1);
}
}
printf("%e\n", res);
} |
macro-2.c | // { dg-do compile }
#define p parallel
#define s(x) shared(x##1, x##2)
#define d(x) default(x)
void bar(int, int, int, int);
void foo(void)
{
int a1, a2, b1, b2;
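/* After macro expansion the directive below reads:
   #pragma omp parallel shared(a1, a2) shared(b1, b2) default(none) */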
#pragma omp p s(a) s(b) d(none)
bar(a1, a2, b1, b2);
}
|
omp-simd-clone.c | /* OMP constructs' SIMD clone supporting code.
Copyright (C) 2005-2017 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "pretty-print.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "cfganal.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "langhooks.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "tree-dfa.h"
#include "cfgloop.h"
#include "symbol-summary.h"
#include "ipa-prop.h"
#include "tree-eh.h"
#include "varasm.h"
/* Allocate a fresh `simd_clone' and return it. NARGS is the number
of arguments to reserve space for. */
static struct cgraph_simd_clone *
simd_clone_struct_alloc (int nargs)
{
struct cgraph_simd_clone *clone_info;
size_t len = (sizeof (struct cgraph_simd_clone)
+ nargs * sizeof (struct cgraph_simd_clone_arg));
clone_info = (struct cgraph_simd_clone *)
ggc_internal_cleared_alloc (len);
return clone_info;
}
/* Make a copy of the `struct cgraph_simd_clone' in FROM to TO. */
static inline void
simd_clone_struct_copy (struct cgraph_simd_clone *to,
struct cgraph_simd_clone *from)
{
memcpy (to, from, (sizeof (struct cgraph_simd_clone)
+ ((from->nargs - from->inbranch)
* sizeof (struct cgraph_simd_clone_arg))));
}
/* Return vector of parameter types of function FNDECL. This uses
TYPE_ARG_TYPES if available, otherwise falls back to types of
DECL_ARGUMENTS types. */
static vec<tree>
simd_clone_vector_of_formal_parm_types (tree fndecl)
{
if (TYPE_ARG_TYPES (TREE_TYPE (fndecl)))
return ipa_get_vector_of_formal_parm_types (TREE_TYPE (fndecl));
vec<tree> args = ipa_get_vector_of_formal_parms (fndecl);
unsigned int i;
tree arg;
FOR_EACH_VEC_ELT (args, i, arg)
args[i] = TREE_TYPE (args[i]);
return args;
}
/* Given a simd function in NODE, extract the simd specific
information from the OMP clauses passed in CLAUSES, and return
the struct cgraph_simd_clone * if it should be cloned. *INBRANCH_SPECIFIED
is set to TRUE if the `inbranch' or `notinbranch' clause is specified,
otherwise set to FALSE. */
static struct cgraph_simd_clone *
simd_clone_clauses_extract (struct cgraph_node *node, tree clauses,
bool *inbranch_specified)
{
vec<tree> args = simd_clone_vector_of_formal_parm_types (node->decl);
tree t;
int n;
*inbranch_specified = false;
n = args.length ();
if (n > 0 && args.last () == void_type_node)
n--;
/* To distinguish from an OpenMP simd clone, Cilk Plus functions to
be cloned have a distinctive artificial label in addition to "omp
declare simd". */
bool cilk_clone
= (flag_cilkplus
&& lookup_attribute ("cilk simd function",
DECL_ATTRIBUTES (node->decl)));
/* Allocate one more than needed just in case this is an in-branch
clone which will require a mask argument. */
struct cgraph_simd_clone *clone_info = simd_clone_struct_alloc (n + 1);
clone_info->nargs = n;
clone_info->cilk_elemental = cilk_clone;
if (!clauses)
goto out;
clauses = TREE_VALUE (clauses);
if (!clauses || TREE_CODE (clauses) != OMP_CLAUSE)
goto out;
for (t = clauses; t; t = OMP_CLAUSE_CHAIN (t))
{
switch (OMP_CLAUSE_CODE (t))
{
case OMP_CLAUSE_INBRANCH:
clone_info->inbranch = 1;
*inbranch_specified = true;
break;
case OMP_CLAUSE_NOTINBRANCH:
clone_info->inbranch = 0;
*inbranch_specified = true;
break;
case OMP_CLAUSE_SIMDLEN:
clone_info->simdlen
= TREE_INT_CST_LOW (OMP_CLAUSE_SIMDLEN_EXPR (t));
break;
case OMP_CLAUSE_LINEAR:
{
tree decl = OMP_CLAUSE_DECL (t);
tree step = OMP_CLAUSE_LINEAR_STEP (t);
int argno = TREE_INT_CST_LOW (decl);
if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (t))
{
enum cgraph_simd_clone_arg_type arg_type;
if (TREE_CODE (args[argno]) == REFERENCE_TYPE)
switch (OMP_CLAUSE_LINEAR_KIND (t))
{
case OMP_CLAUSE_LINEAR_REF:
arg_type
= SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP;
break;
case OMP_CLAUSE_LINEAR_UVAL:
arg_type
= SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP;
break;
case OMP_CLAUSE_LINEAR_VAL:
case OMP_CLAUSE_LINEAR_DEFAULT:
arg_type
= SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP;
break;
default:
gcc_unreachable ();
}
else
arg_type = SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP;
clone_info->args[argno].arg_type = arg_type;
clone_info->args[argno].linear_step = tree_to_shwi (step);
gcc_assert (clone_info->args[argno].linear_step >= 0
&& clone_info->args[argno].linear_step < n);
}
else
{
if (POINTER_TYPE_P (args[argno]))
step = fold_convert (ssizetype, step);
if (!tree_fits_shwi_p (step))
{
warning_at (OMP_CLAUSE_LOCATION (t), 0,
"ignoring large linear step");
args.release ();
return NULL;
}
else if (integer_zerop (step))
{
warning_at (OMP_CLAUSE_LOCATION (t), 0,
"ignoring zero linear step");
args.release ();
return NULL;
}
else
{
enum cgraph_simd_clone_arg_type arg_type;
if (TREE_CODE (args[argno]) == REFERENCE_TYPE)
switch (OMP_CLAUSE_LINEAR_KIND (t))
{
case OMP_CLAUSE_LINEAR_REF:
arg_type
= SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP;
break;
case OMP_CLAUSE_LINEAR_UVAL:
arg_type
= SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP;
break;
case OMP_CLAUSE_LINEAR_VAL:
case OMP_CLAUSE_LINEAR_DEFAULT:
arg_type
= SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP;
break;
default:
gcc_unreachable ();
}
else
arg_type = SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP;
clone_info->args[argno].arg_type = arg_type;
clone_info->args[argno].linear_step = tree_to_shwi (step);
}
}
break;
}
case OMP_CLAUSE_UNIFORM:
{
tree decl = OMP_CLAUSE_DECL (t);
int argno = tree_to_uhwi (decl);
clone_info->args[argno].arg_type
= SIMD_CLONE_ARG_TYPE_UNIFORM;
break;
}
case OMP_CLAUSE_ALIGNED:
{
tree decl = OMP_CLAUSE_DECL (t);
int argno = tree_to_uhwi (decl);
clone_info->args[argno].alignment
= TREE_INT_CST_LOW (OMP_CLAUSE_ALIGNED_ALIGNMENT (t));
break;
}
default:
break;
}
}
out:
if (TYPE_ATOMIC (TREE_TYPE (TREE_TYPE (node->decl))))
{
warning_at (DECL_SOURCE_LOCATION (node->decl), 0,
"ignoring %<#pragma omp declare simd%> on function "
"with %<_Atomic%> qualified return type");
args.release ();
return NULL;
}
for (unsigned int argno = 0; argno < clone_info->nargs; argno++)
if (TYPE_ATOMIC (args[argno])
&& clone_info->args[argno].arg_type != SIMD_CLONE_ARG_TYPE_UNIFORM)
{
warning_at (DECL_SOURCE_LOCATION (node->decl), 0,
"ignoring %<#pragma omp declare simd%> on function "
"with %<_Atomic%> qualified non-%<uniform%> argument");
args.release ();
return NULL;
}
args.release ();
return clone_info;
}
/* Given a SIMD clone in NODE, calculate the characteristic data
type and return the corresponding type. The characteristic data
type is computed as described in the Intel Vector ABI. */
static tree
simd_clone_compute_base_data_type (struct cgraph_node *node,
struct cgraph_simd_clone *clone_info)
{
tree type = integer_type_node;
tree fndecl = node->decl;
/* a) For non-void function, the characteristic data type is the
return type. */
if (TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE)
type = TREE_TYPE (TREE_TYPE (fndecl));
/* b) If the function has any non-uniform, non-linear parameters,
then the characteristic data type is the type of the first
such parameter. */
else
{
vec<tree> map = simd_clone_vector_of_formal_parm_types (fndecl);
for (unsigned int i = 0; i < clone_info->nargs; ++i)
if (clone_info->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
{
type = map[i];
break;
}
map.release ();
}
/* c) If the characteristic data type determined by a) or b) above
is struct, union, or class type which is pass-by-value (except
for the type that maps to the built-in complex data type), the
characteristic data type is int. */
if (RECORD_OR_UNION_TYPE_P (type)
&& !aggregate_value_p (type, NULL)
&& TREE_CODE (type) != COMPLEX_TYPE)
return integer_type_node;
/* d) If none of the above three classes is applicable, the
characteristic data type is int. */
return type;
/* e) For Intel Xeon Phi native and offload compilation, if the
resulting characteristic data type is 8-bit or 16-bit integer
data type, the characteristic data type is int. */
/* Well, we don't handle Xeon Phi yet. */
}
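/* Build the Vector-ABI mangled name for the SIMD clone of NODE described
   by CLONE_INFO: "_ZGV" followed by the ISA letter, the mask character
   ('M' for inbranch, 'N' for not-inbranch), the simdlen, one code per
   argument, and finally an underscore and the original assembler name
   (e.g. something like _ZGVbN4vv_foo for an illustrative 4-lane
   not-inbranch clone with two plain vector arguments).  Return NULL_TREE
   if a clone with the same mangled name already exists.  */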
static tree
simd_clone_mangle (struct cgraph_node *node,
struct cgraph_simd_clone *clone_info)
{
char vecsize_mangle = clone_info->vecsize_mangle;
char mask = clone_info->inbranch ? 'M' : 'N';
unsigned int simdlen = clone_info->simdlen;
unsigned int n;
pretty_printer pp;
gcc_assert (vecsize_mangle && simdlen);
pp_string (&pp, "_ZGV");
pp_character (&pp, vecsize_mangle);
pp_character (&pp, mask);
pp_decimal_int (&pp, simdlen);
for (n = 0; n < clone_info->nargs; ++n)
{
struct cgraph_simd_clone_arg arg = clone_info->args[n];
switch (arg.arg_type)
{
case SIMD_CLONE_ARG_TYPE_UNIFORM:
pp_character (&pp, 'u');
break;
case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
pp_character (&pp, 'l');
goto mangle_linear;
case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
pp_character (&pp, 'R');
goto mangle_linear;
case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
pp_character (&pp, 'L');
goto mangle_linear;
case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
pp_character (&pp, 'U');
goto mangle_linear;
mangle_linear:
gcc_assert (arg.linear_step != 0);
if (arg.linear_step > 1)
pp_unsigned_wide_integer (&pp, arg.linear_step);
else if (arg.linear_step < 0)
{
pp_character (&pp, 'n');
pp_unsigned_wide_integer (&pp, (-(unsigned HOST_WIDE_INT)
arg.linear_step));
}
break;
case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
pp_string (&pp, "ls");
pp_unsigned_wide_integer (&pp, arg.linear_step);
break;
case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
pp_string (&pp, "Rs");
pp_unsigned_wide_integer (&pp, arg.linear_step);
break;
case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
pp_string (&pp, "Ls");
pp_unsigned_wide_integer (&pp, arg.linear_step);
break;
case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
pp_string (&pp, "Us");
pp_unsigned_wide_integer (&pp, arg.linear_step);
break;
default:
pp_character (&pp, 'v');
}
if (arg.alignment)
{
pp_character (&pp, 'a');
pp_decimal_int (&pp, arg.alignment);
}
}
pp_underscore (&pp);
const char *str = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (node->decl));
if (*str == '*')
++str;
pp_string (&pp, str);
str = pp_formatted_text (&pp);
/* If there already is a SIMD clone with the same mangled name, don't
add another one. This can happen e.g. for
#pragma omp declare simd
#pragma omp declare simd simdlen(8)
int foo (int, int);
if the simdlen is assumed to be 8 for the first one, etc. */
for (struct cgraph_node *clone = node->simd_clones; clone;
clone = clone->simdclone->next_clone)
if (strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (clone->decl)),
str) == 0)
return NULL_TREE;
return get_identifier (str);
}
/* Create a simd clone of OLD_NODE and return it. */
static struct cgraph_node *
simd_clone_create (struct cgraph_node *old_node)
{
struct cgraph_node *new_node;
if (old_node->definition)
{
if (!old_node->has_gimple_body_p ())
return NULL;
old_node->get_body ();
new_node = old_node->create_version_clone_with_body (vNULL, NULL, NULL,
false, NULL, NULL,
"simdclone");
}
else
{
tree old_decl = old_node->decl;
tree new_decl = copy_node (old_node->decl);
DECL_NAME (new_decl) = clone_function_name (old_decl, "simdclone");
SET_DECL_ASSEMBLER_NAME (new_decl, DECL_NAME (new_decl));
SET_DECL_RTL (new_decl, NULL);
DECL_STATIC_CONSTRUCTOR (new_decl) = 0;
DECL_STATIC_DESTRUCTOR (new_decl) = 0;
new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
if (old_node->in_other_partition)
new_node->in_other_partition = 1;
}
if (new_node == NULL)
return new_node;
TREE_PUBLIC (new_node->decl) = TREE_PUBLIC (old_node->decl);
DECL_COMDAT (new_node->decl) = DECL_COMDAT (old_node->decl);
DECL_WEAK (new_node->decl) = DECL_WEAK (old_node->decl);
DECL_EXTERNAL (new_node->decl) = DECL_EXTERNAL (old_node->decl);
DECL_VISIBILITY_SPECIFIED (new_node->decl)
= DECL_VISIBILITY_SPECIFIED (old_node->decl);
DECL_VISIBILITY (new_node->decl) = DECL_VISIBILITY (old_node->decl);
DECL_DLLIMPORT_P (new_node->decl) = DECL_DLLIMPORT_P (old_node->decl);
if (DECL_ONE_ONLY (old_node->decl))
make_decl_one_only (new_node->decl, DECL_ASSEMBLER_NAME (new_node->decl));
/* The method cgraph_version_clone_with_body () will force the new
symbol local. Undo this, and inherit external visibility from
the old node. */
new_node->local.local = old_node->local.local;
new_node->externally_visible = old_node->externally_visible;
return new_node;
}
/* Adjust the return type of the given function to its appropriate
vector counterpart. Returns a simd array to be used throughout the
function as a return value. */
static tree
simd_clone_adjust_return_type (struct cgraph_node *node)
{
tree fndecl = node->decl;
tree orig_rettype = TREE_TYPE (TREE_TYPE (fndecl));
unsigned int veclen;
tree t;
/* Adjust the function return type. */
if (orig_rettype == void_type_node)
return NULL_TREE;
TREE_TYPE (fndecl) = build_distinct_type_copy (TREE_TYPE (fndecl));
t = TREE_TYPE (TREE_TYPE (fndecl));
if (INTEGRAL_TYPE_P (t) || POINTER_TYPE_P (t))
veclen = node->simdclone->vecsize_int;
else
veclen = node->simdclone->vecsize_float;
veclen /= GET_MODE_BITSIZE (TYPE_MODE (t));
if (veclen > node->simdclone->simdlen)
veclen = node->simdclone->simdlen;
if (POINTER_TYPE_P (t))
t = pointer_sized_int_node;
if (veclen == node->simdclone->simdlen)
t = build_vector_type (t, node->simdclone->simdlen);
else
{
t = build_vector_type (t, veclen);
t = build_array_type_nelts (t, node->simdclone->simdlen / veclen);
}
TREE_TYPE (TREE_TYPE (fndecl)) = t;
if (!node->definition)
return NULL_TREE;
t = DECL_RESULT (fndecl);
/* Adjust the DECL_RESULT. */
gcc_assert (TREE_TYPE (t) != void_type_node);
TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (fndecl));
relayout_decl (t);
tree atype = build_array_type_nelts (orig_rettype,
node->simdclone->simdlen);
if (veclen != node->simdclone->simdlen)
return build1 (VIEW_CONVERT_EXPR, atype, t);
/* Set up a SIMD array to use as the return value. */
tree retval = create_tmp_var_raw (atype, "retval");
gimple_add_tmp_var (retval);
return retval;
}
/* Each vector argument has a corresponding array to be used locally
as part of the eventual loop. Create such temporary array and
return it.
PREFIX is the prefix to be used for the temporary.
TYPE is the inner element type.
SIMDLEN is the number of elements. */
static tree
create_tmp_simd_array (const char *prefix, tree type, int simdlen)
{
tree atype = build_array_type_nelts (type, simdlen);
tree avar = create_tmp_var_raw (atype, prefix);
gimple_add_tmp_var (avar);
return avar;
}
/* Modify the function argument types to their corresponding vector
counterparts if appropriate. Also, create one array for each simd
argument to be used locally when using the function arguments as
part of the loop.
NODE is the function whose arguments are to be adjusted.
Returns an adjustment vector that will be filled describing how the
argument types will be adjusted. */
static ipa_parm_adjustment_vec
simd_clone_adjust_argument_types (struct cgraph_node *node)
{
vec<tree> args;
ipa_parm_adjustment_vec adjustments;
if (node->definition)
args = ipa_get_vector_of_formal_parms (node->decl);
else
args = simd_clone_vector_of_formal_parm_types (node->decl);
adjustments.create (args.length ());
unsigned i, j, veclen;
struct ipa_parm_adjustment adj;
struct cgraph_simd_clone *sc = node->simdclone;
for (i = 0; i < sc->nargs; ++i)
{
memset (&adj, 0, sizeof (adj));
tree parm = args[i];
tree parm_type = node->definition ? TREE_TYPE (parm) : parm;
adj.base_index = i;
adj.base = parm;
sc->args[i].orig_arg = node->definition ? parm : NULL_TREE;
sc->args[i].orig_type = parm_type;
switch (sc->args[i].arg_type)
{
default:
/* No adjustment necessary for scalar arguments. */
adj.op = IPA_PARM_OP_COPY;
break;
case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
if (node->definition)
sc->args[i].simd_array
= create_tmp_simd_array (IDENTIFIER_POINTER (DECL_NAME (parm)),
TREE_TYPE (parm_type),
sc->simdlen);
adj.op = IPA_PARM_OP_COPY;
break;
case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
case SIMD_CLONE_ARG_TYPE_VECTOR:
if (INTEGRAL_TYPE_P (parm_type) || POINTER_TYPE_P (parm_type))
veclen = sc->vecsize_int;
else
veclen = sc->vecsize_float;
veclen /= GET_MODE_BITSIZE (TYPE_MODE (parm_type));
if (veclen > sc->simdlen)
veclen = sc->simdlen;
adj.arg_prefix = "simd";
if (POINTER_TYPE_P (parm_type))
adj.type = build_vector_type (pointer_sized_int_node, veclen);
else
adj.type = build_vector_type (parm_type, veclen);
sc->args[i].vector_type = adj.type;
for (j = veclen; j < sc->simdlen; j += veclen)
{
adjustments.safe_push (adj);
if (j == veclen)
{
memset (&adj, 0, sizeof (adj));
adj.op = IPA_PARM_OP_NEW;
adj.arg_prefix = "simd";
adj.base_index = i;
adj.type = sc->args[i].vector_type;
}
}
if (node->definition)
sc->args[i].simd_array
= create_tmp_simd_array (DECL_NAME (parm)
? IDENTIFIER_POINTER (DECL_NAME (parm))
: NULL, parm_type, sc->simdlen);
}
adjustments.safe_push (adj);
}
if (sc->inbranch)
{
tree base_type = simd_clone_compute_base_data_type (sc->origin, sc);
memset (&adj, 0, sizeof (adj));
adj.op = IPA_PARM_OP_NEW;
adj.arg_prefix = "mask";
adj.base_index = i;
if (INTEGRAL_TYPE_P (base_type) || POINTER_TYPE_P (base_type))
veclen = sc->vecsize_int;
else
veclen = sc->vecsize_float;
veclen /= GET_MODE_BITSIZE (TYPE_MODE (base_type));
if (veclen > sc->simdlen)
veclen = sc->simdlen;
if (sc->mask_mode != VOIDmode)
adj.type
= lang_hooks.types.type_for_mode (sc->mask_mode, 1);
else if (POINTER_TYPE_P (base_type))
adj.type = build_vector_type (pointer_sized_int_node, veclen);
else
adj.type = build_vector_type (base_type, veclen);
adjustments.safe_push (adj);
for (j = veclen; j < sc->simdlen; j += veclen)
adjustments.safe_push (adj);
/* We have previously allocated one extra entry for the mask. Use
it and fill it. */
sc->nargs++;
if (sc->mask_mode != VOIDmode)
base_type = boolean_type_node;
if (node->definition)
{
sc->args[i].orig_arg
= build_decl (UNKNOWN_LOCATION, PARM_DECL, NULL, base_type);
if (sc->mask_mode == VOIDmode)
sc->args[i].simd_array
= create_tmp_simd_array ("mask", base_type, sc->simdlen);
else if (veclen < sc->simdlen)
sc->args[i].simd_array
= create_tmp_simd_array ("mask", adj.type, sc->simdlen / veclen);
else
sc->args[i].simd_array = NULL_TREE;
}
sc->args[i].orig_type = base_type;
sc->args[i].arg_type = SIMD_CLONE_ARG_TYPE_MASK;
}
if (node->definition)
ipa_modify_formal_parameters (node->decl, adjustments);
else
{
tree new_arg_types = NULL_TREE, new_reversed;
bool last_parm_void = false;
if (args.length () > 0 && args.last () == void_type_node)
last_parm_void = true;
gcc_assert (TYPE_ARG_TYPES (TREE_TYPE (node->decl)));
j = adjustments.length ();
for (i = 0; i < j; i++)
{
struct ipa_parm_adjustment *adj = &adjustments[i];
tree ptype;
if (adj->op == IPA_PARM_OP_COPY)
ptype = args[adj->base_index];
else
ptype = adj->type;
new_arg_types = tree_cons (NULL_TREE, ptype, new_arg_types);
}
new_reversed = nreverse (new_arg_types);
if (last_parm_void)
{
if (new_reversed)
TREE_CHAIN (new_arg_types) = void_list_node;
else
new_reversed = void_list_node;
}
tree new_type = build_distinct_type_copy (TREE_TYPE (node->decl));
TYPE_ARG_TYPES (new_type) = new_reversed;
TREE_TYPE (node->decl) = new_type;
adjustments.release ();
}
args.release ();
return adjustments;
}
/* Initialize and copy the function arguments in NODE to their
corresponding local simd arrays. Returns a fresh gimple_seq with
the instruction sequence generated. */
static gimple_seq
simd_clone_init_simd_arrays (struct cgraph_node *node,
ipa_parm_adjustment_vec adjustments)
{
gimple_seq seq = NULL;
unsigned i = 0, j = 0, k;
for (tree arg = DECL_ARGUMENTS (node->decl);
arg;
arg = DECL_CHAIN (arg), i++, j++)
{
if (adjustments[j].op == IPA_PARM_OP_COPY
|| POINTER_TYPE_P (TREE_TYPE (arg)))
continue;
node->simdclone->args[i].vector_arg = arg;
tree array = node->simdclone->args[i].simd_array;
if (node->simdclone->mask_mode != VOIDmode
&& node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_MASK)
{
if (array == NULL_TREE)
continue;
unsigned int l
= tree_to_uhwi (TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (array))));
for (k = 0; k <= l; k++)
{
if (k)
{
arg = DECL_CHAIN (arg);
j++;
}
tree t = build4 (ARRAY_REF, TREE_TYPE (TREE_TYPE (array)),
array, size_int (k), NULL, NULL);
t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
gimplify_and_add (t, &seq);
}
continue;
}
if (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg)) == node->simdclone->simdlen)
{
tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
tree ptr = build_fold_addr_expr (array);
tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
build_int_cst (ptype, 0));
t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
gimplify_and_add (t, &seq);
}
else
{
unsigned int simdlen = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg));
tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
for (k = 0; k < node->simdclone->simdlen; k += simdlen)
{
tree ptr = build_fold_addr_expr (array);
int elemsize;
if (k)
{
arg = DECL_CHAIN (arg);
j++;
}
elemsize
= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (arg))));
tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
build_int_cst (ptype, k * elemsize));
t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
gimplify_and_add (t, &seq);
}
}
}
return seq;
}
/* Callback info for ipa_simd_modify_stmt_ops below. */
struct modify_stmt_info {
ipa_parm_adjustment_vec adjustments;
gimple *stmt;
/* True if the parent statement was modified by
ipa_simd_modify_stmt_ops. */
bool modified;
};
/* Callback for walk_gimple_op.
Adjust operands from a given statement as specified in the
adjustments vector in the callback data. */
static tree
ipa_simd_modify_stmt_ops (tree *tp, int *walk_subtrees, void *data)
{
struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
struct modify_stmt_info *info = (struct modify_stmt_info *) wi->info;
tree *orig_tp = tp;
if (TREE_CODE (*tp) == ADDR_EXPR)
tp = &TREE_OPERAND (*tp, 0);
struct ipa_parm_adjustment *cand = NULL;
if (TREE_CODE (*tp) == PARM_DECL)
cand = ipa_get_adjustment_candidate (&tp, NULL, info->adjustments, true);
else
{
if (TYPE_P (*tp))
*walk_subtrees = 0;
}
tree repl = NULL_TREE;
if (cand)
repl = unshare_expr (cand->new_decl);
else
{
if (tp != orig_tp)
{
*walk_subtrees = 0;
bool modified = info->modified;
info->modified = false;
walk_tree (tp, ipa_simd_modify_stmt_ops, wi, wi->pset);
if (!info->modified)
{
info->modified = modified;
return NULL_TREE;
}
info->modified = modified;
repl = *tp;
}
else
return NULL_TREE;
}
if (tp != orig_tp)
{
repl = build_fold_addr_expr (repl);
gimple *stmt;
if (is_gimple_debug (info->stmt))
{
tree vexpr = make_node (DEBUG_EXPR_DECL);
stmt = gimple_build_debug_source_bind (vexpr, repl, NULL);
DECL_ARTIFICIAL (vexpr) = 1;
TREE_TYPE (vexpr) = TREE_TYPE (repl);
SET_DECL_MODE (vexpr, TYPE_MODE (TREE_TYPE (repl)));
repl = vexpr;
}
else
{
stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (repl)), repl);
repl = gimple_assign_lhs (stmt);
}
gimple_stmt_iterator gsi = gsi_for_stmt (info->stmt);
gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
*orig_tp = repl;
}
else if (!useless_type_conversion_p (TREE_TYPE (*tp), TREE_TYPE (repl)))
{
tree vce = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (*tp), repl);
*tp = vce;
}
else
*tp = repl;
info->modified = true;
return NULL_TREE;
}
/* Traverse the function body and perform all modifications as
described in ADJUSTMENTS. At function return, ADJUSTMENTS will be
modified such that the replacement/reduction value will now be an
offset into the corresponding simd_array.
This function will replace all function argument uses with their
corresponding simd array elements, and adjust the return values
accordingly. */
static void
ipa_simd_modify_function_body (struct cgraph_node *node,
ipa_parm_adjustment_vec adjustments,
tree retval_array, tree iter)
{
basic_block bb;
unsigned int i, j, l;
/* Re-use the adjustments array, but this time use it to replace
every function argument use to an offset into the corresponding
simd_array. */
for (i = 0, j = 0; i < node->simdclone->nargs; ++i, ++j)
{
if (!node->simdclone->args[i].vector_arg)
continue;
tree basetype = TREE_TYPE (node->simdclone->args[i].orig_arg);
tree vectype = TREE_TYPE (node->simdclone->args[i].vector_arg);
adjustments[j].new_decl
= build4 (ARRAY_REF,
basetype,
node->simdclone->args[i].simd_array,
iter,
NULL_TREE, NULL_TREE);
if (adjustments[j].op == IPA_PARM_OP_NONE
&& TYPE_VECTOR_SUBPARTS (vectype) < node->simdclone->simdlen)
j += node->simdclone->simdlen / TYPE_VECTOR_SUBPARTS (vectype) - 1;
}
l = adjustments.length ();
tree name;
FOR_EACH_SSA_NAME (i, name, cfun)
{
if (SSA_NAME_VAR (name)
&& TREE_CODE (SSA_NAME_VAR (name)) == PARM_DECL)
{
for (j = 0; j < l; j++)
if (SSA_NAME_VAR (name) == adjustments[j].base
&& adjustments[j].new_decl)
{
tree base_var;
if (adjustments[j].new_ssa_base == NULL_TREE)
{
base_var
= copy_var_decl (adjustments[j].base,
DECL_NAME (adjustments[j].base),
TREE_TYPE (adjustments[j].base));
adjustments[j].new_ssa_base = base_var;
}
else
base_var = adjustments[j].new_ssa_base;
if (SSA_NAME_IS_DEFAULT_DEF (name))
{
bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
gimple_stmt_iterator gsi = gsi_after_labels (bb);
tree new_decl = unshare_expr (adjustments[j].new_decl);
set_ssa_default_def (cfun, adjustments[j].base, NULL_TREE);
SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
SSA_NAME_IS_DEFAULT_DEF (name) = 0;
gimple *stmt = gimple_build_assign (name, new_decl);
gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
}
else
SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
}
}
}
struct modify_stmt_info info;
info.adjustments = adjustments;
FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (node->decl))
{
gimple_stmt_iterator gsi;
gsi = gsi_start_bb (bb);
while (!gsi_end_p (gsi))
{
gimple *stmt = gsi_stmt (gsi);
info.stmt = stmt;
struct walk_stmt_info wi;
memset (&wi, 0, sizeof (wi));
info.modified = false;
wi.info = &info;
walk_gimple_op (stmt, ipa_simd_modify_stmt_ops, &wi);
if (greturn *return_stmt = dyn_cast <greturn *> (stmt))
{
tree retval = gimple_return_retval (return_stmt);
if (!retval)
{
gsi_remove (&gsi, true);
continue;
}
/* Replace `return foo' with `retval_array[iter] = foo'. */
tree ref = build4 (ARRAY_REF, TREE_TYPE (retval),
retval_array, iter, NULL, NULL);
stmt = gimple_build_assign (ref, retval);
gsi_replace (&gsi, stmt, true);
info.modified = true;
}
if (info.modified)
{
update_stmt (stmt);
if (maybe_clean_eh_stmt (stmt))
gimple_purge_dead_eh_edges (gimple_bb (stmt));
}
gsi_next (&gsi);
}
}
}
/* Helper function of simd_clone_adjust, return linear step addend
of Ith argument. */
static tree
simd_clone_linear_addend (struct cgraph_node *node, unsigned int i,
tree addtype, basic_block entry_bb)
{
tree ptype = NULL_TREE;
switch (node->simdclone->args[i].arg_type)
{
case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
return build_int_cst (addtype, node->simdclone->args[i].linear_step);
case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
ptype = TREE_TYPE (node->simdclone->args[i].orig_arg);
break;
case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
ptype = TREE_TYPE (TREE_TYPE (node->simdclone->args[i].orig_arg));
break;
default:
gcc_unreachable ();
}
unsigned int idx = node->simdclone->args[i].linear_step;
tree arg = node->simdclone->args[idx].orig_arg;
gcc_assert (is_gimple_reg_type (TREE_TYPE (arg)));
gimple_stmt_iterator gsi = gsi_after_labels (entry_bb);
gimple *g;
tree ret;
if (is_gimple_reg (arg))
ret = get_or_create_ssa_default_def (cfun, arg);
else
{
g = gimple_build_assign (make_ssa_name (TREE_TYPE (arg)), arg);
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
ret = gimple_assign_lhs (g);
}
if (TREE_CODE (TREE_TYPE (arg)) == REFERENCE_TYPE)
{
g = gimple_build_assign (make_ssa_name (TREE_TYPE (TREE_TYPE (arg))),
build_simple_mem_ref (ret));
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
ret = gimple_assign_lhs (g);
}
if (!useless_type_conversion_p (addtype, TREE_TYPE (ret)))
{
g = gimple_build_assign (make_ssa_name (addtype), NOP_EXPR, ret);
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
ret = gimple_assign_lhs (g);
}
if (POINTER_TYPE_P (ptype))
{
tree size = TYPE_SIZE_UNIT (TREE_TYPE (ptype));
if (size && TREE_CODE (size) == INTEGER_CST)
{
g = gimple_build_assign (make_ssa_name (addtype), MULT_EXPR,
ret, fold_convert (addtype, size));
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
ret = gimple_assign_lhs (g);
}
}
return ret;
}
/* Adjust the argument types in NODE to their appropriate vector
counterparts. */
static void
simd_clone_adjust (struct cgraph_node *node)
{
push_cfun (DECL_STRUCT_FUNCTION (node->decl));
targetm.simd_clone.adjust (node);
tree retval = simd_clone_adjust_return_type (node);
ipa_parm_adjustment_vec adjustments
= simd_clone_adjust_argument_types (node);
push_gimplify_context ();
gimple_seq seq = simd_clone_init_simd_arrays (node, adjustments);
/* Adjust all uses of vector arguments accordingly. Adjust all
return values accordingly. */
tree iter = create_tmp_var (unsigned_type_node, "iter");
tree iter1 = make_ssa_name (iter);
tree iter2 = NULL_TREE;
ipa_simd_modify_function_body (node, adjustments, retval, iter1);
adjustments.release ();
/* Initialize the iteration variable. */
basic_block entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
basic_block body_bb = split_block_after_labels (entry_bb)->dest;
gimple_stmt_iterator gsi = gsi_after_labels (entry_bb);
/* Insert the SIMD array and iv initialization at function
entry. */
gsi_insert_seq_before (&gsi, seq, GSI_NEW_STMT);
pop_gimplify_context (NULL);
gimple *g;
basic_block incr_bb = NULL;
struct loop *loop = NULL;
/* Create a new BB right before the original exit BB, to hold the
iteration increment and the condition/branch. */
if (EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds))
{
basic_block orig_exit = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), 0)->src;
incr_bb = create_empty_bb (orig_exit);
add_bb_to_loop (incr_bb, body_bb->loop_father);
/* The succ of orig_exit was EXIT_BLOCK_PTR_FOR_FN (cfun), with an empty
flag. Set it now to be a FALLTHRU_EDGE. */
gcc_assert (EDGE_COUNT (orig_exit->succs) == 1);
EDGE_SUCC (orig_exit, 0)->flags |= EDGE_FALLTHRU;
for (unsigned i = 0;
i < EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds); ++i)
{
edge e = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), i);
redirect_edge_succ (e, incr_bb);
}
}
else if (node->simdclone->inbranch)
{
incr_bb = create_empty_bb (entry_bb);
add_bb_to_loop (incr_bb, body_bb->loop_father);
}
if (incr_bb)
{
edge e = make_edge (incr_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
e->probability = REG_BR_PROB_BASE;
gsi = gsi_last_bb (incr_bb);
iter2 = make_ssa_name (iter);
g = gimple_build_assign (iter2, PLUS_EXPR, iter1,
build_int_cst (unsigned_type_node, 1));
gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
/* Mostly annotate the loop for the vectorizer (the rest is done
below). */
loop = alloc_loop ();
cfun->has_force_vectorize_loops = true;
loop->safelen = node->simdclone->simdlen;
loop->force_vectorize = true;
loop->header = body_bb;
}
/* Branch around the body if the mask applies. */
if (node->simdclone->inbranch)
{
gsi = gsi_last_bb (loop->header);
tree mask_array
= node->simdclone->args[node->simdclone->nargs - 1].simd_array;
tree mask;
if (node->simdclone->mask_mode != VOIDmode)
{
tree shift_cnt;
if (mask_array == NULL_TREE)
{
tree arg = node->simdclone->args[node->simdclone->nargs
- 1].vector_arg;
mask = get_or_create_ssa_default_def (cfun, arg);
shift_cnt = iter1;
}
else
{
tree maskt = TREE_TYPE (mask_array);
int c = tree_to_uhwi (TYPE_MAX_VALUE (TYPE_DOMAIN (maskt)));
c = node->simdclone->simdlen / (c + 1);
int s = exact_log2 (c);
gcc_assert (s > 0);
c--;
tree idx = make_ssa_name (TREE_TYPE (iter1));
g = gimple_build_assign (idx, RSHIFT_EXPR, iter1,
build_int_cst (NULL_TREE, s));
gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
mask = make_ssa_name (TREE_TYPE (TREE_TYPE (mask_array)));
tree aref = build4 (ARRAY_REF,
TREE_TYPE (TREE_TYPE (mask_array)),
mask_array, idx, NULL, NULL);
g = gimple_build_assign (mask, aref);
gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
shift_cnt = make_ssa_name (TREE_TYPE (iter1));
g = gimple_build_assign (shift_cnt, BIT_AND_EXPR, iter1,
build_int_cst (TREE_TYPE (iter1), c));
gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
}
g = gimple_build_assign (make_ssa_name (TREE_TYPE (mask)),
RSHIFT_EXPR, mask, shift_cnt);
gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
mask = gimple_assign_lhs (g);
g = gimple_build_assign (make_ssa_name (TREE_TYPE (mask)),
BIT_AND_EXPR, mask,
build_int_cst (TREE_TYPE (mask), 1));
gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
mask = gimple_assign_lhs (g);
}
else
{
mask = make_ssa_name (TREE_TYPE (TREE_TYPE (mask_array)));
tree aref = build4 (ARRAY_REF,
TREE_TYPE (TREE_TYPE (mask_array)),
mask_array, iter1, NULL, NULL);
g = gimple_build_assign (mask, aref);
gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
int bitsize = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (aref)));
if (!INTEGRAL_TYPE_P (TREE_TYPE (aref)))
{
aref = build1 (VIEW_CONVERT_EXPR,
build_nonstandard_integer_type (bitsize, 0),
mask);
mask = make_ssa_name (TREE_TYPE (aref));
g = gimple_build_assign (mask, aref);
gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
}
}
g = gimple_build_cond (EQ_EXPR, mask, build_zero_cst (TREE_TYPE (mask)),
NULL, NULL);
gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
make_edge (loop->header, incr_bb, EDGE_TRUE_VALUE);
FALLTHRU_EDGE (loop->header)->flags = EDGE_FALSE_VALUE;
}
basic_block latch_bb = NULL;
basic_block new_exit_bb = NULL;
/* Generate the condition. */
if (incr_bb)
{
gsi = gsi_last_bb (incr_bb);
g = gimple_build_cond (LT_EXPR, iter2,
build_int_cst (unsigned_type_node,
node->simdclone->simdlen),
NULL, NULL);
gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
edge e = split_block (incr_bb, gsi_stmt (gsi));
latch_bb = e->dest;
new_exit_bb = split_block_after_labels (latch_bb)->dest;
loop->latch = latch_bb;
redirect_edge_succ (FALLTHRU_EDGE (latch_bb), body_bb);
make_edge (incr_bb, new_exit_bb, EDGE_FALSE_VALUE);
/* The successor of incr_bb is already pointing to latch_bb; just
change the flags.
make_edge (incr_bb, latch_bb, EDGE_TRUE_VALUE); */
FALLTHRU_EDGE (incr_bb)->flags = EDGE_TRUE_VALUE;
}
gphi *phi = create_phi_node (iter1, body_bb);
edge preheader_edge = find_edge (entry_bb, body_bb);
edge latch_edge = NULL;
add_phi_arg (phi, build_zero_cst (unsigned_type_node), preheader_edge,
UNKNOWN_LOCATION);
if (incr_bb)
{
latch_edge = single_succ_edge (latch_bb);
add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
/* Generate the new return. */
gsi = gsi_last_bb (new_exit_bb);
if (retval
&& TREE_CODE (retval) == VIEW_CONVERT_EXPR
&& TREE_CODE (TREE_OPERAND (retval, 0)) == RESULT_DECL)
retval = TREE_OPERAND (retval, 0);
else if (retval)
{
retval = build1 (VIEW_CONVERT_EXPR,
TREE_TYPE (TREE_TYPE (node->decl)),
retval);
retval = force_gimple_operand_gsi (&gsi, retval, true, NULL,
false, GSI_CONTINUE_LINKING);
}
g = gimple_build_return (retval);
gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
}
/* Handle aligned clauses by replacing default defs of the aligned
uniform args with __builtin_assume_aligned (arg_N(D), alignment)
lhs. Handle linear by adding PHIs. */
for (unsigned i = 0; i < node->simdclone->nargs; i++)
if (node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM
&& (TREE_ADDRESSABLE (node->simdclone->args[i].orig_arg)
|| !is_gimple_reg_type
(TREE_TYPE (node->simdclone->args[i].orig_arg))))
{
tree orig_arg = node->simdclone->args[i].orig_arg;
if (is_gimple_reg_type (TREE_TYPE (orig_arg)))
iter1 = make_ssa_name (TREE_TYPE (orig_arg));
else
{
iter1 = create_tmp_var_raw (TREE_TYPE (orig_arg));
gimple_add_tmp_var (iter1);
}
gsi = gsi_after_labels (entry_bb);
g = gimple_build_assign (iter1, orig_arg);
gsi_insert_before (&gsi, g, GSI_NEW_STMT);
gsi = gsi_after_labels (body_bb);
g = gimple_build_assign (orig_arg, iter1);
gsi_insert_before (&gsi, g, GSI_NEW_STMT);
}
else if (node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM
&& DECL_BY_REFERENCE (node->simdclone->args[i].orig_arg)
&& TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg))
== REFERENCE_TYPE
&& TREE_ADDRESSABLE
(TREE_TYPE (TREE_TYPE (node->simdclone->args[i].orig_arg))))
{
tree orig_arg = node->simdclone->args[i].orig_arg;
tree def = ssa_default_def (cfun, orig_arg);
if (def && !has_zero_uses (def))
{
iter1 = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (orig_arg)));
gimple_add_tmp_var (iter1);
gsi = gsi_after_labels (entry_bb);
g = gimple_build_assign (iter1, build_simple_mem_ref (def));
gsi_insert_before (&gsi, g, GSI_NEW_STMT);
gsi = gsi_after_labels (body_bb);
g = gimple_build_assign (build_simple_mem_ref (def), iter1);
gsi_insert_before (&gsi, g, GSI_NEW_STMT);
}
}
else if (node->simdclone->args[i].alignment
&& node->simdclone->args[i].arg_type
== SIMD_CLONE_ARG_TYPE_UNIFORM
&& (node->simdclone->args[i].alignment
& (node->simdclone->args[i].alignment - 1)) == 0
&& TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg))
== POINTER_TYPE)
{
unsigned int alignment = node->simdclone->args[i].alignment;
tree orig_arg = node->simdclone->args[i].orig_arg;
tree def = ssa_default_def (cfun, orig_arg);
if (def && !has_zero_uses (def))
{
tree fn = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
gimple_seq seq = NULL;
bool need_cvt = false;
gcall *call
= gimple_build_call (fn, 2, def, size_int (alignment));
g = call;
if (!useless_type_conversion_p (TREE_TYPE (orig_arg),
ptr_type_node))
need_cvt = true;
tree t = make_ssa_name (need_cvt ? ptr_type_node : orig_arg);
gimple_call_set_lhs (g, t);
gimple_seq_add_stmt_without_update (&seq, g);
if (need_cvt)
{
t = make_ssa_name (orig_arg);
g = gimple_build_assign (t, NOP_EXPR, gimple_call_lhs (g));
gimple_seq_add_stmt_without_update (&seq, g);
}
gsi_insert_seq_on_edge_immediate
(single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)), seq);
entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
int freq = compute_call_stmt_bb_frequency (current_function_decl,
entry_bb);
node->create_edge (cgraph_node::get_create (fn),
call, entry_bb->count, freq);
imm_use_iterator iter;
use_operand_p use_p;
gimple *use_stmt;
tree repl = gimple_get_lhs (g);
FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
if (is_gimple_debug (use_stmt) || use_stmt == call)
continue;
else
FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
SET_USE (use_p, repl);
}
}
else if ((node->simdclone->args[i].arg_type
== SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
|| (node->simdclone->args[i].arg_type
== SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP)
|| (node->simdclone->args[i].arg_type
== SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP)
|| (node->simdclone->args[i].arg_type
== SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP))
{
tree orig_arg = node->simdclone->args[i].orig_arg;
gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
|| POINTER_TYPE_P (TREE_TYPE (orig_arg)));
tree def = NULL_TREE;
if (TREE_ADDRESSABLE (orig_arg))
{
def = make_ssa_name (TREE_TYPE (orig_arg));
iter1 = make_ssa_name (TREE_TYPE (orig_arg));
if (incr_bb)
iter2 = make_ssa_name (TREE_TYPE (orig_arg));
gsi = gsi_after_labels (entry_bb);
g = gimple_build_assign (def, orig_arg);
gsi_insert_before (&gsi, g, GSI_NEW_STMT);
}
else
{
def = ssa_default_def (cfun, orig_arg);
if (!def || has_zero_uses (def))
def = NULL_TREE;
else
{
iter1 = make_ssa_name (orig_arg);
if (incr_bb)
iter2 = make_ssa_name (orig_arg);
}
}
if (def)
{
phi = create_phi_node (iter1, body_bb);
add_phi_arg (phi, def, preheader_edge, UNKNOWN_LOCATION);
if (incr_bb)
{
add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
enum tree_code code = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
? PLUS_EXPR : POINTER_PLUS_EXPR;
tree addtype = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
? TREE_TYPE (orig_arg) : sizetype;
tree addcst = simd_clone_linear_addend (node, i, addtype,
entry_bb);
gsi = gsi_last_bb (incr_bb);
g = gimple_build_assign (iter2, code, iter1, addcst);
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
}
imm_use_iterator iter;
use_operand_p use_p;
gimple *use_stmt;
if (TREE_ADDRESSABLE (orig_arg))
{
gsi = gsi_after_labels (body_bb);
g = gimple_build_assign (orig_arg, iter1);
gsi_insert_before (&gsi, g, GSI_NEW_STMT);
}
else
FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
if (use_stmt == phi)
continue;
else
FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
SET_USE (use_p, iter1);
}
}
else if (node->simdclone->args[i].arg_type
== SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP
|| (node->simdclone->args[i].arg_type
== SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP))
{
tree orig_arg = node->simdclone->args[i].orig_arg;
tree def = ssa_default_def (cfun, orig_arg);
gcc_assert (!TREE_ADDRESSABLE (orig_arg)
&& TREE_CODE (TREE_TYPE (orig_arg)) == REFERENCE_TYPE);
if (def && !has_zero_uses (def))
{
tree rtype = TREE_TYPE (TREE_TYPE (orig_arg));
iter1 = make_ssa_name (orig_arg);
if (incr_bb)
iter2 = make_ssa_name (orig_arg);
tree iter3 = make_ssa_name (rtype);
tree iter4 = make_ssa_name (rtype);
tree iter5 = incr_bb ? make_ssa_name (rtype) : NULL_TREE;
gsi = gsi_after_labels (entry_bb);
gimple *load
= gimple_build_assign (iter3, build_simple_mem_ref (def));
gsi_insert_before (&gsi, load, GSI_NEW_STMT);
tree array = node->simdclone->args[i].simd_array;
TREE_ADDRESSABLE (array) = 1;
tree ptr = build_fold_addr_expr (array);
phi = create_phi_node (iter1, body_bb);
add_phi_arg (phi, ptr, preheader_edge, UNKNOWN_LOCATION);
if (incr_bb)
{
add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
g = gimple_build_assign (iter2, POINTER_PLUS_EXPR, iter1,
TYPE_SIZE_UNIT (TREE_TYPE (iter3)));
gsi = gsi_last_bb (incr_bb);
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
}
phi = create_phi_node (iter4, body_bb);
add_phi_arg (phi, iter3, preheader_edge, UNKNOWN_LOCATION);
if (incr_bb)
{
add_phi_arg (phi, iter5, latch_edge, UNKNOWN_LOCATION);
enum tree_code code = INTEGRAL_TYPE_P (TREE_TYPE (iter3))
? PLUS_EXPR : POINTER_PLUS_EXPR;
tree addtype = INTEGRAL_TYPE_P (TREE_TYPE (iter3))
? TREE_TYPE (iter3) : sizetype;
tree addcst = simd_clone_linear_addend (node, i, addtype,
entry_bb);
g = gimple_build_assign (iter5, code, iter4, addcst);
gsi = gsi_last_bb (incr_bb);
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
}
g = gimple_build_assign (build_simple_mem_ref (iter1), iter4);
gsi = gsi_after_labels (body_bb);
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
imm_use_iterator iter;
use_operand_p use_p;
gimple *use_stmt;
FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
if (use_stmt == load)
continue;
else
FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
SET_USE (use_p, iter1);
if (!TYPE_READONLY (rtype) && incr_bb)
{
tree v = make_ssa_name (rtype);
tree aref = build4 (ARRAY_REF, rtype, array,
size_zero_node, NULL_TREE,
NULL_TREE);
gsi = gsi_after_labels (new_exit_bb);
g = gimple_build_assign (v, aref);
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
g = gimple_build_assign (build_simple_mem_ref (def), v);
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
}
}
}
calculate_dominance_info (CDI_DOMINATORS);
if (loop)
add_loop (loop, loop->header->loop_father);
update_ssa (TODO_update_ssa);
pop_cfun ();
}
/* If the function in NODE is tagged as an elemental SIMD function,
create the appropriate SIMD clones. */
static void
expand_simd_clones (struct cgraph_node *node)
{
tree attr = lookup_attribute ("omp declare simd",
DECL_ATTRIBUTES (node->decl));
if (attr == NULL_TREE
|| node->global.inlined_to
|| lookup_attribute ("noclone", DECL_ATTRIBUTES (node->decl)))
return;
/* Ignore
#pragma omp declare simd
extern int foo ();
in C, there we don't know the argument types at all. */
if (!node->definition
&& TYPE_ARG_TYPES (TREE_TYPE (node->decl)) == NULL_TREE)
return;
/* Call this before creating clone_info, as it might ggc_collect. */
if (node->definition && node->has_gimple_body_p ())
node->get_body ();
do
{
/* Start with parsing the "omp declare simd" attribute(s). */
bool inbranch_clause_specified;
struct cgraph_simd_clone *clone_info
= simd_clone_clauses_extract (node, TREE_VALUE (attr),
&inbranch_clause_specified);
if (clone_info == NULL)
continue;
int orig_simdlen = clone_info->simdlen;
tree base_type = simd_clone_compute_base_data_type (node, clone_info);
/* The target can return 0 (no simd clones should be created),
1 (just one ISA of simd clones should be created) or higher
count of ISA variants. In that case, clone_info is initialized
for the first ISA variant. */
int count
= targetm.simd_clone.compute_vecsize_and_simdlen (node, clone_info,
base_type, 0);
if (count == 0)
continue;
/* Loop over all COUNT ISA variants, and if !INBRANCH_CLAUSE_SPECIFIED,
also create one inbranch and one !inbranch clone of it. */
for (int i = 0; i < count * 2; i++)
{
struct cgraph_simd_clone *clone = clone_info;
if (inbranch_clause_specified && (i & 1) != 0)
continue;
if (i != 0)
{
clone = simd_clone_struct_alloc (clone_info->nargs
+ ((i & 1) != 0));
simd_clone_struct_copy (clone, clone_info);
/* Undo changes targetm.simd_clone.compute_vecsize_and_simdlen
and simd_clone_adjust_argument_types did to the first
clone's info. */
clone->nargs -= clone_info->inbranch;
clone->simdlen = orig_simdlen;
/* And call the target hook again to get the right ISA. */
targetm.simd_clone.compute_vecsize_and_simdlen (node, clone,
base_type,
i / 2);
if ((i & 1) != 0)
clone->inbranch = 1;
}
/* simd_clone_mangle might fail if such a clone has been created
already. */
tree id = simd_clone_mangle (node, clone);
if (id == NULL_TREE)
continue;
/* Only when we are sure we want to create the clone do we actually
clone the function (for definitions) or create another
extern FUNCTION_DECL (for prototypes without definitions). */
struct cgraph_node *n = simd_clone_create (node);
if (n == NULL)
continue;
n->simdclone = clone;
clone->origin = node;
clone->next_clone = NULL;
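/* Editor's note: node->simd_clones is the head of a NULL-terminated forward
list chained through simdclone->next_clone, while the head clone's
simdclone->prev_clone points at the current tail; this lets the code below
append the new clone N in constant time. */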
if (node->simd_clones == NULL)
{
clone->prev_clone = n;
node->simd_clones = n;
}
else
{
clone->prev_clone = node->simd_clones->simdclone->prev_clone;
clone->prev_clone->simdclone->next_clone = n;
node->simd_clones->simdclone->prev_clone = n;
}
symtab->change_decl_assembler_name (n->decl, id);
/* And finally adjust the return type and parameters and, for
definitions, also the function body. */
if (node->definition)
simd_clone_adjust (n);
else
{
simd_clone_adjust_return_type (n);
simd_clone_adjust_argument_types (n);
}
}
}
while ((attr = lookup_attribute ("omp declare simd", TREE_CHAIN (attr))));
}
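/* Editor's illustration (not from the original source): a declaration such as

   #pragma omp declare simd simdlen(8) uniform(b) linear(i)
   int foo (int *b, int i);

   carries one "omp declare simd" attribute per pragma; the do/while loop
   above walks that attribute chain and creates the requested clones for
   each occurrence. */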
/* Entry point for IPA simd clone creation pass. */
static unsigned int
ipa_omp_simd_clone (void)
{
struct cgraph_node *node;
FOR_EACH_FUNCTION (node)
expand_simd_clones (node);
return 0;
}
namespace {
const pass_data pass_data_omp_simd_clone =
{
SIMPLE_IPA_PASS, /* type */
"simdclone", /* name */
OPTGROUP_OMP, /* optinfo_flags */
TV_NONE, /* tv_id */
( PROP_ssa | PROP_cfg ), /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
0, /* todo_flags_finish */
};
class pass_omp_simd_clone : public simple_ipa_opt_pass
{
public:
pass_omp_simd_clone(gcc::context *ctxt)
: simple_ipa_opt_pass(pass_data_omp_simd_clone, ctxt)
{}
/* opt_pass methods: */
virtual bool gate (function *);
virtual unsigned int execute (function *) { return ipa_omp_simd_clone (); }
};
bool
pass_omp_simd_clone::gate (function *)
{
return targetm.simd_clone.compute_vecsize_and_simdlen != NULL;
}
} // anon namespace
simple_ipa_opt_pass *
make_pass_omp_simd_clone (gcc::context *ctxt)
{
return new pass_omp_simd_clone (ctxt);
}
|
modifier_view.h | // ==========================================================================
// SeqAn - The Library for Sequence Analysis
// ==========================================================================
// Copyright (c) 2006-2015, Knut Reinert, FU Berlin
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of Knut Reinert or the FU Berlin nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
//
// ==========================================================================
// Author: David Weese <david.weese@fu-berlin.de>
// Author: Manuel Holtgrewe <manuel.holtgrewe@fu-berlin.de>
// ==========================================================================
// TODO(holtgrew): Split into modified_string_mod_view.h and modified_iterator_mod_view.h.
// TODO(holtgrew): Move out convert()
#ifndef SEQAN_MODIFIER_MODIFIER_VIEW_H_
#define SEQAN_MODIFIER_MODIFIER_VIEW_H_
namespace seqan
{
// ==========================================================================
// Forwards
// ==========================================================================
// ==========================================================================
// Classes
// ==========================================================================
// --------------------------------------------------------------------------
// Class ModView
// --------------------------------------------------------------------------
/*!
* @class ModViewModifiedIterator
* @extends ModifiedIterator
* @headerfile <seqan/modifier.h>
*
* @brief Transforms the characters of a host using a custom functor.
*
* @signature template <typename THost, typename TFunctor>
* class ModifiedIterator<THost, ModView<TFunctor> >;
*
* @tparam THost The host iterator.
* @tparam TFunctor A unary functor type.
*/
/*!
* @class ModViewModifiedString
* @extends ModifiedString
* @headerfile <seqan/modifier.h>
*
* @brief Transforms the characters of a host using a custom functor.
*
* @signature template <typename THost, typename TFunctor>
* class ModifiedString<THost, ModView<TFunctor> >;
*
* @tparam THost The host sequence.
* @tparam TFunctor A unary functor type.
*/
template <typename TFunctor>
struct ModView {};
template <typename TFunctor>
struct ModViewCargo
{
TFunctor func;
ModViewCargo() : func()
{}
};
template <typename THost, typename TFunctor>
class ModifiedIterator<THost, ModView<TFunctor> >
{
public:
typedef typename Cargo<ModifiedIterator>::Type TCargo_;
THost _host;
TCargo_ _cargo;
mutable typename Value<ModifiedIterator>::Type tmp_value;
ModifiedIterator() : _host(), tmp_value()
{}
template <typename TOtherHost>
ModifiedIterator(ModifiedIterator<TOtherHost, ModView<TFunctor> > & origin) :
_host(origin._host), _cargo(origin._cargo), tmp_value()
{}
explicit
ModifiedIterator(THost const & host) :
_host(host), tmp_value()
{}
ModifiedIterator(THost const & host, TFunctor const & functor):
_host(host), tmp_value()
{
cargo(*this).func = functor;
}
};
// --------------------------------------------------------------------------
// Class ModifiedString
// --------------------------------------------------------------------------
template <typename THost, typename TFunctor>
class ModifiedString<THost, ModView<TFunctor> >
{
public:
typedef typename Pointer_<THost>::Type THostPointer_;
typedef typename Cargo<ModifiedString>::Type TCargo_;
mutable THostPointer_ _host;
TCargo_ _cargo;
mutable typename Value<ModifiedString>::Type tmp_value;
// Default constructor.
ModifiedString() : _host(), tmp_value()
{}
// Construct with the actual host.
explicit
ModifiedString(typename Parameter_<THost>::Type host):
_host(_toPointer(host)), tmp_value()
{}
// Construct with the functor.
explicit
ModifiedString(TFunctor const & functor):
_host(), tmp_value()
{
cargo(*this).func = functor;
}
// Constructor for creating a ModifiedString with const host from a non-const host.
template <typename THost_>
explicit
ModifiedString(THost_ & host,
SEQAN_CTOR_ENABLE_IF(IsConstructible<THost, THost_>)) :
_host(_toPointer(host)), tmp_value()
{
ignoreUnusedVariableWarning(dummy);
}
// Construct with the actual host; variant with functor.
ModifiedString(typename Parameter_<THost>::Type host, TFunctor const & functor) :
_host(_toPointer(host)), tmp_value()
{
cargo(*this).func = functor;
}
// Constructor for creating a ModifiedString with const host with a non-const host; variant with functor.
template <typename THost_>
explicit
ModifiedString(THost_ & host,
TFunctor const & functor,
SEQAN_CTOR_ENABLE_IF(IsConstructible<THost, THost_>)) :
_host(_toPointer(host)), tmp_value()
{
ignoreUnusedVariableWarning(dummy);
cargo(*this).func = functor;
}
#ifdef SEQAN_CXX11_STANDARD
// Constructor for innermost type; hand down to _host which is a ModifiedString itself.
template <typename THost_>
explicit
ModifiedString(THost_ && host,
SEQAN_CTOR_ENABLE_IF(IsAnInnerHost<
typename RemoveReference<THost>::Type,
typename RemoveReference<THost_>::Type >)) :
_host(std::forward<THost_>(host)), tmp_value()
{
ignoreUnusedVariableWarning(dummy);
}
// Constructor for innermost type; hand down to _host which is a ModifiedString itself. Variant with functor.
template <typename THost_>
explicit
ModifiedString(THost_ && host,
TFunctor const & functor,
SEQAN_CTOR_ENABLE_IF(IsAnInnerHost<
typename RemoveReference<THost>::Type,
typename RemoveReference<THost_>::Type >)) :
_host(std::forward<THost_>(host)), tmp_value()
{
ignoreUnusedVariableWarning(dummy);
cargo(*this).func = functor;
}
#else
// Constructor for innermost type; hand down to _host which is a ModifiedString itself. Non-const variant.
template <typename THost_>
explicit
ModifiedString(THost_ & host,
SEQAN_CTOR_ENABLE_IF(IsAnInnerHost<THost, THost_>)) :
_host(host), tmp_value()
{
ignoreUnusedVariableWarning(dummy);
}
// Constructor for innermost type; hand down to _host which is a ModifiedString itself. Non-const variant.
template <typename THost_>
explicit
ModifiedString(THost_ const & host,
SEQAN_CTOR_ENABLE_IF(IsAnInnerHost<THost, THost_ const>)) :
_host(host), tmp_value()
{
ignoreUnusedVariableWarning(dummy);
}
// Constructor for innermost type; hand down to _host which is a ModifiedString itself. Non-const variant with
// functor.
template <typename THost_>
explicit
ModifiedString(THost_ & host,
TFunctor const & functor,
SEQAN_CTOR_ENABLE_IF(IsAnInnerHost<THost, THost_>)) :
_host(host), tmp_value()
{
ignoreUnusedVariableWarning(dummy);
cargo(*this).func = functor;
}
// Constructor for innermost type; hand down to _host which is a ModifiedString itself. Non-const variant with
// functor.
template <typename THost_>
explicit
ModifiedString(THost_ const & host,
TFunctor const & functor,
SEQAN_CTOR_ENABLE_IF(IsAnInnerHost<THost, THost_ const>)) :
_host(host), tmp_value()
{
ignoreUnusedVariableWarning(dummy);
cargo(*this).func = functor;
}
#endif
template <typename TPos>
inline typename Reference<ModifiedString>::Type
operator[](TPos pos)
{
return value(*this, pos);
}
template <typename TPos>
inline typename Reference<ModifiedString const>::Type
operator[](TPos pos) const
{
return value(*this, pos);
}
};
// ==========================================================================
// Metafunctions
// ==========================================================================
// --------------------------------------------------------------------------
// Metafunction Cargo [ModifiedIterator]
// --------------------------------------------------------------------------
template <typename THost, typename TFunctor>
struct Cargo<ModifiedIterator<THost, ModView<TFunctor> > >
{
typedef ModViewCargo<TFunctor> Type;
};
// --------------------------------------------------------------------------
// Metafunction Value [ModifiedIterator]
// --------------------------------------------------------------------------
template <typename THost, typename TFunctor>
struct Value<ModifiedIterator<THost, ModView<TFunctor> > >
{
typedef typename TFunctor::result_type TResult_;
typedef typename RemoveConst_<TResult_>::Type Type;
};
template <typename THost, typename TFunctor>
struct Value<ModifiedIterator<THost, ModView<TFunctor> > const> :
Value<ModifiedIterator<THost, ModView<TFunctor> > >
{};
// --------------------------------------------------------------------------
// Metafunction GetValue [ModifiedIterator]
// --------------------------------------------------------------------------
template <typename THost, typename TFunctor>
struct GetValue<ModifiedIterator<THost, ModView<TFunctor> > > :
Value<ModifiedIterator<THost, ModView<TFunctor> > >
{};
template <typename THost, typename TFunctor>
struct GetValue<ModifiedIterator<THost, ModView<TFunctor> > const> :
Value<ModifiedIterator<THost, ModView<TFunctor> > >
{};
// --------------------------------------------------------------------------
// Metafunction Reference [ModifiedIterator]
// --------------------------------------------------------------------------
template <typename THost, typename TFunctor>
struct Reference<ModifiedIterator<THost, ModView<TFunctor> > > :
Value<ModifiedIterator<THost, ModView<TFunctor> > >
{};
template <typename THost, typename TFunctor>
struct Reference<ModifiedIterator<THost, ModView<TFunctor> > const> :
Value<ModifiedIterator<THost, ModView<TFunctor> > >
{};
// NOTE(h-2): ModView element access is always by copy, never by reference.
// This is a workaround for dangling references to the stack when
// combining infixes and modified views; more precisely:
// if you iterate over an infix of a ModView, then value() on the iterator
// would return a reference to the tmp_value inside the modified iterator,
// which might already have been destructed.
// This is a more general problem that stems from the fact that
// "virtual strings" of the same type (infixes, modstrings) can be
// automatically compacted into one layer, but combinations cannot.
// The workaround lives in ModView because it is used less frequently
// than infixes.
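// Usage sketch (editor's addition, not part of the original header): a ModView
// applies its functor lazily on each element access, leaving the host
// untouched.  Assuming the usual SeqAn sequence and modifier headers are
// included, something along these lines computes a lazy complement:
//
//     Dna5String seq = "ACGTN";
//     ModifiedString<Dna5String, ModView<FunctorComplement<Dna5> > > comp(seq);
//     std::cout << comp << std::endl;   // prints "TGCAN"; seq is unchanged
//
// Per the note above, comp[i] returns the transformed value by copy rather
// than a reference into the host.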
// --------------------------------------------------------------------------
// Metafunction Cargo [ModifiedString]
// --------------------------------------------------------------------------
template <typename THost, typename TFunctor>
struct Cargo< ModifiedString<THost, ModView<TFunctor> > >
{
typedef ModViewCargo<TFunctor> Type;
};
// ==========================================================================
// Functions
// ==========================================================================
// --------------------------------------------------------------------------
// Function getValue() [ModifiedIterator]
// --------------------------------------------------------------------------
template <typename THost, typename TFunctor>
inline typename GetValue<ModifiedIterator<THost, ModView<TFunctor> > >::Type
getValue(ModifiedIterator<THost, ModView<TFunctor> > & me)
{
return cargo(me).func(getValue(host(me)));
}
template <typename THost, typename TFunctor>
inline typename GetValue<ModifiedIterator<THost, ModView<TFunctor> > const>::Type
getValue(ModifiedIterator<THost, ModView<TFunctor> > const & me)
{
return cargo(me).func(getValue(host(me)));
}
// --------------------------------------------------------------------------
// Function value() [ModifiedIterator]
// --------------------------------------------------------------------------
template <typename THost, typename TFunctor>
inline typename GetValue<ModifiedIterator<THost, ModView<TFunctor> > >::Type
value(ModifiedIterator<THost, ModView<TFunctor> > & me)
{
return getValue(me);
}
template <typename THost, typename TFunctor>
inline typename GetValue<ModifiedIterator<THost, ModView<TFunctor> > const>::Type
value(ModifiedIterator<THost, ModView<TFunctor> > const & me)
{
return getValue(me);
}
// --------------------------------------------------------------------------
// Function getValue() [ModifiedString]
// --------------------------------------------------------------------------
template <typename THost, typename TFunctor, typename TPos>
inline typename GetValue<ModifiedString<THost, ModView<TFunctor> > >::Type
getValue(ModifiedString<THost, ModView<TFunctor> > & me, TPos pos)
{
return cargo(me).func(getValue(host(me), pos));
}
template <typename THost, typename TFunctor, typename TPos>
inline typename GetValue<ModifiedString<THost, ModView<TFunctor> > const>::Type
getValue(ModifiedString<THost, ModView<TFunctor> > const & me, TPos pos)
{
return cargo(me).func(getValue(host(me), pos));
}
// --------------------------------------------------------------------------
// Function value() [ModifiedString]
// --------------------------------------------------------------------------
template <typename THost, typename TFunctor, typename TPos>
inline typename GetValue<ModifiedString<THost, ModView<TFunctor> > >::Type
value(ModifiedString<THost, ModView<TFunctor> > & me, TPos pos)
{
return getValue(me, pos);
}
template <typename THost, typename TFunctor, typename TPos>
inline typename GetValue<ModifiedString<THost, ModView<TFunctor> > const>::Type
value(ModifiedString<THost, ModView<TFunctor> > const & me, TPos pos)
{
return getValue(me, pos);
}
// --------------------------------------------------------------------------
// Function assignModViewFunctor()
// --------------------------------------------------------------------------
template <typename THost, typename TFunctor>
inline void
assignModViewFunctor(ModifiedString<THost, ModView<TFunctor> > & me, TFunctor const & functor)
{
cargo(me).func = functor;
}
// --------------------------------------------------------------------------
// Function convert()
// --------------------------------------------------------------------------
template < typename TSequence, typename TFunctor >
inline void
convert(TSequence & sequence, TFunctor const &F)
{
#if defined (_OPENMP) && defined (SEQAN_PARALLEL)
// OpenMP does not support for-loops over iterators, so index variables are used instead.
typedef typename Position<TSequence>::Type TPos;
typedef typename MakeSigned_<TPos>::Type TSignedPos;
#pragma omp parallel for if(length(sequence) > 1000000)
for(TSignedPos p = 0; p < (TSignedPos)length(sequence); ++p)
sequence[p] = F(sequence[p]);
#else
typedef typename Iterator<TSequence, Standard>::Type TIter;
TIter it = begin(sequence, Standard());
TIter itEnd = end(sequence, Standard());
for(; it != itEnd; ++it)
*it = F(*it);
#endif
}
template < typename TSequence, typename TFunctor >
inline void
convert(TSequence const & sequence, TFunctor const &F)
{
#if defined (_OPENMP) && defined (SEQAN_PARALLEL)
// OpenMP does not support for-loops over iterators, so index variables are used instead.
typedef typename Position<TSequence>::Type TPos;
typedef typename MakeSigned_<TPos>::Type TSignedPos;
#pragma omp parallel for if(length(sequence) > 1000000)
for(TSignedPos p = 0; p < (TSignedPos)length(sequence); ++p)
sequence[p] = F(sequence[p]);
#else
typedef typename Iterator<TSequence const, Standard>::Type TIter;
TIter it = begin(sequence, Standard());
TIter itEnd = end(sequence, Standard());
for(; it != itEnd; ++it)
*it = F(*it);
#endif
}
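// Usage sketch (editor's addition): in contrast to the lazy ModView above,
// convert() applies the functor eagerly and in place, e.g.
//
//     Dna5String seq = "ACGTN";
//     convert(seq, FunctorComplement<Dna5>());   // seq now holds "TGCAN"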
} // namespace seqan
#endif // SEQAN_MODIFIER_MODIFIER_VIEW_H_
|
sufsort_utils.h | /*
* nvbio
* Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include <nvbio/sufsort/sufsort_priv.h>
#include <nvbio/strings/string_set.h>
#include <nvbio/basic/thrust_view.h>
#include <nvbio/basic/vector.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
namespace nvbio {
///@addtogroup Sufsort
///@{
///
///@defgroup SetBWTHandlersModule Set-BWT Handlers
/// This module contains a series of \ref SetBWTOutputHandler "SetBWTOutputHandlers"
///
///
///@defgroup StringSuffixHandlersModule String Suffix Handlers
/// This module contains a series of \ref StringSuffixHandler "StringSuffixHandlers"
///
///@addtogroup SetBWTHandlersModule
///@{
/// base virtual interface used by all string-set BWT handlers (a model of \ref SetBWTOutputHandler)
///
struct SetBWTHandler
{
/// virtual destructor
///
virtual ~SetBWTHandler() {}
/// process a batch of BWT symbols
///
virtual void process(
const uint32 n_suffixes,
const uint32 bits_per_symbol,
const uint32* bwt,
const uint32 n_dollars,
const uint64* dollar_pos,
const uint64* dollar_ids) {}
/// process a batch of BWT symbols
///
virtual void process(
const uint32 n_suffixes,
const uint8* bwt,
const uint32 n_dollars,
const uint64* dollar_pos,
const uint64* dollar_ids) {}
};
/// A \ref SetBWTOutputHandler class to output a string-set BWT to a (potentially packed) device string
///
template <typename OutputIterator>
struct DeviceBWTHandler : public SetBWTHandler
{
/// constructor
///
DeviceBWTHandler(OutputIterator _output) : output(_output), offset(0u) {}
/// process a batch of BWT symbols
///
void process(
const uint32 n_suffixes,
const uint32 bits_per_symbol,
const uint32* bwt,
const uint32 n_dollars,
const uint64* dollar_pos,
const uint64* dollar_ids)
{
// NOTE: packed (word-encoded) input is not supported by this handler!
}
/// process a batch of BWT symbols
///
void process(
const uint32 n_suffixes,
const uint8* bwt,
const uint32 n_dollars,
const uint64* dollar_pos,
const uint64* dollar_ids)
{
// TODO: this may be redundant, as we often have a device copy of 'bwt' already
priv::alloc_storage( d_bwt, n_suffixes );
thrust::copy( bwt, bwt + n_suffixes, d_bwt.begin() );
priv::device_copy(
n_suffixes,
raw_pointer( d_bwt ),
output,
offset );
offset += n_suffixes;
}
OutputIterator output;
uint64 offset;
nvbio::vector<device_tag,uint8> d_bwt;
};
/// A \ref SetBWTOutputHandler class to output a string-set BWT to a (potentially packed) host string
///
template <typename OutputIterator>
struct HostBWTHandler : public SetBWTHandler
{
HostBWTHandler(OutputIterator _output) : output(_output) {}
/// process a batch of BWT symbols
///
template <typename bwt_iterator>
void do_process(
const uint32 n_suffixes,
const bwt_iterator bwt)
{
for (uint32 i = 0; i < n_suffixes; ++i)
output[i] = bwt[i];
output += n_suffixes;
}
/// process a batch of BWT symbols
///
void process(
const uint32 n_suffixes,
const uint32 bits_per_symbol,
const uint32* bwt,
const uint32 n_dollars,
const uint64* dollar_pos,
const uint64* dollar_ids)
{
if (bits_per_symbol == 2)
do_process( n_suffixes, PackedStream<const uint32*,uint8,2,true>(bwt) );
else if (bits_per_symbol == 4)
do_process( n_suffixes, PackedStream<const uint32*,uint8,4,true>(bwt) );
else if (bits_per_symbol == 8)
do_process( n_suffixes, PackedStream<const uint32*,uint8,8,true>(bwt) );
}
/// process a batch of BWT symbols
///
void process(
const uint32 n_suffixes,
const uint8* bwt,
const uint32 n_dollars,
const uint64* dollar_pos,
const uint64* dollar_ids)
{
do_process( n_suffixes, bwt );
}
OutputIterator output;
};
/// A \ref SetBWTOutputHandler class to output a string-set BWT to a (potentially packed) host string
///
template <uint32 SYMBOL_SIZE, bool BIG_ENDIAN, typename word_type>
struct HostBWTHandler< PackedStream<word_type*,uint8,SYMBOL_SIZE,BIG_ENDIAN,uint64> > : public SetBWTHandler
{
typedef PackedStream<word_type*,uint8,SYMBOL_SIZE,BIG_ENDIAN,uint64> OutputIterator;
static const uint32 WORD_SIZE = uint32( 8u * sizeof(word_type) );
static const uint32 SYMBOLS_PER_WORD = WORD_SIZE / SYMBOL_SIZE;
/// constructor
///
HostBWTHandler(OutputIterator _output) : output(_output), offset(0) {}
/// process a batch of BWT symbols
///
template <typename bwt_iterator>
void do_process(
const uint32 n_suffixes,
const bwt_iterator bwt)
{
const uint32 word_offset = offset & (SYMBOLS_PER_WORD-1);
uint32 word_rem = 0;
uint32 word_idx = offset / SYMBOLS_PER_WORD;
word_type* words = output.stream();
if (word_offset)
{
// compute how many symbols we still need to encode to fill the current word
word_rem = SYMBOLS_PER_WORD - word_offset;
// fetch the word in question
word_type word = words[ word_idx ];
for (uint32 i = 0; i < word_rem; ++i)
{
const uint32 bit_idx = (word_offset + i) * SYMBOL_SIZE;
const uint32 symbol_offset = BIG_ENDIAN ? (WORD_SIZE - SYMBOL_SIZE - bit_idx) : bit_idx;
const word_type symbol = word_type(bwt[i]) << symbol_offset;
// set bits
word |= symbol;
}
// write out the word
words[ word_idx ] = word;
// advance word_idx
++word_idx;
}
for (uint32 i = word_rem; i < n_suffixes; i += SYMBOLS_PER_WORD)
{
// encode a word's worth of characters
word_type word = 0u;
const uint32 n_symbols = nvbio::min( SYMBOLS_PER_WORD, n_suffixes - i );
for (uint32 j = 0; j < n_symbols; ++j)
{
const uint32 bit_idx = j * SYMBOL_SIZE;
const uint32 symbol_offset = BIG_ENDIAN ? (WORD_SIZE - SYMBOL_SIZE - bit_idx) : bit_idx;
const word_type symbol = word_type(bwt[i + j]) << symbol_offset;
// set bits
word |= symbol;
}
// write out the word and advance word_idx
words[ word_idx++ ] = word;
}
// advance the offset
offset += n_suffixes;
}
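// Worked example (editor's addition): with SYMBOL_SIZE == 2 and a 32-bit
// word_type, WORD_SIZE == 32 and SYMBOLS_PER_WORD == 16.  If 'offset' is 20
// when a batch arrives, then word_idx == 1, word_offset == 4 and word_rem ==
// 12: the first 12 symbols of the batch fill the 12 free slots of words[1],
// after which whole 16-symbol words are emitted.  Within a word, symbol j is
// placed at bit j*SYMBOL_SIZE when BIG_ENDIAN is false and at bit
// WORD_SIZE - SYMBOL_SIZE - j*SYMBOL_SIZE when it is true.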
/// process a batch of BWT symbols
///
void process(
const uint32 n_suffixes,
const uint32 bits_per_symbol,
const uint32* bwt,
const uint32 n_dollars,
const uint64* dollar_pos,
const uint64* dollar_ids)
{
if (bits_per_symbol == 2)
do_process( n_suffixes, PackedStream<const uint32*,uint8,2,true>(bwt) );
else if (bits_per_symbol == 4)
do_process( n_suffixes, PackedStream<const uint32*,uint8,4,true>(bwt) );
else if (bits_per_symbol == 8)
do_process( n_suffixes, PackedStream<const uint32*,uint8,8,true>(bwt) );
}
/// process a batch of BWT symbols
///
void process(
const uint32 n_suffixes,
const uint8* bwt,
const uint32 n_dollars,
const uint64* dollar_pos,
const uint64* dollar_ids)
{
do_process( n_suffixes, bwt );
}
OutputIterator output;
uint64 offset;
};
/// A \ref SetBWTOutputHandler class to discard a string-set BWT
///
struct DiscardBWTHandler : public SetBWTHandler
{
};
///@} SetBWTHandlersModule
///@addtogroup StringSuffixHandlersModule
///@{
/// a utility \ref StringSuffixHandler to compute the BWT of the sorted suffixes
///
template <typename string_type, typename output_iterator>
struct StringBWTHandler
{
typedef typename string_type::index_type index_type;
static const uint32 NULL_PRIMARY = uint32(-1); // TODO: switch to index_type
// constructor
//
StringBWTHandler(
const index_type _string_len,
const string_type _string,
output_iterator _output) :
string_len ( _string_len ),
string ( _string ),
primary ( NULL_PRIMARY ),
n_output ( 0 ),
output ( _output )
{
// encode the first BWT symbol explicitly
priv::device_copy( 1u, string + string_len-1, output, index_type(0u) );
}
// process the next batch of suffixes
//
void process_batch(
const uint32 n_suffixes,
const uint32* d_suffixes)
{
priv::alloc_storage( d_block_bwt, n_suffixes );
// compute the bwt of the block
thrust::transform(
thrust::device_ptr<const uint32>( d_suffixes ),
thrust::device_ptr<const uint32>( d_suffixes ) + n_suffixes,
d_block_bwt.begin(),
priv::string_bwt_functor<string_type>( string_len, string ) );
// check if there is a $ sign
const uint32 block_primary = uint32( thrust::find(
d_block_bwt.begin(),
d_block_bwt.begin() + n_suffixes,
255u ) - d_block_bwt.begin() );
if (block_primary < n_suffixes)
{
// keep track of the global primary position
primary = n_output + block_primary + 1u; // +1u for the implicit empty suffix
}
// and copy the transformed block to the output
priv::device_copy(
n_suffixes,
d_block_bwt.begin(),
output,
n_output + 1u ); // +1u for the implicit empty suffix
// advance the output counter
n_output += n_suffixes;
}
// process a sparse set of suffixes
//
void process_scattered(
const uint32 n_suffixes,
const uint32* d_suffixes,
const uint32* d_slots)
{
priv::alloc_storage( d_block_bwt, n_suffixes );
// compute the bwt of the block
thrust::transform(
thrust::device_ptr<const uint32>( d_suffixes ),
thrust::device_ptr<const uint32>( d_suffixes ) + n_suffixes,
d_block_bwt.begin(),
priv::string_bwt_functor<string_type>( string_len, string ) );
// check if there is a $ sign
const uint32 block_primary = uint32( thrust::find(
d_block_bwt.begin(),
d_block_bwt.begin() + n_suffixes,
255u ) - d_block_bwt.begin() );
if (block_primary < n_suffixes)
{
// keep track of the global primary position
primary = thrust::device_ptr<const uint32>( d_slots )[ block_primary ] + 1u; // +1u for the implicit empty suffix
}
// and scatter the resulting symbols in the proper place
priv::device_scatter(
n_suffixes,
d_block_bwt.begin(),
thrust::make_transform_iterator(
thrust::device_ptr<const uint32>( d_slots ),
priv::offset_functor(1u) ), // +1u for the implicit empty suffix
output );
}
// remove the dollar symbol
//
void remove_dollar()
{
// shift back all symbols following the primary
const uint32 max_block_size = 32*1024*1024;
priv::alloc_storage( d_block_bwt, max_block_size );
for (index_type block_begin = primary; block_begin < string_len; block_begin += max_block_size)
{
const index_type block_end = nvbio::min( block_begin + max_block_size, string_len );
// copy all symbols to a temporary buffer
priv::device_copy(
block_end - block_begin,
output + block_begin + 1u,
d_block_bwt.begin(),
uint32(0) );
// and copy the shifted block to the output
priv::device_copy(
block_end - block_begin,
d_block_bwt.begin(),
output,
block_begin );
}
}
const index_type string_len;
const string_type string;
uint32 primary;
uint32 n_output;
output_iterator output;
thrust::device_vector<uint8> d_block_bwt;
};
/// a utility \ref StringSuffixHandler to retain a Sampled Suffix Array
///
template <typename output_iterator>
struct StringSSAHandler
{
// constructor
//
StringSSAHandler(
const uint32 _string_len,
const uint32 _mod,
output_iterator _output) :
string_len ( _string_len ),
mod ( _mod ),
n_output ( 1 ),
output ( _output )
{
// encode the implicit empty suffix directly
output[0] = uint32(-1);
}
// process the next batch of suffixes
//
void process_batch(
const uint32 n_suffixes,
const uint32* d_suffixes)
{
priv::alloc_storage( h_suffixes, n_suffixes );
thrust::copy(
thrust::device_ptr<const uint32>(d_suffixes),
thrust::device_ptr<const uint32>(d_suffixes) + n_suffixes,
h_suffixes.begin() );
// copy_if
#pragma omp parallel for
for (int i = 0; i < int( n_suffixes ); ++i)
{
const uint32 slot = i + n_output;
if ((slot & (mod-1)) == 0)
output[slot / mod] = h_suffixes[i];
}
// advance the output counter
n_output += n_suffixes;
}
// process a sparse set of suffixes
//
void process_scattered(
const uint32 n_suffixes,
const uint32* d_suffixes,
const uint32* d_slots)
{
priv::alloc_storage( h_slots, n_suffixes );
priv::alloc_storage( h_suffixes, n_suffixes );
thrust::copy(
thrust::device_ptr<const uint32>(d_slots),
thrust::device_ptr<const uint32>(d_slots) + n_suffixes,
h_slots.begin() );
thrust::copy(
thrust::device_ptr<const uint32>(d_suffixes),
thrust::device_ptr<const uint32>(d_suffixes) + n_suffixes,
h_suffixes.begin() );
// scatter_if
#pragma omp parallel for
for (int i = 0; i < int( n_suffixes ); ++i)
{
const uint32 slot = h_slots[i] + 1u; // +1 for the implicit empty suffix
if ((slot & (mod-1)) == 0)
output[slot / mod] = h_suffixes[i];
}
}
const uint32 string_len;
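// editor's note: 'mod' is assumed to be a power of two; the sampling checks
// above test (slot & (mod-1)) == 0 rather than using a modulo operation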
const uint32 mod;
uint32 n_output;
output_iterator output;
thrust::host_vector<uint32> h_slots;
thrust::host_vector<uint32> h_suffixes;
};
/// a utility \ref StringSuffixHandler to retain the BWT and a Sampled Suffix Array
///
template <typename string_type, typename output_bwt_iterator, typename output_ssa_iterator>
struct StringBWTSSAHandler
{
StringBWTSSAHandler(
const uint32 _string_len,
const string_type _string,
const uint32 _mod,
output_bwt_iterator _bwt,
output_ssa_iterator _ssa) :
bwt_handler( _string_len, _string, _bwt ),
ssa_handler( _string_len, _mod, _ssa ) {}
// process the next batch of suffixes
//
void process_batch(
const uint32 n_suffixes,
const uint32* d_suffixes)
{
bwt_handler.process_batch( n_suffixes, d_suffixes );
ssa_handler.process_batch( n_suffixes, d_suffixes );
}
// process a sparse set of suffixes
//
void process_scattered(
const uint32 n_suffixes,
const uint32* d_suffixes,
const uint32* d_slots)
{
bwt_handler.process_scattered( n_suffixes, d_suffixes, d_slots );
ssa_handler.process_scattered( n_suffixes, d_suffixes, d_slots );
}
// return the primary
//
uint32 primary() const { return bwt_handler.primary; }
// remove the dollar symbol
//
void remove_dollar()
{
bwt_handler.remove_dollar();
}
StringBWTHandler<string_type,output_bwt_iterator> bwt_handler;
StringSSAHandler<output_ssa_iterator> ssa_handler;
};
///@} StringSuffixHandlersModule
///@} Sufsort
} // namespace nvbio
|
draw.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R R A A W W W W %
% DDDD R R A A WW WW %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Define declarations.
*/
#define BezierQuantum 200
#define PrimitiveExtentPad 2053
#define MaxBezierCoordinates 67108864
#define ThrowPointExpectedException(token,exception) \
{ \
(void) ThrowMagickException(exception,GetMagickModule(),DrawError, \
"NonconformingDrawingPrimitiveDefinition","`%s'",token); \
status=MagickFalse; \
break; \
}
/*
Typedef declarations.
*/
typedef struct _EdgeInfo
{
SegmentInfo
bounds;
double
scanline;
PointInfo
*points;
size_t
number_points;
ssize_t
direction;
MagickBooleanType
ghostline;
size_t
highwater;
} EdgeInfo;
typedef struct _ElementInfo
{
double
cx,
cy,
major,
minor,
angle;
} ElementInfo;
typedef struct _MVGInfo
{
PrimitiveInfo
**primitive_info;
size_t
*extent;
ssize_t
offset;
PointInfo
point;
ExceptionInfo
*exception;
} MVGInfo;
typedef struct _PolygonInfo
{
EdgeInfo
*edges;
size_t
number_edges;
} PolygonInfo;
typedef enum
{
MoveToCode,
OpenCode,
GhostlineCode,
LineToCode,
EndCode
} PathInfoCode;
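/*
Editor's note: MoveToCode starts a subpath; if the subpath turns out not to
be closed, ConvertPrimitiveToPath() rewrites its start as OpenCode and
appends a GhostlineCode point followed by a LineToCode back to the start, so
every subpath handed to ConvertPathToPolygon() has an explicit closing
segment (flagged as a ghostline).  EndCode terminates the path.
*/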
typedef struct _PathInfo
{
PointInfo
point;
PathInfoCode
code;
} PathInfo;
/*
Forward declarations.
*/
static Image
*DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
ExceptionInfo *);
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
ExceptionInfo *),
RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *),
TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(MVGInfo *,const size_t),
TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
static PrimitiveInfo
*TraceStrokePolygon(const DrawInfo *,const PrimitiveInfo *,ExceptionInfo *);
static ssize_t
TracePath(MVGInfo *,const char *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
DrawInfo
*draw_info;
draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info));
GetDrawInfo((ImageInfo *) NULL,draw_info);
return(draw_info);
}
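/*
Usage sketch (editor's addition; 'image' and 'exception' are assumed to be
supplied by the caller, e.g. via the public MagickCore API):

DrawInfo *draw_info = AcquireDrawInfo();
(void) CloneString(&draw_info->primitive, "circle 100,100 150,150");
(void) DrawImage(image, draw_info, exception);
draw_info = DestroyDrawInfo(draw_info);
*/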
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created and initialized to
% default values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
const DrawInfo *draw_info)
{
DrawInfo
*clone_info;
ExceptionInfo
*exception;
clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
GetDrawInfo(image_info,clone_info);
if (draw_info == (DrawInfo *) NULL)
return(clone_info);
exception=AcquireExceptionInfo();
if (draw_info->id != (char *) NULL)
(void) CloneString(&clone_info->id,draw_info->id);
if (draw_info->primitive != (char *) NULL)
(void) CloneString(&clone_info->primitive,draw_info->primitive);
if (draw_info->geometry != (char *) NULL)
(void) CloneString(&clone_info->geometry,draw_info->geometry);
clone_info->compliance=draw_info->compliance;
clone_info->viewbox=draw_info->viewbox;
clone_info->affine=draw_info->affine;
clone_info->gravity=draw_info->gravity;
clone_info->fill=draw_info->fill;
clone_info->stroke=draw_info->stroke;
clone_info->stroke_width=draw_info->stroke_width;
if (draw_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
exception);
if (draw_info->stroke_pattern != (Image *) NULL)
clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
MagickTrue,exception);
clone_info->stroke_antialias=draw_info->stroke_antialias;
clone_info->text_antialias=draw_info->text_antialias;
clone_info->fill_rule=draw_info->fill_rule;
clone_info->linecap=draw_info->linecap;
clone_info->linejoin=draw_info->linejoin;
clone_info->miterlimit=draw_info->miterlimit;
clone_info->dash_offset=draw_info->dash_offset;
clone_info->decorate=draw_info->decorate;
clone_info->compose=draw_info->compose;
if (draw_info->text != (char *) NULL)
(void) CloneString(&clone_info->text,draw_info->text);
if (draw_info->font != (char *) NULL)
(void) CloneString(&clone_info->font,draw_info->font);
if (draw_info->metrics != (char *) NULL)
(void) CloneString(&clone_info->metrics,draw_info->metrics);
if (draw_info->family != (char *) NULL)
(void) CloneString(&clone_info->family,draw_info->family);
clone_info->style=draw_info->style;
clone_info->stretch=draw_info->stretch;
clone_info->weight=draw_info->weight;
if (draw_info->encoding != (char *) NULL)
(void) CloneString(&clone_info->encoding,draw_info->encoding);
clone_info->pointsize=draw_info->pointsize;
clone_info->kerning=draw_info->kerning;
clone_info->interline_spacing=draw_info->interline_spacing;
clone_info->interword_spacing=draw_info->interword_spacing;
clone_info->direction=draw_info->direction;
if (draw_info->density != (char *) NULL)
(void) CloneString(&clone_info->density,draw_info->density);
clone_info->align=draw_info->align;
clone_info->undercolor=draw_info->undercolor;
clone_info->border_color=draw_info->border_color;
if (draw_info->server_name != (char *) NULL)
(void) CloneString(&clone_info->server_name,draw_info->server_name);
if (draw_info->dash_pattern != (double *) NULL)
{
register ssize_t
x;
for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*clone_info->dash_pattern));
if (clone_info->dash_pattern == (double *) NULL)
ThrowFatalException(ResourceLimitFatalError,
"UnableToAllocateDashPattern");
(void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
sizeof(*clone_info->dash_pattern));
(void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
(x+1)*sizeof(*clone_info->dash_pattern));
}
clone_info->gradient=draw_info->gradient;
if (draw_info->gradient.stops != (StopInfo *) NULL)
{
size_t
number_stops;
number_stops=clone_info->gradient.number_stops;
clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
number_stops,sizeof(*clone_info->gradient.stops));
if (clone_info->gradient.stops == (StopInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,
"UnableToAllocateDashPattern");
(void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
(size_t) number_stops*sizeof(*clone_info->gradient.stops));
}
clone_info->bounds=draw_info->bounds;
clone_info->fill_alpha=draw_info->fill_alpha;
clone_info->stroke_alpha=draw_info->stroke_alpha;
clone_info->element_reference=draw_info->element_reference;
clone_info->clip_path=draw_info->clip_path;
clone_info->clip_units=draw_info->clip_units;
if (draw_info->clip_mask != (char *) NULL)
(void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
if (draw_info->clipping_mask != (Image *) NULL)
clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
MagickTrue,exception);
if (draw_info->composite_mask != (Image *) NULL)
clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
MagickTrue,exception);
clone_info->render=draw_info->render;
clone_info->debug=IsEventLogging();
exception=DestroyExceptionInfo(exception);
return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o ConvertPathToPolygon() returns the path in a more efficient sorted
% rendering form of type PolygonInfo.
%
% o path_info: Specifies a pointer to a PathInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
register ssize_t
i;
if (polygon_info->edges != (EdgeInfo *) NULL)
{
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
if (polygon_info->edges[i].points != (PointInfo *) NULL)
polygon_info->edges[i].points=(PointInfo *)
RelinquishMagickMemory(polygon_info->edges[i].points);
polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(
polygon_info->edges);
}
return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
#define DrawCompareEdge(p,q) \
{ \
if (((p)-(q)) < 0.0) \
return(-1); \
if (((p)-(q)) > 0.0) \
return(1); \
}
register const PointInfo
*p,
*q;
/*
Edge sorting for right-handed coordinate system.
*/
p=((const EdgeInfo *) p_edge)->points;
q=((const EdgeInfo *) q_edge)->points;
DrawCompareEdge(p[0].y,q[0].y);
DrawCompareEdge(p[0].x,q[0].x);
DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)*
(q[1].x-q[0].x));
DrawCompareEdge(p[1].y,q[1].y);
DrawCompareEdge(p[1].x,q[1].x);
return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
register EdgeInfo
*p;
register ssize_t
i,
j;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
p=polygon_info->edges;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
(double) i);
(void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
p->direction != MagickFalse ? "down" : "up");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
p->ghostline != MagickFalse ? "transparent" : "opaque");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
p->bounds.x2,p->bounds.y2);
for (j=0; j < (ssize_t) p->number_points; j++)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
p->points[j].x,p->points[j].y);
p++;
}
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}
static void ReversePoints(PointInfo *points,const size_t number_points)
{
PointInfo
point;
register ssize_t
i;
for (i=0; i < (ssize_t) (number_points >> 1); i++)
{
point=points[i];
points[i]=points[number_points-(i+1)];
points[number_points-(i+1)]=point;
}
}
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info,
ExceptionInfo *exception)
{
long
direction,
next_direction;
PointInfo
point,
*points;
PolygonInfo
*polygon_info;
SegmentInfo
bounds;
register ssize_t
i,
n;
MagickBooleanType
ghostline;
size_t
edge,
number_edges,
number_points;
/*
Convert a path to the more efficient sorted rendering form.
*/
polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
if (polygon_info == (PolygonInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return((PolygonInfo *) NULL);
}
number_edges=16;
polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonInfo(polygon_info));
}
(void) memset(polygon_info->edges,0,number_edges*
sizeof(*polygon_info->edges));
direction=0;
edge=0;
ghostline=MagickFalse;
n=0;
number_points=0;
points=(PointInfo *) NULL;
(void) memset(&point,0,sizeof(point));
(void) memset(&bounds,0,sizeof(bounds));
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=0.0;
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) direction;
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->number_edges=0;
for (i=0; path_info[i].code != EndCode; i++)
{
if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
(path_info[i].code == GhostlineCode))
{
/*
Move to.
*/
if ((points != (PointInfo *) NULL) && (n >= 2))
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
points=(PointInfo *) RelinquishMagickMemory(points);
return(DestroyPolygonInfo(polygon_info));
}
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
points=(PointInfo *) NULL;
ghostline=MagickFalse;
edge++;
polygon_info->number_edges=edge;
}
if (points == (PointInfo *) NULL)
{
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonInfo(polygon_info));
}
}
ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
point=path_info[i].point;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
direction=0;
n=1;
continue;
}
/*
Line to.
*/
next_direction=((path_info[i].point.y > point.y) ||
((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
(path_info[i].point.x > point.x))) ? 1 : -1;
if ((points != (PointInfo *) NULL) && (direction != 0) &&
(direction != next_direction))
{
/*
New edge.
*/
point=points[n-1];
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
points=(PointInfo *) RelinquishMagickMemory(points);
return(DestroyPolygonInfo(polygon_info));
}
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
polygon_info->number_edges=edge+1;
points=(PointInfo *) NULL;
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonInfo(polygon_info));
}
n=1;
ghostline=MagickFalse;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
edge++;
}
direction=next_direction;
if (points == (PointInfo *) NULL)
continue;
if (n == (ssize_t) number_points)
{
number_points<<=1;
points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonInfo(polygon_info));
}
}
point=path_info[i].point;
points[n]=point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.x > bounds.x2)
bounds.x2=point.x;
n++;
}
if (points != (PointInfo *) NULL)
{
if (n < 2)
points=(PointInfo *) RelinquishMagickMemory(points);
else
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonInfo(polygon_info));
}
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
points=(PointInfo *) NULL;
ghostline=MagickFalse;
edge++;
polygon_info->number_edges=edge;
}
}
polygon_info->number_edges=edge;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(polygon_info->edges,
polygon_info->number_edges,sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonInfo(polygon_info));
}
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
EdgeInfo
*edge_info;
edge_info=polygon_info->edges+i;
edge_info->points=(PointInfo *) ResizeQuantumMemory(edge_info->points,
edge_info->number_points,sizeof(*edge_info->points));
if (edge_info->points == (PointInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonInfo(polygon_info));
}
}
qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
sizeof(*polygon_info->edges),DrawCompareEdges);
if (IsEventLogging() != MagickFalse)
LogPolygonInfo(polygon_info);
return(polygon_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o ConvertPrimitiveToPath() returns a vector path structure of type
% PathInfo.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static void LogPathInfo(const PathInfo *path_info)
{
register const PathInfo
*p;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
for (p=path_info; p->code != EndCode; p++)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
"moveto ghostline" : p->code == OpenCode ? "moveto open" :
p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" :
"?");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}
static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
MagickBooleanType
closed_subpath;
PathInfo
*path_info;
PathInfoCode
code;
PointInfo
p,
q;
register ssize_t
i,
n;
ssize_t
coordinates,
start;
/*
Converts a PrimitiveInfo structure into a vector path structure.
*/
switch (primitive_info->primitive)
{
case AlphaPrimitive:
case ColorPrimitive:
case ImagePrimitive:
case PointPrimitive:
case TextPrimitive:
return((PathInfo *) NULL);
default:
break;
}
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
sizeof(*path_info));
if (path_info == (PathInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return((PathInfo *) NULL);
}
coordinates=0;
closed_subpath=MagickFalse;
n=0;
p.x=(-1.0);
p.y=(-1.0);
q.x=(-1.0);
q.y=(-1.0);
start=0;
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
code=LineToCode;
if (coordinates <= 0)
{
/*
New subpath.
*/
coordinates=(ssize_t) primitive_info[i].coordinates;
p=primitive_info[i].point;
start=n;
code=MoveToCode;
closed_subpath=primitive_info[i].closed_subpath;
}
coordinates--;
if ((code == MoveToCode) || (coordinates <= 0) ||
(fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
(fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
{
/*
Eliminate duplicate points.
*/
path_info[n].code=code;
path_info[n].point=primitive_info[i].point;
q=primitive_info[i].point;
n++;
}
if (coordinates > 0)
continue; /* next point in current subpath */
if (closed_subpath != MagickFalse)
{
closed_subpath=MagickFalse;
continue;
}
/*
Mark the subpath starting point as open and append a ghostline back to it since the subpath is not closed.
*/
path_info[start].code=OpenCode;
path_info[n].code=GhostlineCode;
path_info[n].point=primitive_info[i].point;
n++;
path_info[n].code=LineToCode;
path_info[n].point=p;
n++;
}
path_info[n].code=EndCode;
path_info[n].point.x=0.0;
path_info[n].point.y=0.0;
if (IsEventLogging() != MagickFalse)
LogPathInfo(path_info);
path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
sizeof(*path_info));
return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyDrawInfo() deallocates memory associated with a DrawInfo structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
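/*
  Usage sketch (illustrative only): a DrawInfo obtained from AcquireDrawInfo()
  or CloneDrawInfo() is released with DestroyDrawInfo(), which returns NULL so
  the handle can be cleared in the same statement:

    DrawInfo *clone_info = CloneDrawInfo((ImageInfo *) NULL,draw_info);

    ... use clone_info ...
    clone_info=DestroyDrawInfo(clone_info);
*/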
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
assert(draw_info != (DrawInfo *) NULL);
if (draw_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(draw_info->signature == MagickCoreSignature);
if (draw_info->id != (char *) NULL)
draw_info->id=DestroyString(draw_info->id);
if (draw_info->primitive != (char *) NULL)
draw_info->primitive=DestroyString(draw_info->primitive);
if (draw_info->text != (char *) NULL)
draw_info->text=DestroyString(draw_info->text);
if (draw_info->geometry != (char *) NULL)
draw_info->geometry=DestroyString(draw_info->geometry);
if (draw_info->fill_pattern != (Image *) NULL)
draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
if (draw_info->stroke_pattern != (Image *) NULL)
draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
if (draw_info->font != (char *) NULL)
draw_info->font=DestroyString(draw_info->font);
if (draw_info->metrics != (char *) NULL)
draw_info->metrics=DestroyString(draw_info->metrics);
if (draw_info->family != (char *) NULL)
draw_info->family=DestroyString(draw_info->family);
if (draw_info->encoding != (char *) NULL)
draw_info->encoding=DestroyString(draw_info->encoding);
if (draw_info->density != (char *) NULL)
draw_info->density=DestroyString(draw_info->density);
if (draw_info->server_name != (char *) NULL)
draw_info->server_name=(char *)
RelinquishMagickMemory(draw_info->server_name);
if (draw_info->dash_pattern != (double *) NULL)
draw_info->dash_pattern=(double *) RelinquishMagickMemory(
draw_info->dash_pattern);
if (draw_info->gradient.stops != (StopInfo *) NULL)
draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
draw_info->gradient.stops);
if (draw_info->clip_mask != (char *) NULL)
draw_info->clip_mask=DestroyString(draw_info->clip_mask);
if (draw_info->clipping_mask != (Image *) NULL)
draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
if (draw_info->composite_mask != (Image *) NULL)
draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
draw_info->signature=(~MagickCoreSignature);
draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
% o exception: return any errors or warnings in this structure.
%
*/
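/*
  Usage sketch (illustrative only; image, source, and exception are assumed to
  already exist): composite a source image scaled by 2 and translated to
  (10,20).  The AffineMatrix members follow the sx/rx/ry/sy/tx/ty convention
  used throughout this file.

    AffineMatrix affine;

    GetAffineMatrix(&affine);
    affine.sx=2.0;
    affine.sy=2.0;
    affine.tx=10.0;
    affine.ty=20.0;
    (void) DrawAffineImage(image,source,&affine,exception);
*/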
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
const double y,const SegmentInfo *edge)
{
double
intercept,
z;
register double
x;
SegmentInfo
inverse_edge;
/*
Determine left and right edges.
*/
inverse_edge.x1=edge->x1;
inverse_edge.y1=edge->y1;
inverse_edge.x2=edge->x2;
inverse_edge.y2=edge->y2;
z=affine->ry*y+affine->tx;
if (affine->sx >= MagickEpsilon)
{
intercept=(-z/affine->sx);
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z+(double) image->columns)/affine->sx;
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if (affine->sx < -MagickEpsilon)
{
intercept=(-z+(double) image->columns)/affine->sx;
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z/affine->sx);
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
{
inverse_edge.x2=edge->x1;
return(inverse_edge);
}
/*
Determine top and bottom edges.
*/
z=affine->sy*y+affine->ty;
if (affine->rx >= MagickEpsilon)
{
intercept=(-z/affine->rx);
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z+(double) image->rows)/affine->rx;
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if (affine->rx < -MagickEpsilon)
{
intercept=(-z+(double) image->rows)/affine->rx;
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z/affine->rx);
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
{
inverse_edge.x2=edge->x2;
return(inverse_edge);
}
return(inverse_edge);
}
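/*
  Math note (added for clarity): with the convention used in this file,
  x' = sx*x + ry*y + tx and y' = rx*x + sy*y + ty, so the linear part is the
  2x2 matrix [sx ry; rx sy].  Its inverse is (1/det)*[sy -ry; -rx sx] with
  det = sx*sy - rx*ry, and the inverse translation is that inverse matrix
  applied to (-tx,-ty), which is exactly what InverseAffineMatrix() computes
  below.  PerceptibleReciprocal() guards against a degenerate (non-invertible)
  transform.
*/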
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
AffineMatrix
inverse_affine;
double
determinant;
determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
affine->ry);
inverse_affine.sx=determinant*affine->sy;
inverse_affine.rx=determinant*(-affine->rx);
inverse_affine.ry=determinant*(-affine->ry);
inverse_affine.sy=determinant*affine->sx;
inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
inverse_affine.ry;
inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
inverse_affine.sy;
return(inverse_affine);
}
MagickExport MagickBooleanType DrawAffineImage(Image *image,
const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
AffineMatrix
inverse_affine;
CacheView
*image_view,
*source_view;
MagickBooleanType
status;
PixelInfo
zero;
PointInfo
extent[4],
min,
max;
register ssize_t
i;
SegmentInfo
edge;
ssize_t
start,
stop,
y;
/*
Determine bounding box.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(source != (const Image *) NULL);
assert(source->signature == MagickCoreSignature);
assert(affine != (AffineMatrix *) NULL);
extent[0].x=0.0;
extent[0].y=0.0;
extent[1].x=(double) source->columns-1.0;
extent[1].y=0.0;
extent[2].x=(double) source->columns-1.0;
extent[2].y=(double) source->rows-1.0;
extent[3].x=0.0;
extent[3].y=(double) source->rows-1.0;
for (i=0; i < 4; i++)
{
PointInfo
point;
point=extent[i];
extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
}
min=extent[0];
max=extent[0];
for (i=1; i < 4; i++)
{
if (min.x > extent[i].x)
min.x=extent[i].x;
if (min.y > extent[i].y)
min.y=extent[i].y;
if (max.x < extent[i].x)
max.x=extent[i].x;
if (max.y < extent[i].y)
max.y=extent[i].y;
}
/*
Affine transform image.
*/
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
edge.x1=MagickMax(min.x,0.0);
edge.y1=MagickMax(min.y,0.0);
edge.x2=MagickMin(max.x,(double) image->columns-1.0);
edge.y2=MagickMin(max.y,(double) image->rows-1.0);
inverse_affine=InverseAffineMatrix(affine);
GetPixelInfo(image,&zero);
start=(ssize_t) ceil(edge.y1-0.5);
stop=(ssize_t) floor(edge.y2+0.5);
source_view=AcquireVirtualCacheView(source,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source,image,stop-start,1)
#endif
for (y=start; y <= stop; y++)
{
PixelInfo
composite,
pixel;
PointInfo
point;
register ssize_t
x;
register Quantum
*magick_restrict q;
SegmentInfo
inverse_edge;
ssize_t
x_offset;
if (status == MagickFalse)
continue;
inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
if (inverse_edge.x2 < inverse_edge.x1)
continue;
q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
1,exception);
if (q == (Quantum *) NULL)
continue;
pixel=zero;
composite=zero;
x_offset=0;
for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t) floor(inverse_edge.x2+0.5); x++)
{
point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
inverse_affine.tx;
point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
inverse_affine.ty;
status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
point.x,point.y,&pixel,exception);
if (status == MagickFalse)
break;
GetPixelInfoPixel(image,q,&composite);
CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
&composite);
SetPixelViaPixelInfo(image,&composite,q);
x_offset++;
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% MagickBooleanType DrawBoundingRectangles(Image *image,
% const DrawInfo *draw_info,const PolygonInfo *polygon_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawBoundingRectangles(Image *image,
const DrawInfo *draw_info,const PolygonInfo *polygon_info,
ExceptionInfo *exception)
{
double
mid;
DrawInfo
*clone_info;
MagickStatusType
status;
PointInfo
end,
resolution,
start;
PrimitiveInfo
primitive_info[6];
register ssize_t
i;
SegmentInfo
bounds;
ssize_t
coordinates;
(void) memset(primitive_info,0,sizeof(primitive_info));
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
exception);
if (status == MagickFalse)
{
clone_info=DestroyDrawInfo(clone_info);
return(MagickFalse);
}
resolution.x=96.0;
resolution.y=96.0;
if (clone_info->density != (char *) NULL)
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
flags=ParseGeometry(clone_info->density,&geometry_info);
resolution.x=geometry_info.rho;
resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == MagickFalse)
resolution.y=resolution.x;
}
mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
clone_info->stroke_width/2.0;
bounds.x1=0.0;
bounds.y1=0.0;
bounds.x2=0.0;
bounds.y2=0.0;
if (polygon_info != (PolygonInfo *) NULL)
{
bounds=polygon_info->edges[0].bounds;
for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
{
if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
bounds.x1=polygon_info->edges[i].bounds.x1;
if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
bounds.y1=polygon_info->edges[i].bounds.y1;
if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
bounds.x2=polygon_info->edges[i].bounds.x2;
if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
bounds.y2=polygon_info->edges[i].bounds.y2;
}
bounds.x1-=mid;
bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
image->columns ? (double) image->columns-1 : bounds.x1;
bounds.y1-=mid;
bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
image->rows ? (double) image->rows-1 : bounds.y1;
bounds.x2+=mid;
bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
image->columns ? (double) image->columns-1 : bounds.x2;
bounds.y2+=mid;
bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
image->rows ? (double) image->rows-1 : bounds.y2;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
if (polygon_info->edges[i].direction != 0)
status=QueryColorCompliance("#f00",AllCompliance,&clone_info->stroke,
exception);
else
status=QueryColorCompliance("#0f0",AllCompliance,&clone_info->stroke,
exception);
if (status == MagickFalse)
break;
start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
primitive_info[0].primitive=RectanglePrimitive;
status&=TraceRectangle(primitive_info,start,end);
primitive_info[0].method=ReplaceMethod;
coordinates=(ssize_t) primitive_info[0].coordinates;
primitive_info[coordinates].primitive=UndefinedPrimitive;
status=DrawPrimitive(image,clone_info,primitive_info,exception);
if (status == MagickFalse)
break;
}
if (i < (ssize_t) polygon_info->number_edges)
{
clone_info=DestroyDrawInfo(clone_info);
return(status == 0 ? MagickFalse : MagickTrue);
}
}
status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
exception);
if (status == MagickFalse)
{
clone_info=DestroyDrawInfo(clone_info);
return(MagickFalse);
}
start.x=(double) (bounds.x1-mid);
start.y=(double) (bounds.y1-mid);
end.x=(double) (bounds.x2+mid);
end.y=(double) (bounds.y2+mid);
primitive_info[0].primitive=RectanglePrimitive;
status&=TraceRectangle(primitive_info,start,end);
primitive_info[0].method=ReplaceMethod;
coordinates=(ssize_t) primitive_info[0].coordinates;
primitive_info[coordinates].primitive=UndefinedPrimitive;
status=DrawPrimitive(image,clone_info,primitive_info,exception);
clone_info=DestroyDrawInfo(clone_info);
return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *id,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o exception: return any errors or warnings in this structure.
%
*/
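/*
  Usage sketch (illustrative only; the artifact key "#clip_1" and the MVG path
  are arbitrary assumptions): DrawClipPath() looks the id up as an image
  artifact, renders it with DrawClippingMask(), and installs the result as the
  image's write mask.

    (void) SetImageArtifact(image,"#clip_1",
      "path 'M 10,10 L 90,10 L 90,90 L 10,90 Z'");
    (void) DrawClipPath(image,draw_info,"#clip_1",exception);
*/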
MagickExport MagickBooleanType DrawClipPath(Image *image,
const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
const char
*clip_path;
Image
*clipping_mask;
MagickBooleanType
status;
clip_path=GetImageArtifact(image,id);
if (clip_path == (const char *) NULL)
return(MagickFalse);
clipping_mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,clip_path,
exception);
if (clipping_mask == (Image *) NULL)
return(MagickFalse);
status=SetImageMask(image,WritePixelMask,clipping_mask,exception);
clipping_mask=DestroyImage(clipping_mask);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p p i n g M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClippingMask() draws the clip path and returns it as an image clipping
% mask.
%
% The format of the DrawClippingMask method is:
%
% Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *clip_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o clip_path: the clip path.
%
% o exception: return any errors or warnings in this structure.
%
*/
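/*
  Implementation note (added for clarity): the clip-path MVG is rendered with
  an opaque white fill and a fully transparent stroke onto a transparent
  canvas; the alpha channel is then separated into a grayscale image and
  negated to give the polarity SetImageMask() expects for a write mask.
*/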
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
const char *id,const char *clip_path,ExceptionInfo *exception)
{
DrawInfo
*clone_info;
Image
*clip_mask,
*separate_mask;
MagickStatusType
status;
/*
Draw a clip path.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
clip_mask=AcquireImage((const ImageInfo *) NULL,exception);
status=SetImageExtent(clip_mask,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImage(clip_mask));
status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
status=QueryColorCompliance("#0000",AllCompliance,
&clip_mask->background_color,exception);
clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
clip_mask->background_color.alpha_trait=BlendPixelTrait;
status=SetImageBackgroundColor(clip_mask,exception);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
id);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->primitive,clip_path);
status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
exception);
if (clone_info->clip_mask != (char *) NULL)
clone_info->clip_mask=DestroyString(clone_info->clip_mask);
status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
exception);
clone_info->stroke_width=0.0;
clone_info->alpha=OpaqueAlpha;
clone_info->clip_path=MagickTrue;
status=RenderMVGContent(clip_mask,clone_info,0,exception);
clone_info=DestroyDrawInfo(clone_info);
separate_mask=SeparateImage(clip_mask,AlphaChannel,exception);
if (separate_mask != (Image *) NULL)
{
clip_mask=DestroyImage(clip_mask);
clip_mask=separate_mask;
status=NegateImage(clip_mask,MagickFalse,exception);
if (status == MagickFalse)
clip_mask=DestroyImage(clip_mask);
}
if (status == MagickFalse)
clip_mask=DestroyImage(clip_mask);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
return(clip_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C o m p o s i t e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawCompositeMask() draws the mask path and returns it as an image mask.
%
% The format of the DrawCompositeMask method is:
%
% Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *mask_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the mask path id.
%
% o mask_path: the mask path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
const char *id,const char *mask_path,ExceptionInfo *exception)
{
Image
*composite_mask,
*separate_mask;
DrawInfo
*clone_info;
MagickStatusType
status;
/*
Draw a mask path.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
composite_mask=AcquireImage((const ImageInfo *) NULL,exception);
status=SetImageExtent(composite_mask,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImage(composite_mask));
status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL,
exception);
status=QueryColorCompliance("#0000",AllCompliance,
&composite_mask->background_color,exception);
composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
composite_mask->background_color.alpha_trait=BlendPixelTrait;
(void) SetImageBackgroundColor(composite_mask,exception);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
id);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->primitive,mask_path);
status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
exception);
status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
exception);
clone_info->stroke_width=0.0;
clone_info->alpha=OpaqueAlpha;
status=RenderMVGContent(composite_mask,clone_info,0,exception);
clone_info=DestroyDrawInfo(clone_info);
separate_mask=SeparateImage(composite_mask,AlphaChannel,exception);
if (separate_mask != (Image *) NULL)
{
composite_mask=DestroyImage(composite_mask);
composite_mask=separate_mask;
status=NegateImage(composite_mask,MagickFalse,exception);
if (status == MagickFalse)
composite_mask=DestroyImage(composite_mask);
}
if (status == MagickFalse)
composite_mask=DestroyImage(composite_mask);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
return(composite_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
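/*
  Usage sketch (illustrative only; the 6-on/3-off pattern is an arbitrary
  example): dash_pattern is a zero-terminated array of alternating on/off
  lengths owned by the DrawInfo structure, and dash_offset shifts where the
  pattern starts along the stroke.

    draw_info->dash_pattern=(double *) AcquireQuantumMemory(3,
      sizeof(*draw_info->dash_pattern));
    draw_info->dash_pattern[0]=6.0;
    draw_info->dash_pattern[1]=3.0;
    draw_info->dash_pattern[2]=0.0;
    draw_info->dash_offset=0.0;
*/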
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
double
length,
maximum_length,
offset,
scale,
total_length;
DrawInfo
*clone_info;
MagickStatusType
status;
PrimitiveInfo
*dash_polygon;
register double
dx,
dy;
register ssize_t
i;
size_t
number_vertices;
ssize_t
j,
n;
assert(draw_info != (const DrawInfo *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
number_vertices=(size_t) i;
dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
(2UL*number_vertices+32UL),sizeof(*dash_polygon));
if (dash_polygon == (PrimitiveInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
(void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
sizeof(*dash_polygon));
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->miterlimit=0;
dash_polygon[0]=primitive_info[0];
scale=ExpandAffine(&draw_info->affine);
length=scale*draw_info->dash_pattern[0];
offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
scale*draw_info->dash_offset : 0.0;
j=1;
for (n=0; offset > 0.0; j=0)
{
if (draw_info->dash_pattern[n] <= 0.0)
break;
length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
if (offset > length)
{
offset-=length;
n++;
length=scale*draw_info->dash_pattern[n];
continue;
}
if (offset < length)
{
length-=offset;
offset=0.0;
break;
}
offset=0.0;
n++;
}
status=MagickTrue;
maximum_length=0.0;
total_length=0.0;
for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
{
dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
maximum_length=hypot(dx,dy);
if (maximum_length > (double) (MaxBezierCoordinates >> 2))
continue;
if (fabs(length) < MagickEpsilon)
{
if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
n++;
if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
n=0;
length=scale*draw_info->dash_pattern[n];
}
for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); )
{
total_length+=length;
if ((n & 0x01) != 0)
{
dash_polygon[0]=primitive_info[0];
dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
total_length*PerceptibleReciprocal(maximum_length));
dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
total_length*PerceptibleReciprocal(maximum_length));
j=1;
}
else
{
if ((j+1) > (ssize_t) number_vertices)
break;
dash_polygon[j]=primitive_info[i-1];
dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
total_length*PerceptibleReciprocal(maximum_length));
dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
total_length*PerceptibleReciprocal(maximum_length));
dash_polygon[j].coordinates=1;
j++;
dash_polygon[0].coordinates=(size_t) j;
dash_polygon[j].primitive=UndefinedPrimitive;
status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
if (status == MagickFalse)
break;
}
if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
n++;
if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
n=0;
length=scale*draw_info->dash_pattern[n];
}
length-=(maximum_length-total_length);
if ((n & 0x01) != 0)
continue;
dash_polygon[j]=primitive_info[i];
dash_polygon[j].coordinates=1;
j++;
}
if ((status != MagickFalse) && (total_length < maximum_length) &&
((n & 0x01) == 0) && (j > 1))
{
dash_polygon[j]=primitive_info[i-1];
dash_polygon[j].point.x+=MagickEpsilon;
dash_polygon[j].point.y+=MagickEpsilon;
dash_polygon[j].coordinates=1;
j++;
dash_polygon[0].coordinates=(size_t) j;
dash_polygon[j].primitive=UndefinedPrimitive;
status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
}
dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
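/*
  Math note (added for clarity): for a linear gradient, GetStopColorOffset()
  projects the pixel vector q = (x,y)-(x1,y1) onto the gradient vector
  p = (x2,y2)-(x1,y1).  With length = |q|, gamma = 1/(|p|*|q|) and
  scale = p.q, the value returned is gamma*scale*length = (p.q)/|p|;
  DrawGradientImage() then divides once more by |p|, so the stop offset is the
  normalized projection (p.q)/|p|^2, which is 0 at the start point and 1 at
  the end point of the gradient vector.
*/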
static inline double GetStopColorOffset(const GradientInfo *gradient,
const ssize_t x,const ssize_t y)
{
switch (gradient->type)
{
case UndefinedGradient:
case LinearGradient:
{
double
gamma,
length,
offset,
scale;
PointInfo
p,
q;
const SegmentInfo
*gradient_vector;
gradient_vector=(&gradient->gradient_vector);
p.x=gradient_vector->x2-gradient_vector->x1;
p.y=gradient_vector->y2-gradient_vector->y1;
q.x=(double) x-gradient_vector->x1;
q.y=(double) y-gradient_vector->y1;
length=sqrt(q.x*q.x+q.y*q.y);
gamma=sqrt(p.x*p.x+p.y*p.y)*length;
gamma=PerceptibleReciprocal(gamma);
scale=p.x*q.x+p.y*q.y;
offset=gamma*scale*length;
return(offset);
}
case RadialGradient:
{
PointInfo
v;
if (gradient->spread == RepeatSpread)
{
v.x=(double) x-gradient->center.x;
v.y=(double) y-gradient->center.y;
return(sqrt(v.x*v.x+v.y*v.y));
}
v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
return(sqrt(v.x*v.x+v.y*v.y));
}
}
return(0.0);
}
static int StopInfoCompare(const void *x,const void *y)
{
StopInfo
*stop_1,
*stop_2;
stop_1=(StopInfo *) x;
stop_2=(StopInfo *) y;
if (stop_1->offset > stop_2->offset)
return(1);
if (fabs(stop_1->offset-stop_2->offset) <= MagickEpsilon)
return(0);
return(-1);
}
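/*
  Usage sketch (illustrative only; every field value below is an assumption,
  not a library default): a two-stop linear gradient is described entirely by
  draw_info->gradient before calling DrawGradientImage().

    GradientInfo *gradient = &draw_info->gradient;

    gradient->type=LinearGradient;
    gradient->spread=PadSpread;
    gradient->gradient_vector.x1=0.0;
    gradient->gradient_vector.y1=0.0;
    gradient->gradient_vector.x2=(double) image->columns-1.0;
    gradient->gradient_vector.y2=0.0;
    gradient->bounding_box.x=0;
    gradient->bounding_box.y=0;
    gradient->bounding_box.width=image->columns;
    gradient->bounding_box.height=image->rows;
    gradient->number_stops=2;
    gradient->stops=(StopInfo *) AcquireQuantumMemory(2,
      sizeof(*gradient->stops));
    gradient->stops[0].offset=0.0;
    gradient->stops[1].offset=1.0;
    (void) QueryColorCompliance("white",AllCompliance,
      &gradient->stops[0].color,exception);
    (void) QueryColorCompliance("black",AllCompliance,
      &gradient->stops[1].color,exception);
    (void) DrawGradientImage(image,draw_info,exception);
*/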
MagickExport MagickBooleanType DrawGradientImage(Image *image,
const DrawInfo *draw_info,ExceptionInfo *exception)
{
CacheView
*image_view;
const GradientInfo
*gradient;
const SegmentInfo
*gradient_vector;
double
length;
MagickBooleanType
status;
PixelInfo
zero;
PointInfo
point;
RectangleInfo
bounding_box;
ssize_t
y;
/*
Draw linear or radial gradient on image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
gradient=(&draw_info->gradient);
qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
StopInfoCompare);
gradient_vector=(&gradient->gradient_vector);
point.x=gradient_vector->x2-gradient_vector->x1;
point.y=gradient_vector->y2-gradient_vector->y1;
length=sqrt(point.x*point.x+point.y*point.y);
bounding_box=gradient->bounding_box;
status=MagickTrue;
GetPixelInfo(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
{
double
alpha,
offset;
PixelInfo
composite,
pixel;
register Quantum
*magick_restrict q;
register ssize_t
i,
x;
ssize_t
j;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
composite=zero;
offset=GetStopColorOffset(gradient,0,y);
if (gradient->type != RadialGradient)
offset*=PerceptibleReciprocal(length);
for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
{
GetPixelInfoPixel(image,q,&pixel);
switch (gradient->spread)
{
case UndefinedSpread:
case PadSpread:
{
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type != RadialGradient)
offset*=PerceptibleReciprocal(length);
}
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if ((offset < 0.0) || (i == 0))
composite=gradient->stops[0].color;
else
if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
case ReflectSpread:
{
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type != RadialGradient)
offset*=PerceptibleReciprocal(length);
}
if (offset < 0.0)
offset=(-offset);
if ((ssize_t) fmod(offset,2.0) == 0)
offset=fmod(offset,1.0);
else
offset=1.0-fmod(offset,1.0);
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if (i == 0)
composite=gradient->stops[0].color;
else
if (i == (ssize_t) gradient->number_stops)
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
case RepeatSpread:
{
double
repeat;
MagickBooleanType
antialias;
antialias=MagickFalse;
repeat=0.0;
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type == LinearGradient)
{
repeat=fmod(offset,length);
if (repeat < 0.0)
repeat=length-fmod(-repeat,length);
else
repeat=fmod(offset,length);
antialias=(repeat < length) && ((repeat+1.0) > length) ?
MagickTrue : MagickFalse;
offset=PerceptibleReciprocal(length)*repeat;
}
else
{
repeat=fmod(offset,gradient->radius);
if (repeat < 0.0)
repeat=gradient->radius-fmod(-repeat,gradient->radius);
else
repeat=fmod(offset,gradient->radius);
antialias=repeat+1.0 > gradient->radius ? MagickTrue :
MagickFalse;
offset=repeat/gradient->radius;
}
}
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if (i == 0)
composite=gradient->stops[0].color;
else
if (i == (ssize_t) gradient->number_stops)
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
if (antialias != MagickFalse)
{
if (gradient->type == LinearGradient)
alpha=length-repeat;
else
alpha=gradient->radius-repeat;
i=0;
j=(ssize_t) gradient->number_stops-1L;
}
CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
}
CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
&pixel);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive
% may be represented as a string or filename. Precede the filename with an
% "at" sign (@) and the contents of the file are drawn on the image. You
% can affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
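/*
  Usage sketch (illustrative only; the MVG string is an arbitrary example):
  DrawImage() renders whatever MVG is stored in draw_info->primitive.

    DrawInfo *draw_info = AcquireDrawInfo();

    (void) CloneString(&draw_info->primitive,
      "fill tomato stroke black circle 100,100 100,160");
    (void) DrawImage(image,draw_info,exception);
    draw_info=DestroyDrawInfo(draw_info);
*/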
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
const size_t pad)
{
double
extent;
size_t
quantum;
/*
Check if there is enough storage for drawing primitives.
*/
extent=(double) mvg_info->offset+pad+PrimitiveExtentPad+1;
quantum=sizeof(**mvg_info->primitive_info);
if (extent <= (double) *mvg_info->extent)
return(MagickTrue);
*mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
*mvg_info->primitive_info,(size_t) extent,quantum);
if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
{
register ssize_t
i;
*mvg_info->extent=(size_t) extent;
for (i=mvg_info->offset+1; i < (ssize_t) extent; i++)
(*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
return(MagickTrue);
}
/*
Reallocation failed, allocate a primitive to facilitate unwinding.
*/
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
*mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
*mvg_info->primitive_info);
*mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory(
PrimitiveExtentPad*quantum);
(void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum);
*mvg_info->extent=1;
return(MagickFalse);
}
static inline double GetDrawValue(const char *magick_restrict string,
char **magick_restrict sentinal)
{
char
**magick_restrict q;
double
value;
q=sentinal;
value=InterpretLocaleValue(string,q);
if ((IsNaN(value) != 0) || (value < -((double) SSIZE_MAX-512.0)) ||
(value > ((double) SSIZE_MAX-512.0)))
return(0.0);
sentinal=q;
return(value);
}
static int MVGMacroCompare(const void *target,const void *source)
{
const char
*p,
*q;
p=(const char *) target;
q=(const char *) source;
return(strcmp(p,q));
}
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
char
*macro,
*token;
const char
*q;
size_t
extent;
SplayTreeInfo
*macros;
/*
Scan graphic primitives for definitions and classes.
*/
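/*
  Example (added for clarity): a block such as

    push defs
    push clip-path "wheel"
    path 'M 10,10 L 90,10 L 90,90 Z'
    pop clip-path
    pop defs

  is stored in the splay-tree under the key "wheel", so that a later
  clip-path "wheel" statement can inject its body back into the MVG stream.
*/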
if (primitive == (const char *) NULL)
return((SplayTreeInfo *) NULL);
macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
RelinquishMagickMemory);
macro=AcquireString(primitive);
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
for (q=primitive; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (*token == '\0')
break;
if (LocaleCompare("push",token) == 0)
{
register const char
*end,
*start;
(void) GetNextToken(q,&q,extent,token);
if (*q == '"')
{
char
name[MagickPathExtent];
const char
*p;
ssize_t
n;
/*
Named macro (e.g. push graphic-context "wheel").
*/
(void) GetNextToken(q,&q,extent,token);
start=q;
end=q;
(void) CopyMagickString(name,token,MagickPathExtent);
n=1;
for (p=q; *p != '\0'; )
{
if (GetNextToken(p,&p,extent,token) < 1)
break;
if (*token == '\0')
break;
if (LocaleCompare(token,"pop") == 0)
{
end=p-strlen(token)-1;
n--;
}
if (LocaleCompare(token,"push") == 0)
n++;
if ((n == 0) && (end > start))
{
/*
Extract macro.
*/
(void) GetNextToken(p,&p,extent,token);
(void) CopyMagickString(macro,start,(size_t) (end-start));
(void) AddValueToSplayTree(macros,ConstantString(name),
ConstantString(macro));
break;
}
}
}
}
}
token=DestroyString(token);
macro=DestroyString(macro);
return(macros);
}
static inline MagickBooleanType IsPoint(const char *point)
{
char
*p;
double
value;
value=GetDrawValue(point,&p);
return((fabs(value) < MagickEpsilon) && (p == point) ? MagickFalse :
MagickTrue);
}
static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info,
const PointInfo point)
{
primitive_info->coordinates=1;
primitive_info->closed_subpath=MagickFalse;
primitive_info->point=point;
return(MagickTrue);
}
static MagickBooleanType RenderMVGContent(Image *image,
const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
keyword[MagickPathExtent],
geometry[MagickPathExtent],
*next_token,
pattern[MagickPathExtent],
*primitive,
*token;
const char
*q;
double
angle,
coordinates,
cursor,
factor,
primitive_extent;
DrawInfo
*clone_info,
**graphic_context;
MagickBooleanType
proceed;
MagickStatusType
status;
MVGInfo
mvg_info;
PointInfo
point;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register const char
*p;
register ssize_t
i,
x;
SegmentInfo
bounds;
size_t
extent,
number_points,
number_stops;
SplayTreeInfo
*macros;
ssize_t
defsDepth,
j,
k,
n,
symbolDepth;
StopInfo
*stops;
TypeMetric
metrics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if (depth > MagickMaxRecursionDepth)
ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply",
image->filename);
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (image->alpha_trait == UndefinedPixelTrait)
{
status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
if (status == MagickFalse)
return(MagickFalse);
}
if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) &&
(*(draw_info->primitive+1) != '-') && (depth == 0))
primitive=FileToString(draw_info->primitive+1,~0UL,exception);
else
primitive=AcquireString(draw_info->primitive);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"mvg:vector-graphics",primitive);
n=0;
number_stops=0;
stops=(StopInfo *) NULL;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=PrimitiveExtentPad;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(primitive_info,0,(size_t) number_points*
sizeof(*primitive_info));
(void) memset(&mvg_info,0,sizeof(mvg_info));
mvg_info.primitive_info=(&primitive_info);
mvg_info.extent=(&number_points);
mvg_info.exception=exception;
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
defsDepth=0;
symbolDepth=0;
cursor=0.0;
macros=GetMVGMacros(primitive);
status=MagickTrue;
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
if (GetNextToken(q,&q,MagickPathExtent,keyword) < 1)
break;
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
*token='\0';
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.rx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ry=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.tx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("alpha",keyword) == 0)
{
primitive_type=AlphaPrimitive;
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->border_color,exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("class",keyword) == 0)
{
const char
*mvg_class;
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
if (LocaleCompare(token,graphic_context[n]->id) == 0)
break;
mvg_class=(const char *) GetValueFromSplayTree(macros,token);
if ((mvg_class != (const char *) NULL) && (p > primitive))
{
char
*elements;
ssize_t
offset;
/*
Inject class elements in stream.
*/
offset=(ssize_t) (p-primitive);
elements=AcquireString(primitive);
elements[offset]='\0';
(void) ConcatenateString(&elements,mvg_class);
(void) ConcatenateString(&elements,"\n");
(void) ConcatenateString(&elements,q);
primitive=DestroyString(primitive);
primitive=elements;
q=primitive+offset;
}
break;
}
if (LocaleCompare("clip-path",keyword) == 0)
{
const char
*clip_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
(void) CloneString(&graphic_context[n]->clip_mask,token);
clip_path=(const char *) GetValueFromSplayTree(macros,token);
if (clip_path != (const char *) NULL)
{
if (graphic_context[n]->clipping_mask != (Image *) NULL)
graphic_context[n]->clipping_mask=
DestroyImage(graphic_context[n]->clipping_mask);
graphic_context[n]->clipping_mask=DrawClippingMask(image,
graphic_context[n],token,clip_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
{
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,
graphic_context[n]->clip_mask,clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
}
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
(void) GetNextToken(q,&q,extent,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
GetAffineMatrix(&current);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
if (LocaleCompare("compliance",keyword) == 0)
{
/*
MVG compliance associates a clipping mask with an image; SVG
compliance associates a clipping mask with a graphics context.
*/
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->compliance=(ComplianceType) ParseCommandOption(
MagickComplianceOptions,MagickFalse,token);
break;
}
if (LocaleCompare("currentColor",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
(void) GetNextToken(q,&q,extent,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("density",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->density,token);
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
(void) GetNextToken(q,&q,extent,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->fill,exception);
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->fill_alpha*=opacity;
else
graphic_context[n]->fill_alpha=QuantumRange*opacity;
if (graphic_context[n]->fill.alpha != TransparentAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
else
graphic_context[n]->fill.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *) RelinquishMagickMemory(
graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->pointsize=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
(void) GetNextToken(q,&q,extent,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
(void) GetNextToken(q,&q,extent,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
ssize_t
weight;
(void) GetNextToken(q,&q,extent,token);
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(token);
graphic_context[n]->weight=(size_t) weight;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
(void) GetNextToken(q,&q,extent,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
(void) GetNextToken(q,&q,extent,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interline_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->kerning=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("letter-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (IsPoint(token) == MagickFalse)
break;
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
clone_info->text=AcquireString(" ");
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
graphic_context[n]->kerning=metrics.width*
GetDrawValue(token,&next_token);
clone_info=DestroyDrawInfo(clone_info);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("line",keyword) == 0)
{
primitive_type=LinePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'm':
case 'M':
{
if (LocaleCompare("mask",keyword) == 0)
{
const char
*mask_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
mask_path=(const char *) GetValueFromSplayTree(macros,token);
if (mask_path != (const char *) NULL)
{
if (graphic_context[n]->composite_mask != (Image *) NULL)
graphic_context[n]->composite_mask=
DestroyImage(graphic_context[n]->composite_mask);
graphic_context[n]->composite_mask=DrawCompositeMask(image,
graphic_context[n],token,mask_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
status=SetImageMask(image,CompositePixelMask,
graphic_context[n]->composite_mask,exception);
}
break;
}
status=MagickFalse;
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
{
graphic_context[n]->fill_alpha*=opacity;
graphic_context[n]->stroke_alpha*=opacity;
}
else
{
graphic_context[n]->fill_alpha=QuantumRange*opacity;
graphic_context[n]->stroke_alpha=QuantumRange*opacity;
}
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
break;
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
{
defsDepth--;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
DrawError,"UnbalancedGraphicContextPushPop","`%s'",token);
status=MagickFalse;
n=0;
break;
}
if ((graphic_context[n]->clip_mask != (char *) NULL) &&
(graphic_context[n]->compliance != SVGCompliance))
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
status=SetImageMask(image,WritePixelMask,(Image *) NULL,
exception);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("mask",token) == 0)
break;
if (LocaleCompare("pattern",token) == 0)
break;
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth--;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
{
/*
Class context.
*/
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"class") != 0)
continue;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("clip-path",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"clip-path") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("defs",token) == 0)
{
defsDepth++;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
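/*
Record a gradient definition: the MVG between "push gradient <name> <type>
x1,y1 x2,y2" and the matching "pop gradient" is captured and stored as the
image artifacts "<name>", "<name>-type", and "<name>-geometry"; the geometry
is the gradient segment's bounding box transformed by the current affine
matrix.
*/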
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent],
type[MagickPathExtent];
SegmentInfo
segment;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(type,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
segment.x1=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y1=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.x2=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y2=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (LocaleCompare(type,"radial") == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
}
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-type",name);
(void) SetImageArtifact(image,key,type);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
if (*q == '"')
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->id,token);
}
break;
}
if (LocaleCompare("mask",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
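/*
Record a pattern definition: the MVG between "push pattern <name> x,y
width,height" and the matching "pop pattern" is stored verbatim as the image
artifact "<name>" with its placement in "<name>-geometry"; DrawPatternPath()
renders it later when the pattern is referenced as a fill or stroke.
*/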
if (LocaleCompare("pattern",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent];
RectangleInfo
bounds;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
bounds.x=(ssize_t) ceil(GetDrawValue(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.y=(ssize_t) ceil(GetDrawValue(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.width=(size_t) floor(GetDrawValue(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.height=(size_t) floor(GetDrawValue(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double)
bounds.height,(double) bounds.x,(double) bounds.y);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth++;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
PixelInfo
stop_color;
number_stops++;
if (number_stops == 1)
stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops));
else
if (number_stops > 2)
stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops,
sizeof(*stops));
if (stops == (StopInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,&stop_color,
exception);
stops[number_stops-1].color=stop_color;
(void) GetNextToken(q,&q,extent,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
stops[number_stops-1].offset=factor*GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->stroke,exception);
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=
graphic_context[n]->stroke_alpha;
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
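/*
Parse the stroke-dasharray list: a look-ahead pass counts the dash values,
the dash_pattern array is allocated and filled, an odd-length pattern is
repeated once so dash and gap lengths alternate evenly, and the list is
terminated with a 0.0 sentinel.
*/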
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*r;
r=q;
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
status=MagickFalse;
break;
}
(void) memset(graphic_context[n]->dash_pattern,0,(size_t)
(2*x+2)*sizeof(*graphic_context[n]->dash_pattern));
for (j=0; j < x; j++)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_pattern[j]=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->dash_pattern[j] < 0.0)
status=MagickFalse;
}
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_offset=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
(void) GetNextToken(q,&q,extent,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
(void) GetNextToken(q,&q,extent,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->stroke_alpha*=opacity;
else
graphic_context[n]->stroke_alpha=QuantumRange*opacity;
if (graphic_context[n]->stroke.alpha != TransparentAlpha)
graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha;
else
graphic_context[n]->stroke.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
graphic_context[n]->stroke_width=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
cursor=0.0;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->text_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->undercolor,exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.tx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
cursor=0.0;
break;
}
status=MagickFalse;
break;
}
case 'u':
case 'U':
{
if (LocaleCompare("use",keyword) == 0)
{
const char
*use;
/*
Get a macro from the MVG document, and "use" it here.
*/
(void) GetNextToken(q,&q,extent,token);
use=(const char *) GetValueFromSplayTree(macros,token);
if (use != (const char *) NULL)
{
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
(void) CloneString(&clone_info->primitive,use);
status=RenderMVGContent(image,clone_info,depth+1,exception);
clone_info=DestroyDrawInfo(clone_info);
}
break;
}
status=MagickFalse;
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.x=(ssize_t) ceil(GetDrawValue(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.y=(ssize_t) ceil(GetDrawValue(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.width=(size_t) floor(GetDrawValue(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.height=(size_t) floor(GetDrawValue(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'w':
case 'W':
{
if (LocaleCompare("word-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
if ((fabs(affine.sx-1.0) >= MagickEpsilon) ||
(fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) ||
(fabs(affine.sy-1.0) >= MagickEpsilon) ||
(fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon))
{
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if (*q == '\0')
{
if (number_stops > 1)
{
GradientType
type;
type=LinearGradient;
if (draw_info->gradient.type == RadialGradient)
type=RadialGradient;
(void) GradientImage(image,type,PadSpread,stops,number_stops,
exception);
}
if (number_stops > 0)
stops=(StopInfo *) RelinquishMagickMemory(stops);
}
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int)
(q-p-1),p);
continue;
}
/*
Parse the primitive attributes.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
i=0;
mvg_info.offset=i;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
primitive_info[0].coordinates=0;
primitive_info[0].method=FloodfillMethod;
primitive_info[0].closed_subpath=MagickFalse;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
(void) GetNextToken(q,&q,extent,token);
point.x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
point.y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
primitive_info[i].closed_subpath=MagickFalse;
i++;
mvg_info.offset=i;
if (i < (ssize_t) number_points)
continue;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
if (status == MagickFalse)
break;
if ((primitive_info[j].primitive == TextPrimitive) ||
(primitive_info[j].primitive == ImagePrimitive))
if (primitive_info[j].text != (char *) NULL)
primitive_info[j].text=DestroyString(primitive_info[j].text);
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].closed_subpath=MagickFalse;
/*
Circumscribe primitive within a circle.
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
coordinates=(double) primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
coordinates*=5.0;
break;
}
case RoundRectanglePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot(alpha,beta);
coordinates*=5.0;
coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0*
BezierQuantum+360.0;
break;
}
case BezierPrimitive:
{
coordinates=(BezierQuantum*(double) primitive_info[j].coordinates);
if (primitive_info[j].coordinates > (108.0*BezierQuantum))
{
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
break;
}
case PathPrimitive:
{
char
*s,
*t;
(void) GetNextToken(q,&q,extent,token);
coordinates=1.0;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=GetDrawValue(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
coordinates++;
}
for (s=token; *s != '\0'; s++)
if (strspn(s,"AaCcQqSsTt") != 0)
coordinates+=(20.0*BezierQuantum)+360.0;
break;
}
default:
break;
}
if (status == MagickFalse)
break;
if (((size_t) (i+coordinates)) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=coordinates+1;
if (number_points < (size_t) coordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
mvg_info.offset=i;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad);
if (status == MagickFalse)
break;
mvg_info.offset=j;
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
status&=TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
double
dx,
dy,
maximum_length;
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
maximum_length=hypot(dx,dy);
if (maximum_length > (MaxBezierCoordinates/100.0))
ThrowPointExpectedException(keyword,exception);
status&=TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+2].point.x < 0.0) ||
(primitive_info[j+2].point.y < 0.0))
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0)
{
status=MagickFalse;
break;
}
status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
status&=TraceArc(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x < 0.0) ||
(primitive_info[j+1].point.y < 0.0))
{
status=MagickFalse;
break;
}
status&=TraceEllipse(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceCircle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
{
if (primitive_info[j].coordinates < 1)
{
status=MagickFalse;
break;
}
break;
}
case PolygonPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
primitive_info[j].closed_subpath=MagickTrue;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
status&=TraceBezier(&mvg_info,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
coordinates=(double) TracePath(&mvg_info,token,exception);
if (coordinates < 0.0)
{
status=MagickFalse;
break;
}
i=(ssize_t) (j+coordinates);
break;
}
case AlphaPrimitive:
case ColorPrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
{
status=MagickFalse;
break;
}
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
/*
Compute text cursor offset.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) &&
(fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon))
{
mvg_info.point=primitive_info->point;
primitive_info->point.x+=cursor;
}
else
{
mvg_info.point=primitive_info->point;
cursor=0.0;
}
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
clone_info->render=MagickFalse;
clone_info->text=AcquireString(token);
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
clone_info=DestroyDrawInfo(clone_info);
cursor+=metrics.width;
if (graphic_context[n]->compliance != SVGCompliance)
cursor=0.0;
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
break;
}
}
mvg_info.offset=i;
if (status == 0)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),
p);
/*
Sanity check.
*/
status&=CheckPrimitiveExtent(&mvg_info,(size_t)
ExpandAffine(&graphic_context[n]->affine));
if (status == 0)
break;
status&=CheckPrimitiveExtent(&mvg_info,(size_t)
graphic_context[n]->stroke_width);
if (status == 0)
break;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) &&
(graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
{
const char
*clip_path;
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,graphic_context[n]->clip_mask,
clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
status&=DrawPrimitive(image,graphic_context[n],primitive_info,
exception);
}
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
macros=DestroySplayTree(macros);
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
{
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
}
primitive=DestroyString(primitive);
if (stops != (StopInfo *) NULL)
stops=(StopInfo *) RelinquishMagickMemory(stops);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
ExceptionInfo *exception)
{
return(RenderMVGContent(image,draw_info,0,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() draws a pattern.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
% o pattern: the rendered pattern image is returned in this image.
%
% o exception: return any errors or warnings in this structure.
%
*/
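/*
A minimal usage sketch (illustrative, not part of the library): it assumes
the image already carries the "hatch" and "hatch-geometry" artifacts (i.e. an
MVG "push pattern hatch ..." block was parsed earlier) and that clone_info is
a writable DrawInfo obtained from CloneDrawInfo():

if (DrawPatternPath(image,clone_info,"hatch",&clone_info->fill_pattern,
exception) == MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"pattern hatch not found");
*/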
MagickExport MagickBooleanType DrawPatternPath(Image *image,
const DrawInfo *draw_info,const char *name,Image **pattern,
ExceptionInfo *exception)
{
char
property[MagickPathExtent];
const char
*geometry,
*path,
*type;
DrawInfo
*clone_info;
ImageInfo
*image_info;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
assert(name != (const char *) NULL);
(void) FormatLocaleString(property,MagickPathExtent,"%s",name);
path=GetImageArtifact(image,property);
if (path == (const char *) NULL)
return(MagickFalse);
(void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name);
geometry=GetImageArtifact(image,property);
if (geometry == (const char *) NULL)
return(MagickFalse);
if ((*pattern) != (Image *) NULL)
*pattern=DestroyImage(*pattern);
image_info=AcquireImageInfo();
image_info->size=AcquireString(geometry);
*pattern=AcquireImage(image_info,exception);
image_info=DestroyImageInfo(image_info);
(void) QueryColorCompliance("#00000000",AllCompliance,
&(*pattern)->background_color,exception);
(void) SetImageBackgroundColor(*pattern,exception);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"begin pattern-path %s %s",name,geometry);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
if (clone_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
if (clone_info->stroke_pattern != (Image *) NULL)
clone_info->stroke_pattern=DestroyImage(clone_info->stroke_pattern);
(void) FormatLocaleString(property,MagickPathExtent,"%s-type",name);
type=GetImageArtifact(image,property);
if (type != (const char *) NULL)
clone_info->gradient.type=(GradientType) ParseCommandOption(
MagickGradientOptions,MagickFalse,type);
(void) CloneString(&clone_info->primitive,path);
status=RenderMVGContent(*pattern,clone_info,0,exception);
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
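/*
The helpers below build one PolygonInfo per rendering thread: the primitive
is converted to a path once, ConvertPathToPolygon() yields the edge list for
thread 0, and each remaining thread gets its own copy of the edges so the
scanline/highwater bookkeeping in GetFillAlpha() can be updated without
locking during the parallel rasterization loops.
*/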
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
register ssize_t
i;
assert(polygon_info != (PolygonInfo **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (polygon_info[i] != (PolygonInfo *) NULL)
polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
return(polygon_info);
}
static PolygonInfo **AcquirePolygonThreadSet(
const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
{
PathInfo
*magick_restrict path_info;
PolygonInfo
**polygon_info;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
sizeof(*polygon_info));
if (polygon_info == (PolygonInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return((PolygonInfo **) NULL);
}
(void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
path_info=ConvertPrimitiveToPath(primitive_info,exception);
if (path_info == (PathInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
polygon_info[0]=ConvertPathToPolygon(path_info,exception);
if (polygon_info[0] == (PolygonInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonThreadSet(polygon_info));
}
for (i=1; i < (ssize_t) number_threads; i++)
{
EdgeInfo
*edge_info;
register ssize_t
j;
polygon_info[i]=(PolygonInfo *) AcquireMagickMemory(
sizeof(*polygon_info[i]));
if (polygon_info[i] == (PolygonInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonThreadSet(polygon_info));
}
polygon_info[i]->number_edges=0;
edge_info=polygon_info[0]->edges;
polygon_info[i]->edges=(EdgeInfo *) AcquireQuantumMemory(
polygon_info[0]->number_edges,sizeof(*edge_info));
if (polygon_info[i]->edges == (EdgeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonThreadSet(polygon_info));
}
(void) memcpy(polygon_info[i]->edges,edge_info,
polygon_info[0]->number_edges*sizeof(*edge_info));
for (j=0; j < (ssize_t) polygon_info[i]->number_edges; j++)
polygon_info[i]->edges[j].points=(PointInfo *) NULL;
polygon_info[i]->number_edges=polygon_info[0]->number_edges;
for (j=0; j < (ssize_t) polygon_info[i]->number_edges; j++)
{
edge_info=polygon_info[0]->edges+j;
polygon_info[i]->edges[j].points=(PointInfo *) AcquireQuantumMemory(
edge_info->number_points,sizeof(*edge_info->points));
if (polygon_info[i]->edges[j].points == (PointInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonThreadSet(polygon_info));
}
(void) memcpy(polygon_info[i]->edges[j].points,edge_info->points,
edge_info->number_points*sizeof(*edge_info->points));
}
}
path_info=(PathInfo *) RelinquishMagickMemory(path_info);
return(polygon_info);
}
static size_t DestroyEdge(PolygonInfo *polygon_info,const ssize_t edge)
{
assert(edge < polygon_info->number_edges);
polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
polygon_info->edges[edge].points);
polygon_info->number_edges--;
if (edge < polygon_info->number_edges)
(void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1,
(size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges));
return(polygon_info->number_edges);
}
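/*
GetFillAlpha() returns the fill coverage of pixel (x,y) and stores the stroke
coverage in *stroke_alpha. Coverage near an edge is derived from the squared
distance between the pixel and the closest edge segment; the winding number
of (x,y) then decides, under the non-zero or even-odd fill rule, whether the
interior contributes full opacity.
*/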
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid,
const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
const ssize_t y,double *stroke_alpha)
{
double
alpha,
beta,
distance,
subpath_alpha;
PointInfo
delta;
register const PointInfo
*q;
register EdgeInfo
*p;
register ssize_t
i;
ssize_t
j,
winding_number;
/*
Compute fill & stroke opacity for this (x,y) point.
*/
*stroke_alpha=0.0;
subpath_alpha=0.0;
p=polygon_info->edges;
for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
{
if ((double) y <= (p->bounds.y1-mid-0.5))
break;
if ((double) y > (p->bounds.y2+mid+0.5))
{
(void) DestroyEdge(polygon_info,j);
continue;
}
if (((double) x <= (p->bounds.x1-mid-0.5)) ||
((double) x > (p->bounds.x2+mid+0.5)))
continue;
i=(ssize_t) MagickMax((double) p->highwater,1.0);
for ( ; i < (ssize_t) p->number_points; i++)
{
if ((double) y <= (p->points[i-1].y-mid-0.5))
break;
if ((double) y > (p->points[i].y+mid+0.5))
continue;
if (p->scanline != (double) y)
{
p->scanline=(double) y;
p->highwater=(size_t) i;
}
/*
Compute distance between a point and an edge.
*/
q=p->points+i-1;
delta.x=(q+1)->x-q->x;
delta.y=(q+1)->y-q->y;
beta=delta.x*(x-q->x)+delta.y*(y-q->y);
if (beta <= 0.0)
{
delta.x=(double) x-q->x;
delta.y=(double) y-q->y;
distance=delta.x*delta.x+delta.y*delta.y;
}
else
{
alpha=delta.x*delta.x+delta.y*delta.y;
if (beta >= alpha)
{
delta.x=(double) x-(q+1)->x;
delta.y=(double) y-(q+1)->y;
distance=delta.x*delta.x+delta.y*delta.y;
}
else
{
alpha=PerceptibleReciprocal(alpha);
beta=delta.x*(y-q->y)-delta.y*(x-q->x)+MagickEpsilon;
distance=alpha*beta*beta;
}
}
/*
Compute stroke & subpath opacity.
*/
beta=0.0;
if (p->ghostline == MagickFalse)
{
alpha=mid+0.5;
if ((*stroke_alpha < 1.0) &&
(distance <= ((alpha+0.25)*(alpha+0.25))))
{
alpha=mid-0.5;
if (distance <= ((alpha+0.25)*(alpha+0.25)))
*stroke_alpha=1.0;
else
{
beta=1.0;
if (fabs(distance-1.0) >= MagickEpsilon)
beta=sqrt((double) distance);
alpha=beta-mid-0.5;
if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
*stroke_alpha=(alpha-0.25)*(alpha-0.25);
}
}
}
if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0))
continue;
if (distance <= 0.0)
{
subpath_alpha=1.0;
continue;
}
if (distance > 1.0)
continue;
if (fabs(beta) < MagickEpsilon)
{
beta=1.0;
if (fabs(distance-1.0) >= MagickEpsilon)
beta=sqrt(distance);
}
alpha=beta-1.0;
if (subpath_alpha < (alpha*alpha))
subpath_alpha=alpha*alpha;
}
}
/*
Compute fill opacity.
*/
if (fill == MagickFalse)
return(0.0);
if (subpath_alpha >= 1.0)
return(1.0);
/*
Determine winding number.
*/
winding_number=0;
p=polygon_info->edges;
for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
{
if ((double) y <= p->bounds.y1)
break;
if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
continue;
if ((double) x > p->bounds.x2)
{
winding_number+=p->direction ? 1 : -1;
continue;
}
i=(ssize_t) MagickMax((double) p->highwater,1.0);
for ( ; i < (ssize_t) (p->number_points-1); i++)
if ((double) y <= p->points[i].y)
break;
q=p->points+i-1;
if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
winding_number+=p->direction ? 1 : -1;
}
if (fill_rule != NonZeroRule)
{
if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
return(1.0);
}
else
if (MagickAbsoluteValue(winding_number) != 0)
return(1.0);
return(subpath_alpha);
}
static MagickBooleanType DrawPolygonPrimitive(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
CacheView
*image_view;
const char
*artifact;
MagickBooleanType
fill,
status;
double
mid;
PolygonInfo
**magick_restrict polygon_info;
register EdgeInfo
*p;
register ssize_t
i;
SegmentInfo
bounds;
ssize_t
start_y,
stop_y,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
assert(primitive_info != (PrimitiveInfo *) NULL);
if (primitive_info->coordinates <= 1)
return(MagickTrue);
/*
Compute bounding box.
*/
polygon_info=AcquirePolygonThreadSet(primitive_info,exception);
if (polygon_info == (PolygonInfo **) NULL)
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
fill=(primitive_info->method == FillToBorderMethod) ||
(primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
bounds=polygon_info[0]->edges[0].bounds;
artifact=GetImageArtifact(image,"draw:render-bounding-rectangles");
if (IsStringTrue(artifact) != MagickFalse)
(void) DrawBoundingRectangles(image,draw_info,polygon_info[0],exception);
for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
{
p=polygon_info[0]->edges+i;
if (p->bounds.x1 < bounds.x1)
bounds.x1=p->bounds.x1;
if (p->bounds.y1 < bounds.y1)
bounds.y1=p->bounds.y1;
if (p->bounds.x2 > bounds.x2)
bounds.x2=p->bounds.x2;
if (p->bounds.y2 > bounds.y2)
bounds.y2=p->bounds.y2;
}
bounds.x1-=(mid+1.0);
bounds.y1-=(mid+1.0);
bounds.x2+=(mid+1.0);
bounds.y2+=(mid+1.0);
if ((bounds.x1 >= (double) image->columns) ||
(bounds.y1 >= (double) image->rows) ||
(bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
{
polygon_info=DestroyPolygonThreadSet(polygon_info);
return(MagickTrue); /* virtual polygon */
}
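/*
Clamp the rasterization bounds to the image extent.
*/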
bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
(double) image->columns-1.0 : bounds.x1;
bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
(double) image->rows-1.0 : bounds.y1;
bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
(double) image->columns-1.0 : bounds.x2;
bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
(double) image->rows-1.0 : bounds.y2;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
if ((primitive_info->coordinates == 1) ||
(polygon_info[0]->number_edges == 0))
{
/*
Draw point.
*/
start_y=(ssize_t) ceil(bounds.y1-0.5);
stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
for (y=start_y; y <= stop_y; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
register ssize_t
x;
register Quantum
*magick_restrict q;
ssize_t
start_x,
stop_x;
if (status == MagickFalse)
continue;
start_x=(ssize_t) ceil(bounds.x1-0.5);
stop_x=(ssize_t) floor(bounds.x2+0.5);
x=start_x;
q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
GetPixelInfo(image,&pixel);
for ( ; x <= stop_x; x++)
{
if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
(y == (ssize_t) ceil(primitive_info->point.y-0.5)))
{
GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
}
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
polygon_info=DestroyPolygonThreadSet(polygon_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" end draw-polygon");
return(status);
}
/*
Draw polygon or line.
*/
start_y=(ssize_t) ceil(bounds.y1-0.5);
stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
for (y=start_y; y <= stop_y; y++)
{
const int
id = GetOpenMPThreadId();
register Quantum
*magick_restrict q;
register ssize_t
x;
ssize_t
start_x,
stop_x;
if (status == MagickFalse)
continue;
start_x=(ssize_t) ceil(bounds.x1-0.5);
stop_x=(ssize_t) floor(bounds.x2+0.5);
q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
1),1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=start_x; x <= stop_x; x++)
{
double
fill_alpha,
stroke_alpha;
PixelInfo
fill_color,
stroke_color;
/*
Fill and/or stroke.
*/
fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
x,y,&stroke_alpha);
if (draw_info->stroke_antialias == MagickFalse)
{
fill_alpha=fill_alpha > 0.5 ? 1.0 : 0.0;
stroke_alpha=stroke_alpha > 0.5 ? 1.0 : 0.0;
}
GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q,
(double) GetPixelAlpha(image,q),q);
GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q,
(double) GetPixelAlpha(image,q),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
polygon_info=DestroyPolygonThreadSet(polygon_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
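/*
A minimal caller's sketch (illustrative only, with hypothetical coordinates):
DrawPrimitive() expects a PrimitiveInfo array terminated by an entry whose
primitive is UndefinedPrimitive, with points already in image space:

PrimitiveInfo
primitive_info[2];

(void) memset(primitive_info,0,sizeof(primitive_info));
primitive_info[0].primitive=PointPrimitive;
primitive_info[0].coordinates=1;
primitive_info[0].method=PointMethod;
primitive_info[0].point.x=10.0;
primitive_info[0].point.y=10.0;
primitive_info[1].primitive=UndefinedPrimitive;
(void) DrawPrimitive(image,draw_info,primitive_info,exception);
*/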
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
const char
*methods[] =
{
"point",
"replace",
"floodfill",
"filltoborder",
"reset",
"?"
};
PointInfo
p,
point,
q;
register ssize_t
i,
x;
ssize_t
coordinates,
y;
x=(ssize_t) ceil(primitive_info->point.x-0.5);
y=(ssize_t) ceil(primitive_info->point.y-0.5);
switch (primitive_info->primitive)
{
case AlphaPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case ColorPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case ImagePrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"ImagePrimitive %.20g,%.20g",(double) x,(double) y);
return;
}
case PointPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case TextPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"TextPrimitive %.20g,%.20g",(double) x,(double) y);
return;
}
default:
break;
}
coordinates=0;
p=primitive_info[0].point;
q.x=(-1.0);
q.y=(-1.0);
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
if (coordinates <= 0)
{
coordinates=(ssize_t) primitive_info[i].coordinates;
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin open (%.20g)",(double) coordinates);
p=point;
}
point=primitive_info[i].point;
if ((fabs(q.x-point.x) >= MagickEpsilon) ||
(fabs(q.y-point.y) >= MagickEpsilon))
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
else
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
q=point;
coordinates--;
if (coordinates > 0)
continue;
if ((fabs(p.x-point.x) >= MagickEpsilon) ||
(fabs(p.y-point.y) >= MagickEpsilon))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
(double) coordinates);
else
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
(double) coordinates);
}
}
MagickExport MagickBooleanType DrawPrimitive(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickStatusType
status;
register ssize_t
i,
x;
ssize_t
y;
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin draw-primitive");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
draw_info->affine.tx,draw_info->affine.ty);
}
status=MagickTrue;
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
(IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
status&=SetImageColorspace(image,sRGBColorspace,exception);
if (draw_info->compliance == SVGCompliance)
{
status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask,
exception);
status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask,
exception);
}
x=(ssize_t) ceil(primitive_info->point.x-0.5);
y=(ssize_t) ceil(primitive_info->point.y-0.5);
image_view=AcquireAuthenticCacheView(image,exception);
switch (primitive_info->primitive)
{
case AlphaPrimitive:
{
if (image->alpha_trait == UndefinedPixelTrait)
status&=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
switch (primitive_info->method)
{
case PointMethod:
default:
{
PixelInfo
pixel;
register Quantum
*q;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
status&=SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case ReplaceMethod:
{
PixelInfo
pixel,
target;
status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
exception);
GetPixelInfo(image,&pixel);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
{
q+=GetPixelChannels(image);
continue;
}
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
q+=GetPixelChannels(image);
}
status&=SyncCacheViewAuthenticPixels(image_view,exception);
if (status == MagickFalse)
break;
}
break;
}
case FloodfillMethod:
case FillToBorderMethod:
{
ChannelType
channel_mask;
PixelInfo
target;
status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
&target,exception);
if (primitive_info->method == FillToBorderMethod)
{
target.red=(double) draw_info->border_color.red;
target.green=(double) draw_info->border_color.green;
target.blue=(double) draw_info->border_color.blue;
}
channel_mask=SetImageChannelMask(image,AlphaChannel);
status&=FloodfillPaintImage(image,draw_info,&target,x,y,
primitive_info->method == FloodfillMethod ? MagickFalse :
MagickTrue,exception);
(void) SetImageChannelMask(image,channel_mask);
break;
}
case ResetMethod:
{
PixelInfo
pixel;
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
q+=GetPixelChannels(image);
}
status&=SyncCacheViewAuthenticPixels(image_view,exception);
if (status == MagickFalse)
break;
}
break;
}
}
break;
}
case ColorPrimitive:
{
switch (primitive_info->method)
{
case PointMethod:
default:
{
PixelInfo
pixel;
register Quantum
*q;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetPixelInfo(image,&pixel);
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
status&=SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case ReplaceMethod:
{
PixelInfo
pixel,
target;
status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
{
q+=GetPixelChannels(image);
continue;
}
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
status&=SyncCacheViewAuthenticPixels(image_view,exception);
if (status == MagickFalse)
break;
}
break;
}
case FloodfillMethod:
case FillToBorderMethod:
{
PixelInfo
target;
status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
&target,exception);
if (primitive_info->method == FillToBorderMethod)
{
target.red=(double) draw_info->border_color.red;
target.green=(double) draw_info->border_color.green;
target.blue=(double) draw_info->border_color.blue;
}
status&=FloodfillPaintImage(image,draw_info,&target,x,y,
primitive_info->method == FloodfillMethod ? MagickFalse :
MagickTrue,exception);
break;
}
case ResetMethod:
{
PixelInfo
pixel;
GetPixelInfo(image,&pixel);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
status&=SyncCacheViewAuthenticPixels(image_view,exception);
if (status == MagickFalse)
break;
}
break;
}
}
break;
}
case ImagePrimitive:
{
AffineMatrix
affine;
char
composite_geometry[MagickPathExtent];
Image
*composite_image,
*composite_images;
ImageInfo
*clone_info;
RectangleInfo
geometry;
ssize_t
x1,
y1;
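/*
Composite an image primitive: the source is read from an inline "data:" URI,
a URL, or a filename carried in the primitive's text, optionally resized to
the second coordinate pair, then drawn at the primitive's origin -- with
DrawAffineImage() when the compose operator is over, otherwise with
CompositeImage().
*/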
if (primitive_info->text == (char *) NULL)
break;
clone_info=AcquireImageInfo();
composite_images=(Image *) NULL;
if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
composite_images=ReadInlineImage(clone_info,primitive_info->text,
exception);
else
if (*primitive_info->text != '\0')
{
(void) CopyMagickString(clone_info->filename,primitive_info->text,
MagickPathExtent);
status&=SetImageInfo(clone_info,0,exception);
if ((LocaleNCompare(clone_info->magick,"http",4) == 0) ||
(LocaleCompare(clone_info->magick,"mpri") == 0))
(void) CopyMagickString(clone_info->filename,primitive_info->text,
MagickPathExtent);
if (clone_info->size != (char *) NULL)
clone_info->size=DestroyString(clone_info->size);
if (clone_info->extract != (char *) NULL)
clone_info->extract=DestroyString(clone_info->extract);
if (*clone_info->filename != '\0')
composite_images=ReadImage(clone_info,exception);
}
clone_info=DestroyImageInfo(clone_info);
if (composite_images == (Image *) NULL)
{
status=MagickFalse;
break;
}
composite_image=RemoveFirstImageFromList(&composite_images);
composite_images=DestroyImageList(composite_images);
(void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
NULL,(void *) NULL);
x1=(ssize_t) ceil(primitive_info[1].point.x-0.5);
y1=(ssize_t) ceil(primitive_info[1].point.y-0.5);
if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
{
/*
Resize image.
*/
(void) FormatLocaleString(composite_geometry,MagickPathExtent,
"%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y);
composite_image->filter=image->filter;
status&=TransformImage(&composite_image,(char *) NULL,
composite_geometry,exception);
}
if (composite_image->alpha_trait == UndefinedPixelTrait)
status&=SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
exception);
if (draw_info->alpha != OpaqueAlpha)
status&=SetImageAlpha(composite_image,draw_info->alpha,exception);
SetGeometry(image,&geometry);
image->gravity=draw_info->gravity;
geometry.x=x;
geometry.y=y;
(void) FormatLocaleString(composite_geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
composite_image->rows,(double) geometry.x,(double) geometry.y);
(void) ParseGravityGeometry(image,composite_geometry,&geometry,exception);
affine=draw_info->affine;
affine.tx=(double) geometry.x;
affine.ty=(double) geometry.y;
composite_image->interpolate=image->interpolate;
if ((draw_info->compose == OverCompositeOp) ||
(draw_info->compose == SrcOverCompositeOp))
status&=DrawAffineImage(image,composite_image,&affine,exception);
else
status&=CompositeImage(image,composite_image,draw_info->compose,
MagickTrue,geometry.x,geometry.y,exception);
composite_image=DestroyImage(composite_image);
break;
}
case PointPrimitive:
{
PixelInfo
fill_color;
register Quantum
*q;
if ((y < 0) || (y >= (ssize_t) image->rows))
break;
if ((x < 0) || (x >= (ssize_t) image->columns))
break;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetFillColor(draw_info,x,y,&fill_color,exception);
CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,(double)
GetPixelAlpha(image,q),q);
status&=SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
DrawInfo
*clone_info;
if (primitive_info->text == (char *) NULL)
break;
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->text,primitive_info->text);
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
(void) CloneString(&clone_info->geometry,geometry);
status&=AnnotateImage(image,clone_info,exception);
clone_info=DestroyDrawInfo(clone_info);
break;
}
default:
{
double
mid,
scale;
DrawInfo
*clone_info;
if (IsEventLogging() != MagickFalse)
LogPrimitiveInfo(primitive_info);
scale=ExpandAffine(&draw_info->affine);
if ((draw_info->dash_pattern != (double *) NULL) &&
(fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
(fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
(draw_info->stroke.alpha != (Quantum) TransparentAlpha))
{
/*
Draw dash polygon.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->stroke_width=0.0;
clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
exception);
clone_info=DestroyDrawInfo(clone_info);
if (status != MagickFalse)
status&=DrawDashPolygon(draw_info,primitive_info,image,exception);
break;
}
mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
if ((mid > 1.0) &&
((draw_info->stroke.alpha != (Quantum) TransparentAlpha) ||
(draw_info->stroke_pattern != (Image *) NULL)))
{
double
x,
y;
MagickBooleanType
closed_path;
/*
Draw strokes while respecting line cap/join attributes.
*/
closed_path=primitive_info[0].closed_subpath;
i=(ssize_t) primitive_info[0].coordinates;
x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
if ((x < MagickEpsilon) && (y < MagickEpsilon))
closed_path=MagickTrue;
if ((((draw_info->linecap == RoundCap) ||
(closed_path != MagickFalse)) &&
(draw_info->linejoin == RoundJoin)) ||
(primitive_info[i].primitive != UndefinedPrimitive))
{
status&=DrawPolygonPrimitive(image,draw_info,primitive_info,
exception);
break;
}
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->stroke_width=0.0;
clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
exception);
clone_info=DestroyDrawInfo(clone_info);
if (status != MagickFalse)
status&=DrawStrokePolygon(image,draw_info,primitive_info,exception);
break;
}
status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
break;
}
}
image_view=DestroyCacheView(image_view);
if (draw_info->compliance == SVGCompliance)
{
status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
% MagickBooleanType DrawStrokePolygon(Image *image,
%        const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
PrimitiveInfo
linecap[5];
register ssize_t
i;
for (i=0; i < 4; i++)
linecap[i]=(*primitive_info);
linecap[0].coordinates=4;
linecap[1].point.x+=2.0*MagickEpsilon;
linecap[2].point.x+=2.0*MagickEpsilon;
linecap[2].point.y+=2.0*MagickEpsilon;
linecap[3].point.y+=2.0*MagickEpsilon;
linecap[4].primitive=UndefinedPrimitive;
return(DrawPolygonPrimitive(image,draw_info,linecap,exception));
}
static MagickBooleanType DrawStrokePolygon(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
DrawInfo
*clone_info;
MagickBooleanType
closed_path;
MagickStatusType
status;
PrimitiveInfo
*stroke_polygon;
register const PrimitiveInfo
*p,
*q;
/*
Draw stroked polygon.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin draw-stroke-polygon");
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->fill=draw_info->stroke;
if (clone_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
if (clone_info->stroke_pattern != (Image *) NULL)
clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
MagickTrue,exception);
clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
clone_info->stroke_width=0.0;
clone_info->fill_rule=NonZeroRule;
status=MagickTrue;
for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
{
if (p->coordinates == 1)
continue;
stroke_polygon=TraceStrokePolygon(draw_info,p,exception);
if (stroke_polygon == (PrimitiveInfo *) NULL)
{
status=0;
break;
}
status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
if (status == 0)
break;
q=p+p->coordinates-1;
closed_path=p->closed_subpath;
if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
{
status&=DrawRoundLinecap(image,draw_info,p,exception);
status&=DrawRoundLinecap(image,draw_info,q,exception);
}
}
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" end draw-stroke-polygon");
return(status != 0 ? MagickTrue : MagickFalse);
}
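/*
Editor's note (illustrative summary, not part of the original source):
DrawStrokePolygon() renders a stroke by filling its outline.  It clones
draw_info with the fill set to the stroke color (and the fill pattern set
to the stroke pattern), disables the stroke itself, then for each subpath
calls TraceStrokePolygon() to compute the outline polygon and fills it
with DrawPolygonPrimitive().  Open subpaths drawn with a round linecap
additionally get a cap at each end via DrawRoundLinecap().
*/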
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(affine_matrix != (AffineMatrix *) NULL);
(void) memset(affine_matrix,0,sizeof(*affine_matrix));
affine_matrix->sx=1.0;
affine_matrix->sy=1.0;
}
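/*
Editor's note: a minimal usage sketch (illustrative, not part of the
original source), assuming the MagickCore headers included above:

  AffineMatrix
    affine;

  GetAffineMatrix(&affine);   // identity: sx=sy=1, rx=ry=tx=ty=0
  affine.sx=2.0;              // compose a uniform 2x scale ...
  affine.sy=2.0;
  affine.tx=10.0;             // ... with a (10,5) translation
  affine.ty=5.0;
*/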
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
%    o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
char
*next_token;
const char
*option;
ExceptionInfo
*exception;
ImageInfo
*clone_info;
/*
Initialize draw attributes.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(draw_info != (DrawInfo *) NULL);
(void) memset(draw_info,0,sizeof(*draw_info));
clone_info=CloneImageInfo(image_info);
GetAffineMatrix(&draw_info->affine);
exception=AcquireExceptionInfo();
(void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
exception);
(void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke,
exception);
draw_info->stroke_antialias=clone_info->antialias;
draw_info->stroke_width=1.0;
draw_info->fill_rule=EvenOddRule;
draw_info->alpha=OpaqueAlpha;
draw_info->fill_alpha=OpaqueAlpha;
draw_info->stroke_alpha=OpaqueAlpha;
draw_info->linecap=ButtCap;
draw_info->linejoin=MiterJoin;
draw_info->miterlimit=10;
draw_info->decorate=NoDecoration;
draw_info->pointsize=12.0;
draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
draw_info->compose=OverCompositeOp;
draw_info->render=MagickTrue;
draw_info->clip_path=MagickFalse;
draw_info->debug=IsEventLogging();
if (clone_info->font != (char *) NULL)
draw_info->font=AcquireString(clone_info->font);
if (clone_info->density != (char *) NULL)
draw_info->density=AcquireString(clone_info->density);
draw_info->text_antialias=clone_info->antialias;
if (fabs(clone_info->pointsize) >= MagickEpsilon)
draw_info->pointsize=clone_info->pointsize;
draw_info->border_color=clone_info->border_color;
if (clone_info->server_name != (char *) NULL)
draw_info->server_name=AcquireString(clone_info->server_name);
option=GetImageOption(clone_info,"direction");
if (option != (const char *) NULL)
draw_info->direction=(DirectionType) ParseCommandOption(
MagickDirectionOptions,MagickFalse,option);
else
draw_info->direction=UndefinedDirection;
option=GetImageOption(clone_info,"encoding");
if (option != (const char *) NULL)
(void) CloneString(&draw_info->encoding,option);
option=GetImageOption(clone_info,"family");
if (option != (const char *) NULL)
(void) CloneString(&draw_info->family,option);
option=GetImageOption(clone_info,"fill");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
exception);
option=GetImageOption(clone_info,"gravity");
if (option != (const char *) NULL)
draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
MagickFalse,option);
option=GetImageOption(clone_info,"interline-spacing");
if (option != (const char *) NULL)
draw_info->interline_spacing=GetDrawValue(option,&next_token);
option=GetImageOption(clone_info,"interword-spacing");
if (option != (const char *) NULL)
draw_info->interword_spacing=GetDrawValue(option,&next_token);
option=GetImageOption(clone_info,"kerning");
if (option != (const char *) NULL)
draw_info->kerning=GetDrawValue(option,&next_token);
option=GetImageOption(clone_info,"stroke");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
exception);
option=GetImageOption(clone_info,"strokewidth");
if (option != (const char *) NULL)
draw_info->stroke_width=GetDrawValue(option,&next_token);
option=GetImageOption(clone_info,"style");
if (option != (const char *) NULL)
draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
MagickFalse,option);
option=GetImageOption(clone_info,"undercolor");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
exception);
option=GetImageOption(clone_info,"weight");
if (option != (const char *) NULL)
{
ssize_t
weight;
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(option);
draw_info->weight=(size_t) weight;
}
exception=DestroyExceptionInfo(exception);
draw_info->signature=MagickCoreSignature;
clone_info=DestroyImageInfo(clone_info);
}
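/*
Editor's note: a minimal usage sketch (illustrative, not part of the
original source); callers typically prime a caller-owned DrawInfo from an
ImageInfo and then override individual attributes:

  ImageInfo
    *image_info;

  DrawInfo
    draw_info;

  image_info=AcquireImageInfo();
  GetDrawInfo(image_info,&draw_info);  // opaque black fill, transparent
                                       // stroke, 1.0 stroke width
  draw_info.pointsize=18.0;
  image_info=DestroyImageInfo(image_info);
*/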
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Permutate() returns the permutation of (n,k): the binomial coefficient
%  n!/(k!*(n-k)!).
%
% The format of the Permutate method is:
%
% void Permutate(ssize_t n,ssize_t k)
%
% A description of each parameter follows:
%
%    o n: the number of items to choose from.
%
%    o k: the number of items chosen.
%
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
double
r;
register ssize_t
i;
r=1.0;
for (i=k+1; i <= n; i++)
r*=i;
for (i=1; i <= (n-k); i++)
r/=i;
return(r);
}
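/*
Editor's note (worked example, not part of the original source):
Permutate(4,2) evaluates (3*4)/(1*2) = 6, i.e. the binomial coefficient
C(4,2).  TraceBezier() uses these values as the Bernstein basis
coefficients of a degree number_coordinates-1 Bezier curve.
*/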
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
const PointInfo end,const PointInfo degrees)
{
PointInfo
center,
radius;
center.x=0.5*(end.x+start.x);
center.y=0.5*(end.y+start.y);
radius.x=fabs(center.x-start.x);
radius.y=fabs(center.y-start.y);
return(TraceEllipse(mvg_info,center,radius,degrees));
}
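/*
Editor's note (worked example, not part of the original source): for
start=(0,0) and end=(10,6), TraceArc() traces an ellipse centered at the
midpoint (5,3) with radii (5,3); the degrees argument is passed through to
TraceEllipse() to select the start and end angles of the sweep.
*/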
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
const PointInfo end,const PointInfo arc,const double angle,
const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
double
alpha,
beta,
delta,
factor,
gamma,
theta;
MagickStatusType
status;
PointInfo
center,
points[3],
radii;
register double
cosine,
sine;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i;
size_t
arc_segments;
ssize_t
offset;
offset=mvg_info->offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=0;
if ((fabs(start.x-end.x) < MagickEpsilon) &&
(fabs(start.y-end.y) < MagickEpsilon))
return(TracePoint(primitive_info,end));
radii.x=fabs(arc.x);
radii.y=fabs(arc.y);
if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon))
return(TraceLine(primitive_info,start,end));
cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
(radii.y*radii.y);
if (delta < MagickEpsilon)
return(TraceLine(primitive_info,start,end));
if (delta > 1.0)
{
radii.x*=sqrt((double) delta);
radii.y*=sqrt((double) delta);
}
points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
alpha=points[1].x-points[0].x;
beta=points[1].y-points[0].y;
if (fabs(alpha*alpha+beta*beta) < MagickEpsilon)
return(TraceLine(primitive_info,start,end));
factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
if (factor <= 0.0)
factor=0.0;
else
{
factor=sqrt((double) factor);
if (sweep == large_arc)
factor=(-factor);
}
center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
alpha=atan2(points[0].y-center.y,points[0].x-center.x);
theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
if ((theta < 0.0) && (sweep != MagickFalse))
theta+=2.0*MagickPI;
else
if ((theta > 0.0) && (sweep == MagickFalse))
theta-=2.0*MagickPI;
arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+
MagickEpsilon))));
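/*
Editor's note (not part of the original source): the sweep theta is split
into ceil(|theta|/(pi/2)) pieces so that each cubic Bezier segment
generated below spans at most a quarter turn, keeping the circular-arc
approximation error small.
*/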
status=MagickTrue;
p=primitive_info;
for (i=0; i < (ssize_t) arc_segments; i++)
{
beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
sin(fmod((double) beta,DegreesToRadians(360.0)));
points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
(double) i*theta/arc_segments),DegreesToRadians(360.0))));
points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
(double) i*theta/arc_segments),DegreesToRadians(360.0))));
points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
theta/arc_segments),DegreesToRadians(360.0))));
points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
theta/arc_segments),DegreesToRadians(360.0))));
points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
(i+1)*theta/arc_segments),DegreesToRadians(360.0))));
points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
(i+1)*theta/arc_segments),DegreesToRadians(360.0))));
p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
(p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
points[0].y);
(p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
points[0].y);
(p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
points[1].y);
(p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
points[1].y);
(p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
points[2].y);
(p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
points[2].y);
if (i == (ssize_t) (arc_segments-1))
(p+3)->point=end;
status&=TraceBezier(mvg_info,4);
if (status == 0)
break;
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
p+=p->coordinates;
}
if (status == 0)
return(MagickFalse);
mvg_info->offset=offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
const size_t number_coordinates)
{
double
alpha,
*coefficients,
weight;
PointInfo
end,
point,
*points;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i,
j;
size_t
control_points,
quantum;
/*
Allocate coefficients.
*/
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
quantum=number_coordinates;
for (i=0; i < (ssize_t) number_coordinates; i++)
{
for (j=i+1; j < (ssize_t) number_coordinates; j++)
{
alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
if (alpha > (double) SSIZE_MAX)
{
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
if (alpha > (double) quantum)
quantum=(size_t) alpha;
alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
if (alpha > (double) SSIZE_MAX)
{
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
if (alpha > (double) quantum)
quantum=(size_t) alpha;
}
}
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
quantum=MagickMin(quantum/number_coordinates,BezierQuantum);
coefficients=(double *) AcquireQuantumMemory(number_coordinates,
sizeof(*coefficients));
points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates*
sizeof(*points));
if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
{
if (points != (PointInfo *) NULL)
points=(PointInfo *) RelinquishMagickMemory(points);
if (coefficients != (double *) NULL)
coefficients=(double *) RelinquishMagickMemory(coefficients);
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
control_points=quantum*number_coordinates;
if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
/*
Compute bezier points.
*/
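/*
Editor's note (not part of the original source): each sample computed
below is the Bernstein form of the curve,

  B(w) = sum_{j=0..n-1} C(n-1,j) * (1-w)^(n-1-j) * w^j * P[j],

with n = number_coordinates and coefficients[j] = C(n-1,j) from
Permutate().  The loop starts from alpha = (1-w)^(n-1) and multiplies by
w/(1-w) per term instead of recomputing the powers.
*/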
end=primitive_info[number_coordinates-1].point;
for (i=0; i < (ssize_t) number_coordinates; i++)
coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
weight=0.0;
for (i=0; i < (ssize_t) control_points; i++)
{
p=primitive_info;
point.x=0.0;
point.y=0.0;
alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
for (j=0; j < (ssize_t) number_coordinates; j++)
{
point.x+=alpha*coefficients[j]*p->point.x;
point.y+=alpha*coefficients[j]*p->point.y;
alpha*=weight/(1.0-weight);
p++;
}
points[i]=point;
weight+=1.0/control_points;
}
/*
Bezier curves are just short segmented polys.
*/
p=primitive_info;
for (i=0; i < (ssize_t) control_points; i++)
{
if (TracePoint(p,points[i]) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
p+=p->coordinates;
}
if (TracePoint(p,end) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickTrue);
}
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
const PointInfo end)
{
double
alpha,
beta,
radius;
PointInfo
offset,
degrees;
alpha=end.x-start.x;
beta=end.y-start.y;
radius=hypot((double) alpha,(double) beta);
offset.x=(double) radius;
offset.y=(double) radius;
degrees.x=0.0;
degrees.y=360.0;
return(TraceEllipse(mvg_info,start,offset,degrees));
}
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
const PointInfo radii,const PointInfo arc)
{
double
coordinates,
delta,
step,
x,
y;
PointInfo
angle,
point;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i;
/*
Ellipses are just short segmented polys.
*/
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=0;
if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
return(MagickTrue);
delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
step=MagickPI/8.0;
if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
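/*
Editor's note (not part of the original source): delta = 2/max(rx,ry), so
for large ellipses the expression above reduces to step = delta/2 =
1/max(rx,ry) radians, i.e. roughly one vertex per pixel of arc length at
the larger radius; small ellipses fall back to a step of pi/8.
*/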
angle.x=DegreesToRadians(arc.x);
y=arc.y;
while (y < arc.x)
y+=360.0;
angle.y=DegreesToRadians(y);
coordinates=ceil((angle.y-angle.x)/step+1.0);
if (coordinates > (108.0*BezierQuantum))
{
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse)
return(MagickFalse);
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
for (p=primitive_info; angle.x < angle.y; angle.x+=step)
{
point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
}
point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
x=fabs(primitive_info[0].point.x-
primitive_info[primitive_info->coordinates-1].point.x);
y=fabs(primitive_info[0].point.y-
primitive_info[primitive_info->coordinates-1].point.y);
if ((x < MagickEpsilon) && (y < MagickEpsilon))
primitive_info->closed_subpath=MagickTrue;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
const PointInfo start,const PointInfo end)
{
if (TracePoint(primitive_info,start) == MagickFalse)
return(MagickFalse);
if ((fabs(start.x-end.x) < MagickEpsilon) &&
(fabs(start.y-end.y) < MagickEpsilon))
{
primitive_info->primitive=PointPrimitive;
primitive_info->coordinates=1;
return(MagickTrue);
}
if (TracePoint(primitive_info+1,end) == MagickFalse)
return(MagickFalse);
(primitive_info+1)->primitive=primitive_info->primitive;
primitive_info->coordinates=2;
primitive_info->closed_subpath=MagickFalse;
return(MagickTrue);
}
static ssize_t TracePath(MVGInfo *mvg_info,const char *path,
ExceptionInfo *exception)
{
char
*next_token,
token[MagickPathExtent];
const char
*p;
double
x,
y;
int
attribute,
last_attribute;
MagickBooleanType
status;
PointInfo
end = {0.0, 0.0},
points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
point = {0.0, 0.0},
start = {0.0, 0.0};
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register PrimitiveInfo
*q;
register ssize_t
i;
size_t
number_coordinates,
z_count;
ssize_t
subpath_offset;
subpath_offset=mvg_info->offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
status=MagickTrue;
attribute=0;
number_coordinates=0;
z_count=0;
primitive_type=primitive_info->primitive;
q=primitive_info;
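/*
Editor's note (not part of the original source): the loop below consumes
one SVG-style path command at a time.  For example the path
"M 10,10 L 90,10 90,90 Z" yields a moveto, two absolute linetos and a
closepath, each traced into point primitives at the current offset.
*/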
for (p=path; *p != '\0'; )
{
if (status == MagickFalse)
break;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == '\0')
break;
last_attribute=attribute;
attribute=(int) (*p++);
switch (attribute)
{
case 'a':
case 'A':
{
double
angle = 0.0;
MagickBooleanType
large_arc = MagickFalse,
sweep = MagickFalse;
PointInfo
arc = {0.0, 0.0};
/*
Elliptical arc.
*/
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
arc.x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
arc.y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'c':
case 'C':
{
/*
Cubic Bézier curve.
*/
do
{
points[0]=point;
for (i=1; i < 4; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
points[i]=end;
}
for (i=0; i < 4; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,4) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'H':
case 'h':
{
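/*
Horizontal line to.
*/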
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'l':
case 'L':
{
/*
Line to.
*/
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'M':
case 'm':
{
/*
Move to.
*/
if (mvg_info->offset != subpath_offset)
{
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
number_coordinates+=primitive_info->coordinates;
primitive_info=q;
subpath_offset=mvg_info->offset;
}
i=0;
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
if (i == 0)
start=point;
i++;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'q':
case 'Q':
{
/*
Quadratic Bézier curve.
*/
do
{
points[0]=point;
for (i=1; i < 3; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (*p == ',')
p++;
end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
points[i]=end;
}
for (i=0; i < 3; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,3) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 's':
case 'S':
{
/*
Smooth (shorthand) cubic Bézier curve.
*/
do
{
points[0]=points[3];
points[1].x=2.0*points[3].x-points[2].x;
points[1].y=2.0*points[3].y-points[2].y;
for (i=2; i < 4; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (*p == ',')
p++;
end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
points[i]=end;
}
if (strchr("CcSs",last_attribute) == (char *) NULL)
{
points[0]=point;
points[1]=point;
}
for (i=0; i < 4; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,4) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
last_attribute=attribute;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 't':
case 'T':
{
/*
Smooth (shorthand) quadratic Bézier curve.
*/
do
{
points[0]=points[2];
points[1].x=2.0*points[2].x-points[1].x;
points[1].y=2.0*points[2].y-points[1].y;
for (i=2; i < 3; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
points[i]=end;
}
if (status == MagickFalse)
break;
if (strchr("QqTt",last_attribute) == (char *) NULL)
{
points[0]=point;
points[1]=point;
}
for (i=0; i < 3; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,3) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
last_attribute=attribute;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'v':
case 'V':
{
/*
Vertical line to.
*/
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'z':
case 'Z':
{
/*
Close path.
*/
point=start;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
primitive_info->closed_subpath=MagickTrue;
number_coordinates+=primitive_info->coordinates;
primitive_info=q;
subpath_offset=mvg_info->offset;
z_count++;
break;
}
default:
{
ThrowPointExpectedException(token,exception);
break;
}
}
}
if (status == MagickFalse)
return(-1);
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
number_coordinates+=primitive_info->coordinates;
for (i=0; i < (ssize_t) number_coordinates; i++)
{
q--;
q->primitive=primitive_type;
if (z_count > 1)
q->method=FillToBorderMethod;
}
q=primitive_info;
return((ssize_t) number_coordinates);
}
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
const PointInfo start,const PointInfo end)
{
PointInfo
point;
register PrimitiveInfo
*p;
register ssize_t
i;
p=primitive_info;
if (TracePoint(p,start) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
point.x=start.x;
point.y=end.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
if (TracePoint(p,end) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
point.x=end.x;
point.y=start.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
if (TracePoint(p,start) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickTrue;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
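/*
Editor's note (worked example, not part of the original source): for
start=(0,0) and end=(4,3) the rectangle is traced as the closed five-point
polygon (0,0), (0,3), (4,3), (4,0), (0,0).
*/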
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
const PointInfo start,const PointInfo end,PointInfo arc)
{
PointInfo
degrees,
point,
segment;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i;
ssize_t
offset;
offset=mvg_info->offset;
segment.x=fabs(end.x-start.x);
segment.y=fabs(end.y-start.y);
if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
{
(*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
return(MagickTrue);
}
if (arc.x > (0.5*segment.x))
arc.x=0.5*segment.x;
if (arc.y > (0.5*segment.y))
arc.y=0.5*segment.y;
point.x=start.x+segment.x-arc.x;
point.y=start.y+arc.y;
degrees.x=270.0;
degrees.y=360.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
point.x=start.x+segment.x-arc.x;
point.y=start.y+segment.y-arc.y;
degrees.x=0.0;
degrees.y=90.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
point.x=start.x+arc.x;
point.y=start.y+segment.y-arc.y;
degrees.x=90.0;
degrees.y=180.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
point.x=start.x+arc.x;
point.y=start.y+arc.y;
degrees.x=180.0;
degrees.y=270.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
mvg_info->offset=offset;
primitive_info=(*mvg_info->primitive_info)+offset;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickTrue;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info,
const size_t number_vertices,const double offset)
{
double
distance;
register double
dx,
dy;
register ssize_t
i;
ssize_t
j;
dx=0.0;
dy=0.0;
for (i=1; i < (ssize_t) number_vertices; i++)
{
dx=primitive_info[0].point.x-primitive_info[i].point.x;
dy=primitive_info[0].point.y-primitive_info[i].point.y;
if ((fabs((double) dx) >= MagickEpsilon) ||
(fabs((double) dy) >= MagickEpsilon))
break;
}
if (i == (ssize_t) number_vertices)
i=(ssize_t) number_vertices-1L;
distance=hypot((double) dx,(double) dy);
primitive_info[0].point.x=(double) (primitive_info[i].point.x+
dx*(distance+offset)/distance);
primitive_info[0].point.y=(double) (primitive_info[i].point.y+
dy*(distance+offset)/distance);
for (j=(ssize_t) number_vertices-2; j >= 0; j--)
{
dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
if ((fabs((double) dx) >= MagickEpsilon) ||
(fabs((double) dy) >= MagickEpsilon))
break;
}
distance=hypot((double) dx,(double) dy);
primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+
dx*(distance+offset)/distance);
primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+
dy*(distance+offset)/distance);
return(MagickTrue);
}
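/*
Editor's note (not part of the original source): TraceSquareLinecap()
pushes the first and last vertices outward along their segment directions
by `offset' pixels; TraceStrokePolygon() calls it with offset=mid (half
the scaled stroke width), which is how far a square cap extends past the
endpoint.
*/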
static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *draw_info,
const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
{
#define MaxStrokePad (6*BezierQuantum+360)
#define CheckPathExtent(pad_p,pad_q) \
{ \
if ((pad_p) > MaxBezierCoordinates) \
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
else \
if ((ssize_t) (p+(pad_p)) >= (ssize_t) extent_p) \
{ \
if (~extent_p < (pad_p)) \
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
else \
{ \
extent_p+=(pad_p); \
stroke_p=(PointInfo *) ResizeQuantumMemory(stroke_p,extent_p+ \
MaxStrokePad,sizeof(*stroke_p)); \
} \
} \
if ((pad_q) > MaxBezierCoordinates) \
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
else \
if ((ssize_t) (q+(pad_q)) >= (ssize_t) extent_q) \
{ \
if (~extent_q < (pad_q)) \
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
else \
{ \
extent_q+=(pad_q); \
stroke_q=(PointInfo *) ResizeQuantumMemory(stroke_q,extent_q+ \
MaxStrokePad,sizeof(*stroke_q)); \
} \
} \
if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) \
{ \
if (stroke_p != (PointInfo *) NULL) \
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
if (stroke_q != (PointInfo *) NULL) \
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
polygon_primitive=(PrimitiveInfo *) \
RelinquishMagickMemory(polygon_primitive); \
(void) ThrowMagickException(exception,GetMagickModule(), \
ResourceLimitError,"MemoryAllocationFailed","`%s'",""); \
return((PrimitiveInfo *) NULL); \
} \
}
typedef struct _StrokeSegment
{
double
p,
q;
} StrokeSegment;
double
delta_theta,
dot_product,
mid,
miterlimit;
MagickBooleanType
closed_path;
PointInfo
box_p[5],
box_q[5],
center,
offset,
*stroke_p,
*stroke_q;
PrimitiveInfo
*polygon_primitive,
*stroke_polygon;
register ssize_t
i;
size_t
arc_segments,
extent_p,
extent_q,
number_vertices;
ssize_t
j,
n,
p,
q;
StrokeSegment
dx = {0.0, 0.0},
dy = {0.0, 0.0},
inverse_slope = {0.0, 0.0},
slope = {0.0, 0.0},
theta = {0.0, 0.0};
/*
Allocate paths.
*/
number_vertices=primitive_info->coordinates;
polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
number_vertices+2UL,sizeof(*polygon_primitive));
if (polygon_primitive == (PrimitiveInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return((PrimitiveInfo *) NULL);
}
(void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
sizeof(*polygon_primitive));
offset.x=primitive_info[number_vertices-1].point.x-primitive_info[0].point.x;
offset.y=primitive_info[number_vertices-1].point.y-primitive_info[0].point.y;
closed_path=(fabs(offset.x) < MagickEpsilon) &&
(fabs(offset.y) < MagickEpsilon) ? MagickTrue : MagickFalse;
if (((draw_info->linejoin == RoundJoin) ||
(draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
{
polygon_primitive[number_vertices]=primitive_info[1];
number_vertices++;
}
polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
/*
Compute the slope for the first line segment, p.
*/
dx.p=0.0;
dy.p=0.0;
for (n=1; n < (ssize_t) number_vertices; n++)
{
dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
break;
}
if (n == (ssize_t) number_vertices)
{
if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
{
/*
Zero length subpath.
*/
stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
sizeof(*stroke_polygon));
stroke_polygon[0]=polygon_primitive[0];
stroke_polygon[0].coordinates=0;
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
polygon_primitive);
return(stroke_polygon);
}
n=(ssize_t) number_vertices-1L;
}
extent_p=2*number_vertices;
extent_q=2*number_vertices;
stroke_p=(PointInfo *) AcquireQuantumMemory((size_t) extent_p+MaxStrokePad,
sizeof(*stroke_p));
stroke_q=(PointInfo *) AcquireQuantumMemory((size_t) extent_q+MaxStrokePad,
sizeof(*stroke_q));
if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL))
{
if (stroke_p != (PointInfo *) NULL)
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
if (stroke_q != (PointInfo *) NULL)
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
polygon_primitive=(PrimitiveInfo *)
RelinquishMagickMemory(polygon_primitive);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return((PrimitiveInfo *) NULL);
}
slope.p=0.0;
inverse_slope.p=0.0;
if (fabs(dx.p) < MagickEpsilon)
{
if (dx.p >= 0.0)
slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
if (fabs(dy.p) < MagickEpsilon)
{
if (dy.p >= 0.0)
inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
{
slope.p=dy.p/dx.p;
inverse_slope.p=(-1.0/slope.p);
}
mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
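/*
Editor's note (not part of the original source): miterlimit is stored
pre-squared and scaled by mid*mid so that the join code below can compare
it directly against squared distances between the candidate miter points,
avoiding square roots.
*/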
if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
(void) TraceSquareLinecap(polygon_primitive,number_vertices,mid);
offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
offset.y=(double) (offset.x*inverse_slope.p);
if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
{
box_p[0].x=polygon_primitive[0].point.x-offset.x;
box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
box_p[1].x=polygon_primitive[n].point.x-offset.x;
box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
box_q[0].x=polygon_primitive[0].point.x+offset.x;
box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
box_q[1].x=polygon_primitive[n].point.x+offset.x;
box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
}
else
{
box_p[0].x=polygon_primitive[0].point.x+offset.x;
box_p[0].y=polygon_primitive[0].point.y+offset.y;
box_p[1].x=polygon_primitive[n].point.x+offset.x;
box_p[1].y=polygon_primitive[n].point.y+offset.y;
box_q[0].x=polygon_primitive[0].point.x-offset.x;
box_q[0].y=polygon_primitive[0].point.y-offset.y;
box_q[1].x=polygon_primitive[n].point.x-offset.x;
box_q[1].y=polygon_primitive[n].point.y-offset.y;
}
/*
Create strokes for the line join attribute: bevel, miter, round.
*/
p=0;
q=0;
stroke_q[p++]=box_q[0];
stroke_p[q++]=box_p[0];
for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
{
/*
Compute the slope for this line segment, q.
*/
dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
dot_product=dx.q*dx.q+dy.q*dy.q;
if (dot_product < 0.25)
continue;
slope.q=0.0;
inverse_slope.q=0.0;
if (fabs(dx.q) < MagickEpsilon)
{
if (dx.q >= 0.0)
slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
if (fabs(dy.q) < MagickEpsilon)
{
if (dy.q >= 0.0)
inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
{
slope.q=dy.q/dx.q;
inverse_slope.q=(-1.0/slope.q);
}
offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
offset.y=(double) (offset.x*inverse_slope.q);
dot_product=dy.q*offset.x-dx.q*offset.y;
if (dot_product > 0.0)
{
box_p[2].x=polygon_primitive[n].point.x-offset.x;
box_p[2].y=polygon_primitive[n].point.y-offset.y;
box_p[3].x=polygon_primitive[i].point.x-offset.x;
box_p[3].y=polygon_primitive[i].point.y-offset.y;
box_q[2].x=polygon_primitive[n].point.x+offset.x;
box_q[2].y=polygon_primitive[n].point.y+offset.y;
box_q[3].x=polygon_primitive[i].point.x+offset.x;
box_q[3].y=polygon_primitive[i].point.y+offset.y;
}
else
{
box_p[2].x=polygon_primitive[n].point.x+offset.x;
box_p[2].y=polygon_primitive[n].point.y+offset.y;
box_p[3].x=polygon_primitive[i].point.x+offset.x;
box_p[3].y=polygon_primitive[i].point.y+offset.y;
box_q[2].x=polygon_primitive[n].point.x-offset.x;
box_q[2].y=polygon_primitive[n].point.y-offset.y;
box_q[3].x=polygon_primitive[i].point.x-offset.x;
box_q[3].y=polygon_primitive[i].point.y-offset.y;
}
if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
{
box_p[4]=box_p[1];
box_q[4]=box_q[1];
}
else
{
box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
box_p[3].y)/(slope.p-slope.q));
box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
box_q[3].y)/(slope.p-slope.q));
box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
}
CheckPathExtent(MaxStrokePad,MaxStrokePad);
dot_product=dx.q*dy.p-dx.p*dy.q;
if (dot_product <= 0.0)
switch (draw_info->linejoin)
{
case BevelJoin:
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_p[p++]=box_p[4];
else
{
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
stroke_q[q++]=box_q[4];
stroke_p[p++]=box_p[4];
}
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_p[p++]=box_p[4];
else
{
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
center=polygon_primitive[n].point;
theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
if (theta.q < theta.p)
theta.q+=2.0*MagickPI;
arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
(2.0*sqrt((double) (1.0/mid)))));
CheckPathExtent(MaxStrokePad,arc_segments+MaxStrokePad);
stroke_q[q].x=box_q[1].x;
stroke_q[q].y=box_q[1].y;
q++;
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
stroke_q[q].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
stroke_q[q].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
q++;
}
stroke_q[q++]=box_q[2];
break;
}
default:
break;
}
else
switch (draw_info->linejoin)
{
case BevelJoin:
{
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_q[q++]=box_q[4];
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
stroke_q[q++]=box_q[4];
stroke_p[p++]=box_p[4];
}
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_q[q++]=box_q[4];
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
}
center=polygon_primitive[n].point;
theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
if (theta.p < theta.q)
theta.p+=2.0*MagickPI;
arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
(2.0*sqrt((double) (1.0/mid)))));
CheckPathExtent(arc_segments+MaxStrokePad,MaxStrokePad);
stroke_p[p++]=box_p[1];
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
stroke_p[p].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
stroke_p[p].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
p++;
}
stroke_p[p++]=box_p[2];
break;
}
default:
break;
}
slope.p=slope.q;
inverse_slope.p=inverse_slope.q;
box_p[0]=box_p[2];
box_p[1]=box_p[3];
box_q[0]=box_q[2];
box_q[1]=box_q[3];
dx.p=dx.q;
dy.p=dy.q;
n=i;
}
stroke_p[p++]=box_p[1];
stroke_q[q++]=box_q[1];
/*
Trace stroked polygon.
*/
stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
(p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
if (stroke_polygon == (PrimitiveInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
polygon_primitive);
return(stroke_polygon);
}
for (i=0; i < (ssize_t) p; i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_p[i];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
}
for ( ; i < (ssize_t) (p+q+closed_path); i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_q[p+q+closed_path-(i+1)];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
i++;
}
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
stroke_polygon[i].primitive=UndefinedPrimitive;
stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
return(stroke_polygon);
}
|
dahua_fmt_plug.c | /*
* Format for cracking Dahua hashes.
*
* http://www.securityfocus.com/archive/1/529799
* https://github.com/depthsecurity/dahua_dvr_auth_bypass
*
* This software is Copyright (c) 2014 Dhiru Kholia <dhiru at openwall.com>,
* and it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_dahua;
#elif FMT_REGISTERS_H
john_register_one(&fmt_dahua);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#define OMP_SCALE 2048 // XXX
#endif
#include "arch.h"
#include "md5.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"
#include <ctype.h>
#define FORMAT_LABEL "dahua"
#define FORMAT_NAME "\"MD5 based authentication\" Dahua"
#define FORMAT_TAG "$dahua$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 8
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_SIZE 0
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests tests[] = {
{"$dahua$4WzwxXxM", "888888"}, // from hashcat.net
{"$dahua$HRG6OLE6", "Do You Even Lift?"},
{"$dahua$sh15yfFM", "666666"},
{"$dahua$6QNMIQGe", "admin"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *saved_len;
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc_tiny(sizeof(*saved_key) *
self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
saved_len = mem_calloc_tiny(sizeof(*saved_len) *
self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
crypt_out = mem_calloc_tiny(sizeof(*crypt_out) *
self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p = ciphertext;
int i;
if (strncmp(p, FORMAT_TAG, TAG_LENGTH) != 0)
return 0;
p = p + TAG_LENGTH;
if (!p)
return 0;
if (strlen(p) != BINARY_SIZE)
return 0;
for (i = 0; i < BINARY_SIZE; i++)
if (!isalnum((int)(unsigned char)p[i]))
return 0;
return 1;
}
static void *get_binary(char *ciphertext)
{
static union {
char c[BINARY_SIZE];
ARCH_WORD dummy;
} buf;
char *p;
char *out = buf.c;
p = strrchr(ciphertext, '$') + 1;
strncpy(out, p, BINARY_SIZE);
return out;
}
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }
// from hashcat.net (alxchk)
static void compressor(unsigned char *in, unsigned char *out)
{
int i, j;
for (i = 0, j = 0; i < 16; i += 2, j++) {
out[j] = (in[i] + in[i+1]) % 62;
if (out[j] < 10) {
out[j] += 48;
} else if (out[j] < 36) {
out[j] += 55;
} else {
out[j] += 61;
}
}
}
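/*
 * Illustrative sketch only (not used by the format driver): a Dahua hash is
 * compressor(MD5(password)), i.e. 8 alphanumeric bytes that are compared
 * against the value following the "$dahua$" tag. The hypothetical helper
 * below assumes only the MD5_* API from "md5.h" included above.
 */
static void dahua_hash_example(const char *password, unsigned char out[9])
{
	MD5_CTX ctx;
	unsigned char digest[16];

	MD5_Init(&ctx);
	MD5_Update(&ctx, (unsigned char *)password, strlen(password));
	MD5_Final(digest, &ctx);
	compressor(digest, out); /* 16-byte digest -> 8 printable bytes */
	out[8] = '\0';
}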
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
// hash is compressor(md5(password))
MD5_CTX ctx;
unsigned char *out = (unsigned char*)crypt_out[index];
unsigned char hash[16];
MD5_Init(&ctx);
MD5_Update(&ctx, saved_key[index], saved_len[index]);
MD5_Final(hash, &ctx);
compressor(hash, out);
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
#ifdef _OPENMP
for (; index < count; index++)
#endif
if (!memcmp(binary, crypt_out[index], BINARY_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void dahua_set_key(char *key, int index)
{
saved_len[index] = strlen(key);
strncpy(saved_key[index], key, sizeof(saved_key[0]));
}
static char *get_key(int index)
{
return saved_key[index];
}
struct fmt_main fmt_dahua = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
tests
}, {
init,
fmt_default_done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
fmt_default_salt,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
fmt_default_set_salt,
dahua_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif
|
AI_model1.c | #include"AI.h"
#include <omp.h>
#define MAXSTEP 3
//#define CHECK_SCORE
//This is for simple spawn
// the simulation function for branches of the search tree
int ai_model1_simulate(GameState *gameState, Player *player, int depth)
{
if(depth<=0)return ai_sum_scores(gameState,player);
int MaxScore=-60000;
int playerTurn=gameState->playerTurn;
int total_num_moves=0;
vector MovesStart,MovesEnd;
vector_init(&MovesStart);
vector_init(&MovesEnd);
int cnt=0;
for(int i=0;i<64;i++)
{
vector CurLegalMoves=env_get_legal_moves(gameState,player,i);
cnt=CurLegalMoves.count;
if(cnt>0){
vector_cat(&MovesEnd,&CurLegalMoves);
for(int j=0;j<cnt;j++) vector_add(&MovesStart,i);
}
vector_free(&CurLegalMoves);
total_num_moves+=cnt;
}
assert(MovesStart.count==MovesEnd.count);
int *Scores=malloc(sizeof(int)*total_num_moves);
omp_set_num_threads(2);
#pragma omp parallel for shared(total_num_moves,gameState,player,MovesStart,MovesEnd,depth,Scores,playerTurn)
for(int i=0;i<total_num_moves;i++)
{
GameState simulation=env_copy_State(gameState);
env_play(&simulation,player,vector_get(&MovesStart,i),vector_get(&MovesEnd,i));
int score=playerTurn*ai_model1_simulate(&simulation,player,depth-1);
Scores[i]=score;
env_free_state(&simulation);
}
for(int i=0;i<total_num_moves;i++)MaxScore=MAX(MaxScore,Scores[i]);
vector_free(&MovesStart);
vector_free(&MovesEnd);
free(Scores);
return MaxScore*playerTurn;
}
// the play function for the root of the search tree; returns the result of env_check_end when the game has ended, otherwise 0
int ai_model1_play(GameState *gameState, Player *player, int maxStep)
{
int check_end=env_check_end(gameState,player);
if(check_end!=0)
{
env_free_container(gameState);
return check_end;
}
int MaxScore=-60000;
int score;
// vector MovesStart,MovesEnd,Scores;
// vector_init(&BestMovesID);
// vector_init(&MovesStart);
// vector_init(&MovesEnd);
// vector_init(&Scores);
int container_size=gameState->moves_vector_cnt;
int total_num_moves=0;
int *accu_container_size_arr=malloc(sizeof(int)*gameState->moves_vector_cnt);
for(int i=0;i<container_size;i++){
total_num_moves+=gameState->container[i].legal_moves.count;
if(i==0)accu_container_size_arr[0]=0;
else accu_container_size_arr[i]=accu_container_size_arr[i-1]+gameState->container[i-1].legal_moves.count;
}
int *MovesStart=malloc(sizeof(int)*total_num_moves);
int *MovesEnd=malloc(sizeof(int)*total_num_moves);
int *Scores=malloc(sizeof(int)*total_num_moves);
omp_set_num_threads(16);
omp_set_nested(1);
#pragma omp parallel for shared(MovesStart,MovesEnd)
for(int i=0;i<container_size;i++)
{
vector CurLegalMoves=gameState->container[i].legal_moves;
int cnt=CurLegalMoves.count;
int pos=gameState->container[i].pos;
for(int j=0;j<cnt;j++){
MovesStart[accu_container_size_arr[i]+j]=pos;
MovesEnd[accu_container_size_arr[i]+j]=vector_get(&CurLegalMoves,j);
}
}
// assert(MovesStart.count==MovesEnd.count);
int playerTurn=gameState->playerTurn;
// score must be private, otherwise the threads race on the shared variable
#pragma omp parallel for shared(Scores) private(score)
for(int i=0;i<total_num_moves;i++)
{
GameState simulation=env_copy_State(gameState);
env_play(&simulation,player,MovesStart[i],MovesEnd[i]);
score=playerTurn*ai_model1_simulate(&simulation,player,maxStep);
Scores[i]=score;
env_free_state(&simulation);
}
int BestMovesCnt=0;
vector BestMovesID;
vector_init(&BestMovesID);
if(stack_check_repeated_move(gameState->moves_stack)){
int MaxScoresArr[6];
for(int i=0;i<6;i++)MaxScoresArr[i]=-60000;
int MinScoreID,MinScoreArrValue;
for(int i=0;i<total_num_moves;i++){
MinScoreArrValue=MaxScoresArr[0];
MinScoreID=0;
for(int j=1;j<6;j++){
if(MaxScoresArr[j]<MinScoreArrValue){
MinScoreArrValue=MaxScoresArr[j];
MinScoreID=j;
}
}
MaxScoresArr[MinScoreID]=MAX(MaxScoresArr[MinScoreID],Scores[i]);
}
for(int i=0;i<total_num_moves;i++){
for(int j=0;j<6;j++){
if(Scores[i]==MaxScoresArr[j]){
vector_add(&BestMovesID,i);
BestMovesCnt++;
}
}
}
}
else{
for(int i=0;i<total_num_moves;i++)MaxScore=MAX(MaxScore,Scores[i]);
for(int i=0;i<total_num_moves;i++){
if(Scores[i]==MaxScore){
vector_add(&BestMovesID,i);
BestMovesCnt++;
}
}
}
int id=vector_get(&BestMovesID,rand()%BestMovesCnt);
#ifdef CHECK_SCORE
printf("It is %d playing\n",gameState->playerTurn);
ai_print_board(gameState);
printf("Current Score is %d\n",ai_sum_scores(gameState,player));
#endif
env_play(gameState,player,MovesStart[id],MovesEnd[id]);
#ifdef CHECK_SCORE
printf("The player has decided to move from %d to %d\n",vector_get(&MovesStart,id),vector_get(&MovesEnd,id));
ai_print_board(gameState);
printf("After making the move, the score is %d\n",ai_sum_scores(gameState,player));
#endif
vector_free(&BestMovesID);
free(MovesStart);
free(MovesEnd);
free(Scores);
env_free_container(gameState);
return 0;
}
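/*
 * Usage sketch (assumption: the GameState/Player setup helpers are declared
 * elsewhere in AI.h): the root search is typically driven once per turn
 * until env_check_end() reports a terminal state, which ai_model1_play
 * passes back to the caller.
 *
 *     int result;
 *     while ((result = ai_model1_play(&gameState, &player, MAXSTEP)) == 0)
 *         ;
 */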
|
csr_matvec.c | /*BHEADER**********************************************************************
* Copyright (c) 2006 The Regents of the University of California.
* Produced at the Lawrence Livermore National Laboratory.
* Written by the HYPRE team. UCRL-CODE-222953.
* All rights reserved.
*
* This file is part of HYPRE (see http://www.llnl.gov/CASC/hypre/).
* Please see the COPYRIGHT_and_LICENSE file for the copyright notice,
* disclaimer, contact information and the GNU Lesser General Public License.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU General Public License (as published by the Free Software
* Foundation) version 2.1 dated February 1999.
*
* HYPRE is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the terms and conditions of the GNU General
* Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* $Revision: 2.10 $
***********************************************************************EHEADER*/
/******************************************************************************
*
* Matvec functions for hypre_CSRMatrix class.
*
*****************************************************************************/
#include "headers.h"
#include <assert.h>
#include "omp.h"
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvec
*--------------------------------------------------------------------------*/
int
hypre_CSRMatrixMatvec( double alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
double beta,
hypre_Vector *y )
{
double *A_data = hypre_CSRMatrixData(A);
int *A_i = hypre_CSRMatrixI(A);
int *A_j = hypre_CSRMatrixJ(A);
int num_rows = hypre_CSRMatrixNumRows(A);
int num_cols = hypre_CSRMatrixNumCols(A);
int *A_rownnz = hypre_CSRMatrixRownnz(A);
int num_rownnz = hypre_CSRMatrixNumRownnz(A);
double *x_data = hypre_VectorData(x);
double *y_data = hypre_VectorData(y);
int x_size = hypre_VectorSize(x);
int y_size = hypre_VectorSize(y);
int num_vectors = hypre_VectorNumVectors(x);
int idxstride_y = hypre_VectorIndexStride(y);
int vecstride_y = hypre_VectorVectorStride(y);
int idxstride_x = hypre_VectorIndexStride(x);
int vecstride_x = hypre_VectorVectorStride(x);
double temp, tempx;
int i, j, jj;
int m;
double xpar=0.7;
int ierr = 0;
/*---------------------------------------------------------------------
* Check for size compatibility. Matvec returns ierr = 1 if
* length of X doesn't equal the number of columns of A,
* ierr = 2 if the length of Y doesn't equal the number of rows
* of A, and ierr = 3 if both are true.
*
* Because temporary vectors are often used in Matvec, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
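/*---------------------------------------------------------------------
 * The routine evaluates y = alpha*A*x + beta*y in three phases:
 * first y is scaled by beta/alpha, then A*x is accumulated into y,
 * and finally y is scaled by alpha. This keeps the multiply by alpha
 * out of the sparse inner loops below.
 *--------------------------------------------------------------------*/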
//__WHATIF__BEGIN__
hypre_assert( num_vectors == hypre_VectorNumVectors(y) );
if (num_cols != x_size)
ierr = 1;
if (num_rows != y_size)
ierr = 2;
if (num_cols != x_size && num_rows != y_size)
ierr = 3;
/*-----------------------------------------------------------------------
* Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
*-----------------------------------------------------------------------*/
if (alpha == 0.0)
{
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] *= beta;
return ierr;
}
/*-----------------------------------------------------------------------
* y = (beta/alpha)*y
*-----------------------------------------------------------------------*/
temp = beta / alpha;
if (temp != 1.0)
{
if (temp == 0.0)
{
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] = 0.0;
}
else
{
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] *= temp;
}
}
//__WHATIF__END__
/*-----------------------------------------------------------------
* y += A*x
*-----------------------------------------------------------------*/
/* use rownnz pointer to do the A*x multiplication when num_rownnz is smaller than num_rows */
if (num_rownnz < xpar*(num_rows))
{
for (i = 0; i < num_rownnz; i++)
{
m = A_rownnz[i];
/*
* for (jj = A_i[m]; jj < A_i[m+1]; jj++)
* {
* j = A_j[jj];
* y_data[m] += A_data[jj] * x_data[j];
* } */
if ( num_vectors==1 )
{
tempx = y_data[m];
for (jj = A_i[m]; jj < A_i[m+1]; jj++)
tempx += A_data[jj] * x_data[A_j[jj]];
y_data[m] = tempx;
}
else
for ( j=0; j<num_vectors; ++j )
{
tempx = y_data[ j*vecstride_y + m*idxstride_y ];
for (jj = A_i[m]; jj < A_i[m+1]; jj++)
tempx += A_data[jj] * x_data[ j*vecstride_x + A_j[jj]*idxstride_x ];
y_data[ j*vecstride_y + m*idxstride_y] = tempx;
}
}
}
else
{
#pragma omp parallel for private(i,j,jj,temp) schedule(dynamic, num_rows/16)
//#pragma omp parallel for private(i,jj,temp) schedule(dynamic)
for (i = 0; i < num_rows; i++)
{
if ( num_vectors==1 )
{
temp = y_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
temp += A_data[jj] * x_data[A_j[jj]];
y_data[i] = temp;
}
else
for ( j=0; j<num_vectors; ++j )
{
temp = y_data[ j*vecstride_y + i*idxstride_y ];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
temp += A_data[jj] * x_data[ j*vecstride_x + A_j[jj]*idxstride_x ];
}
y_data[ j*vecstride_y + i*idxstride_y ] = temp;
}
}
}
/*-----------------------------------------------------------------
* y = alpha*y
*-----------------------------------------------------------------*/
if (alpha != 1.0)
{
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] *= alpha;
}
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvecT
*
* Performs y <- alpha * A^T * x + beta * y
*
* From Van Henson's modification of hypre_CSRMatrixMatvec.
*--------------------------------------------------------------------------*/
int
hypre_CSRMatrixMatvecT( double alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
double beta,
hypre_Vector *y )
{
double *A_data = hypre_CSRMatrixData(A);
int *A_i = hypre_CSRMatrixI(A);
int *A_j = hypre_CSRMatrixJ(A);
int num_rows = hypre_CSRMatrixNumRows(A);
int num_cols = hypre_CSRMatrixNumCols(A);
double *x_data = hypre_VectorData(x);
double *y_data = hypre_VectorData(y);
int x_size = hypre_VectorSize(x);
int y_size = hypre_VectorSize(y);
int num_vectors = hypre_VectorNumVectors(x);
int idxstride_y = hypre_VectorIndexStride(y);
int vecstride_y = hypre_VectorVectorStride(y);
int idxstride_x = hypre_VectorIndexStride(x);
int vecstride_x = hypre_VectorVectorStride(x);
double temp;
int i, i1, j, jv, jj, ns, ne, size, rest;
int num_threads;
int ierr = 0;
/*---------------------------------------------------------------------
* Check for size compatibility. MatvecT returns ierr = 1 if
* length of X doesn't equal the number of rows of A,
* ierr = 2 if the length of Y doesn't equal the number of
* columns of A, and ierr = 3 if both are true.
*
* Because temporary vectors are often used in MatvecT, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
hypre_assert( num_vectors == hypre_VectorNumVectors(y) );
if (num_rows != x_size)
ierr = 1;
if (num_cols != y_size)
ierr = 2;
if (num_rows != x_size && num_cols != y_size)
ierr = 3;
/*-----------------------------------------------------------------------
* Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
*-----------------------------------------------------------------------*/
if (alpha == 0.0)
{
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] *= beta;
return ierr;
}
/*-----------------------------------------------------------------------
* y = (beta/alpha)*y
*-----------------------------------------------------------------------*/
temp = beta / alpha;
if (temp != 1.0)
{
if (temp == 0.0)
{
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] = 0.0;
}
else
{
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] *= temp;
}
}
/*-----------------------------------------------------------------
* y += A^T*x
*-----------------------------------------------------------------*/
num_threads = hypre_NumThreads();
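/* Each pass of the i1 loop below handles one contiguous block of columns
 * j with ns < j < ne: the first `rest' blocks cover size+1 columns and the
 * remaining blocks cover size columns, so together they tile all num_cols
 * columns of A^T without overlap. */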
if (num_threads > 1)
{
for (i1 = 0; i1 < num_threads; i1++)
{
size = num_cols/num_threads;
rest = num_cols - size*num_threads;
if (i1 < rest)
{
ns = i1*size+i1-1;
ne = (i1+1)*size+i1+1;
}
else
{
ns = i1*size+rest-1;
ne = (i1+1)*size+rest;
}
if ( num_vectors==1 )
{
for (i = 0; i < num_rows; i++)
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
if (j > ns && j < ne)
y_data[j] += A_data[jj] * x_data[i];
}
}
}
else
{
for (i = 0; i < num_rows; i++)
{
for ( jv=0; jv<num_vectors; ++jv )
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
if (j > ns && j < ne)
y_data[ j*idxstride_y + jv*vecstride_y ] +=
A_data[jj] * x_data[ i*idxstride_x + jv*vecstride_x];
}
}
}
}
}
}
else
{
for (i = 0; i < num_rows; i++)
{
if ( num_vectors==1 )
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data[j] += A_data[jj] * x_data[i];
}
}
else
{
for ( jv=0; jv<num_vectors; ++jv )
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data[ j*idxstride_y + jv*vecstride_y ] +=
A_data[jj] * x_data[ i*idxstride_x + jv*vecstride_x ];
}
}
}
}
}
/*-----------------------------------------------------------------
* y = alpha*y
*-----------------------------------------------------------------*/
if (alpha != 1.0)
{
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] *= alpha;
}
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvec_FF
*--------------------------------------------------------------------------*/
int
hypre_CSRMatrixMatvec_FF( double alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
double beta,
hypre_Vector *y,
int *CF_marker_x,
int *CF_marker_y,
int fpt )
{
double *A_data = hypre_CSRMatrixData(A);
int *A_i = hypre_CSRMatrixI(A);
int *A_j = hypre_CSRMatrixJ(A);
int num_rows = hypre_CSRMatrixNumRows(A);
int num_cols = hypre_CSRMatrixNumCols(A);
double *x_data = hypre_VectorData(x);
double *y_data = hypre_VectorData(y);
int x_size = hypre_VectorSize(x);
int y_size = hypre_VectorSize(y);
double temp;
int i, jj;
int ierr = 0;
/*---------------------------------------------------------------------
* Check for size compatibility. Matvec returns ierr = 1 if
* length of X doesn't equal the number of columns of A,
* ierr = 2 if the length of Y doesn't equal the number of rows
* of A, and ierr = 3 if both are true.
*
* Because temporary vectors are often used in Matvec, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
if (num_cols != x_size)
ierr = 1;
if (num_rows != y_size)
ierr = 2;
if (num_cols != x_size && num_rows != y_size)
ierr = 3;
/*-----------------------------------------------------------------------
* Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
*-----------------------------------------------------------------------*/
if (alpha == 0.0)
{
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] *= beta;
return ierr;
}
/*-----------------------------------------------------------------------
* y = (beta/alpha)*y
*-----------------------------------------------------------------------*/
temp = beta / alpha;
if (temp != 1.0)
{
if (temp == 0.0)
{
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] = 0.0;
}
else
{
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] *= temp;
}
}
/*-----------------------------------------------------------------
* y += A*x
*-----------------------------------------------------------------*/
for (i = 0; i < num_rows; i++)
{
if (CF_marker_x[i] == fpt)
{
temp = y_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
if (CF_marker_y[A_j[jj]] == fpt) temp += A_data[jj] * x_data[A_j[jj]];
y_data[i] = temp;
}
}
/*-----------------------------------------------------------------
* y = alpha*y
*-----------------------------------------------------------------*/
if (alpha != 1.0)
{
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] *= alpha;
}
return ierr;
}
|
convolution_1x1_int8.h | // SenseNets is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2018 SenseNets Technology Ltd. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
static void conv1x1s1_sgemm_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
const signed char* kernel = _kernel;
kernel_tm.create(4*4, inch/4 + inch%4, outch/4 + outch%4, (size_t)1u);
int p = 0;
for (; p+3<outch; p+=4)
{
const signed char* kernel0 = kernel + (p+0)*inch;
const signed char* kernel1 = kernel + (p+1)*inch;
const signed char* kernel2 = kernel + (p+2)*inch;
const signed char* kernel3 = kernel + (p+3)*inch;
signed char* ktmp = kernel_tm.channel(p/4);
for (int q=0; q<inch; q++)
{
// kernel0...3 0
ktmp[0] = kernel0[0];
ktmp[1] = kernel1[0];
ktmp[2] = kernel2[0];
ktmp[3] = kernel3[0];
ktmp += 4;
kernel0 += 1;
kernel1 += 1;
kernel2 += 1;
kernel3 += 1;
}
}
for (; p<outch; p++)
{
const signed char* kernel0 = kernel + p*inch;
signed char* ktmp = kernel_tm.channel(p/4 + p%4);
for (int q=0; q<inch; q++)
{
ktmp[0] = kernel0[0];
ktmp++;
kernel0++;
}
}
}
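// Plain-C reference for what the NEON/assembly kernels below compute
// (illustrative sketch only, not used by ncnn): a 1x1 stride-1 int8
// convolution is a per-pixel matrix-vector product over the input channels.
// It assumes densely packed channel-major buffers, i.e. bottom[q*size + i]
// and kernel[p*inch + q]; real ncnn Mats may pad each channel.
static void conv1x1s1_int8_reference(const signed char* bottom, int inch,
                                     const signed char* kernel, int outch,
                                     int* top, int size /* outw*outh */)
{
    for (int p = 0; p < outch; p++)
    {
        for (int i = 0; i < size; i++)
        {
            int sum = 0;
            for (int q = 0; q < inch; q++)
                sum += kernel[p * inch + q] * bottom[q * size + i];
            top[p * size + i] = sum;
        }
    }
}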
#if __aarch64__
/*
* Convolution 1x1 quantized with int8,unroll 16 x 8
*/
static void conv1x1s1_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const signed char* kernel = _kernel;
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 8;
Mat out0 = top_blob.channel(p);
Mat out1 = top_blob.channel(p+1);
Mat out2 = top_blob.channel(p+2);
Mat out3 = top_blob.channel(p+3);
Mat out4 = top_blob.channel(p+4);
Mat out5 = top_blob.channel(p+5);
Mat out6 = top_blob.channel(p+6);
Mat out7 = top_blob.channel(p+7);
out0.fill(0);
out1.fill(0);
out2.fill(0);
out3.fill(0);
out4.fill(0);
out5.fill(0);
out6.fill(0);
out7.fill(0);
int q = 0;
#ifdef __clang__
for (; q+15<inch; q+=16)
{
int* outptr0 = out0;
int* outptr1 = out1;
int* outptr2 = out2;
int* outptr3 = out3;
int* outptr4 = out4;
int* outptr5 = out5;
int* outptr6 = out6;
int* outptr7 = out7;
const signed char* kernel0 = (const signed char*)kernel + p*inch + q;
const signed char* kernel1 = (const signed char*)kernel + (p+1)*inch + q;
const signed char* kernel2 = (const signed char*)kernel + (p+2)*inch + q;
const signed char* kernel3 = (const signed char*)kernel + (p+3)*inch + q;
const signed char* kernel4 = (const signed char*)kernel + (p+4)*inch + q;
const signed char* kernel5 = (const signed char*)kernel + (p+5)*inch + q;
const signed char* kernel6 = (const signed char*)kernel + (p+6)*inch + q;
const signed char* kernel7 = (const signed char*)kernel + (p+7)*inch + q;
const signed char* r0 = bottom_blob.channel(q);
const signed char* r1 = bottom_blob.channel(q+1);
const signed char* r2 = bottom_blob.channel(q+2);
const signed char* r3 = bottom_blob.channel(q+3);
const signed char* r4 = bottom_blob.channel(q+4);
const signed char* r5 = bottom_blob.channel(q+5);
const signed char* r6 = bottom_blob.channel(q+6);
const signed char* r7 = bottom_blob.channel(q+7);
const signed char* r8 = bottom_blob.channel(q+8);
const signed char* r9 = bottom_blob.channel(q+9);
const signed char* r10 = bottom_blob.channel(q+10);
const signed char* r11 = bottom_blob.channel(q+11);
const signed char* r12 = bottom_blob.channel(q+12);
const signed char* r13 = bottom_blob.channel(q+13);
const signed char* r14 = bottom_blob.channel(q+14);
const signed char* r15 = bottom_blob.channel(q+15);
int size = outw * outh;
int nn = size >> 4;
int remain = size & 15;
int8x16_t _k0 = vld1q_s8(kernel0);
int8x16_t _k1 = vld1q_s8(kernel1);
int8x16_t _k2 = vld1q_s8(kernel2);
int8x16_t _k3 = vld1q_s8(kernel3);
int8x16_t _k4 = vld1q_s8(kernel4);
int8x16_t _k5 = vld1q_s8(kernel5);
int8x16_t _k6 = vld1q_s8(kernel6);
int8x16_t _k7 = vld1q_s8(kernel7);
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%9, #128] \n"
"prfm pldl1keep, [%10, #128] \n"
"prfm pldl1keep, [%11, #128] \n"
"prfm pldl1keep, [%12, #128] \n"
"ld1 {v8.16b}, [%9], #16 \n" // r0"
"ld1 {v9.16b}, [%10], #16 \n" // r1"
"ld1 {v10.16b}, [%11], #16 \n" // r2"
"ld1 {v11.16b}, [%12], #16 \n" // r3"
"dup v24.16b, %50.b[0] \n" // k00
"dup v25.16b, %50.b[1] \n" // k01
"dup v26.16b, %50.b[2] \n" // k02
"dup v27.16b, %50.b[3] \n" // k03
"0: \n"
"smull v28.8h, v8.8b, v24.8b \n" // r0 * k0
"smull2 v31.8h, v8.16b, v24.16b \n" // r0n * k0
"prfm pldl1keep, [%13, #128] \n"
"prfm pldl1keep, [%14, #128] \n"
"prfm pldl1keep, [%15, #128] \n"
"smlal v28.8h, v9.8b, v25.8b \n" // r0 * k1
"smlal2 v31.8h, v9.16b, v25.16b \n" // r0n * k1
"prfm pldl1keep, [%16, #128] \n"
"ld1 {v12.16b}, [%13], #16 \n" // r4"
"ld1 {v13.16b}, [%14], #16 \n" // r5"
"smlal v28.8h, v10.8b, v26.8b \n"
"smlal2 v31.8h, v10.16b, v26.16b \n"
"ld1 {v14.16b}, [%15], #16 \n" // r6"
"ld1 {v15.16b}, [%16], #16 \n" // r7"
"dup v24.16b, %50.b[4] \n" // k04
"smlal v28.8h, v11.8b, v27.8b \n"
"smlal2 v31.8h, v11.16b, v27.16b \n"
"dup v25.16b, %50.b[5] \n" // k05
"dup v26.16b, %50.b[6] \n" // k06
"dup v27.16b, %50.b[7] \n" // k07
"smlal v28.8h, v12.8b, v24.8b \n" // r4
"smlal2 v31.8h, v12.16b, v24.16b \n" // r4
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v29.4s, v30.4s}, [%1] \n" // sum0
"prfm pldl1keep, [%17, #128] \n"
"smlal v28.8h, v13.8b, v25.8b \n"
"smlal2 v31.8h, v13.16b, v25.16b \n"
"prfm pldl1keep, [%18, #128] \n"
"prfm pldl1keep, [%19, #128] \n"
"prfm pldl1keep, [%20, #128] \n"
"ld1 {v16.16b}, [%17], #16 \n" // r8"
"smlal v28.8h, v14.8b, v26.8b \n"
"smlal2 v31.8h, v14.16b, v26.16b \n"
"ld1 {v17.16b}, [%18], #16 \n" // r9"
"ld1 {v18.16b}, [%19], #16 \n" // r10"
"ld1 {v19.16b}, [%20], #16 \n" // r11"
"smlal v28.8h, v15.8b, v27.8b \n"
"smlal2 v31.8h, v15.16b, v27.16b \n"
"dup v24.16b, %50.b[8] \n" // k08
"dup v25.16b, %50.b[9] \n" // k09
"dup v26.16b, %50.b[10] \n" // k10
"smlal v28.8h, v16.8b, v24.8b \n" // r8
"smlal2 v31.8h, v16.16b, v24.16b \n" // r8
"dup v27.16b, %50.b[11] \n" // k11
"prfm pldl1keep, [%21, #128] \n"
"prfm pldl1keep, [%22, #128] \n"
"smlal v28.8h, v17.8b, v25.8b \n"
"smlal2 v31.8h, v17.16b, v25.16b \n"
"prfm pldl1keep, [%23, #128] \n"
"prfm pldl1keep, [%24, #128] \n"
"ld1 {v20.16b}, [%21], #16 \n" // r12"
"smlal v28.8h, v18.8b, v26.8b \n"
"smlal2 v31.8h, v18.16b, v26.16b \n"
"ld1 {v21.16b}, [%22], #16 \n" // r13"
"ld1 {v22.16b}, [%23], #16 \n" // r14"
"ld1 {v23.16b}, [%24], #16 \n" // r15"
"smlal v28.8h, v19.8b, v27.8b \n"
"smlal2 v31.8h, v19.16b, v27.16b \n"
"dup v24.16b, %50.b[12] \n" // k12
"dup v25.16b, %50.b[13] \n" // k13
"dup v26.16b, %50.b[14] \n" // k14
"smlal v28.8h, v20.8b, v24.8b \n" // r12
"smlal2 v31.8h, v20.16b, v24.16b \n" // r12
"dup v27.16b, %50.b[15] \n" // k15
"smlal v28.8h, v21.8b, v25.8b \n"
"smlal2 v31.8h, v21.16b, v25.16b \n"
"dup v24.16b, %51.b[0] \n" // k00
"smlal v28.8h, v22.8b, v26.8b \n"
"smlal2 v31.8h, v22.16b, v26.16b \n"
"dup v25.16b, %51.b[1] \n" // k01
"smlal v28.8h, v23.8b, v27.8b \n"
"smlal2 v31.8h, v23.16b, v27.16b \n"
"dup v26.16b, %51.b[2] \n" // k02
"saddw v29.4s, v29.4s, v28.4h \n"
"saddw2 v30.4s, v30.4s, v28.8h \n"
"dup v27.16b, %51.b[3] \n" // k03
"st1 {v29.4s, v30.4s}, [%1], #32 \n" // sum0
"ld1 {v29.4s, v30.4s}, [%1] \n" // sum0
"saddw v29.4s, v29.4s, v31.4h \n"
"saddw2 v30.4s, v30.4s, v31.8h \n"
"st1 {v29.4s, v30.4s}, [%1], #32 \n" // sum0
//###########################################
"smull v28.8h, v8.8b, v24.8b \n"
"smull2 v31.8h, v8.16b, v24.16b \n"
"dup v24.16b, %51.b[4] \n" // k04
"smlal v28.8h, v9.8b, v25.8b \n"
"smlal2 v31.8h, v9.16b, v25.16b \n"
"dup v25.16b, %51.b[5] \n" // k05
"smlal v28.8h, v10.8b, v26.8b \n"
"smlal2 v31.8h, v10.16b, v26.16b \n"
"dup v26.16b, %51.b[6] \n" // k06
"smlal v28.8h, v11.8b, v27.8b \n"
"smlal2 v31.8h, v11.16b, v27.16b \n"
"dup v27.16b, %51.b[7] \n" // k07
"smlal v28.8h, v12.8b, v24.8b \n"
"smlal2 v31.8h, v12.16b, v24.16b \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v29.4s, v30.4s}, [%2] \n" // sum1
"smlal v28.8h, v13.8b, v25.8b \n"
"smlal2 v31.8h, v13.16b, v25.16b \n"
"dup v24.16b, %51.b[8] \n" // k08
"smlal v28.8h, v14.8b, v26.8b \n"
"smlal2 v31.8h, v14.16b, v26.16b \n"
"dup v25.16b, %51.b[9] \n" // k09
"smlal v28.8h, v15.8b, v27.8b \n"
"smlal2 v31.8h, v15.16b, v27.16b \n"
"dup v26.16b, %51.b[10] \n" // k10
"smlal v28.8h, v16.8b, v24.8b \n"
"smlal2 v31.8h, v16.16b, v24.16b \n"
"dup v27.16b, %51.b[11] \n" // k11
"smlal v28.8h, v17.8b, v25.8b \n"
"smlal2 v31.8h, v17.16b, v25.16b \n"
"dup v24.16b, %51.b[12] \n" // k12
"smlal v28.8h, v18.8b, v26.8b \n"
"smlal2 v31.8h, v18.16b, v26.16b \n"
"dup v25.16b, %51.b[13] \n" // k13
"smlal v28.8h, v19.8b, v27.8b \n"
"smlal2 v31.8h, v19.16b, v27.16b \n"
"dup v26.16b, %51.b[14] \n" // k14
"smlal v28.8h, v20.8b, v24.8b \n"
"smlal2 v31.8h, v20.16b, v24.16b \n"
"dup v27.16b, %51.b[15] \n" // k15
"smlal v28.8h, v21.8b, v25.8b \n"
"smlal2 v31.8h, v21.16b, v25.16b \n"
"dup v24.16b, %52.b[0] \n" // k00
"smlal v28.8h, v22.8b, v26.8b \n"
"smlal2 v31.8h, v22.16b, v26.16b \n"
"dup v25.16b, %52.b[1] \n" // k01
"smlal v28.8h, v23.8b, v27.8b \n"
"smlal2 v31.8h, v23.16b, v27.16b \n"
"saddw v29.4s, v29.4s, v28.4h \n"
"saddw2 v30.4s, v30.4s, v28.8h \n"
"dup v26.16b, %52.b[2] \n" // k02
"dup v27.16b, %52.b[3] \n" // k03
"st1 {v29.4s, v30.4s}, [%2], #32 \n"
"ld1 {v29.4s, v30.4s}, [%2] \n" // sum1
"saddw v29.4s, v29.4s, v31.4h \n"
"saddw2 v30.4s, v30.4s, v31.8h \n"
"st1 {v29.4s, v30.4s}, [%2], #32 \n"
//########################################### // sum1
"smull v28.8h, v8.8b, v24.8b \n"
"smull2 v31.8h, v8.16b, v24.16b \n"
"dup v24.16b, %52.b[4] \n" // k04
"smlal v28.8h, v9.8b, v25.8b \n"
"smlal2 v31.8h, v9.16b, v25.16b \n"
"dup v25.16b, %52.b[5] \n" // k05
"smlal v28.8h, v10.8b, v26.8b \n"
"smlal2 v31.8h, v10.16b, v26.16b \n"
"dup v26.16b, %52.b[6] \n" // k06
"smlal v28.8h, v11.8b, v27.8b \n"
"smlal2 v31.8h, v11.16b, v27.16b \n"
"dup v27.16b, %52.b[7] \n" // k07
"smlal v28.8h, v12.8b, v24.8b \n"
"smlal2 v31.8h, v12.16b, v24.16b \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v29.4s, v30.4s}, [%3] \n" // sum2
"smlal v28.8h, v13.8b, v25.8b \n"
"smlal2 v31.8h, v13.16b, v25.16b \n"
"dup v24.16b, %52.b[8] \n" // k08
"smlal v28.8h, v14.8b, v26.8b \n"
"smlal2 v31.8h, v14.16b, v26.16b \n"
"dup v25.16b, %52.b[9] \n" // k09
"smlal v28.8h, v15.8b, v27.8b \n"
"smlal2 v31.8h, v15.16b, v27.16b \n"
"dup v26.16b, %52.b[10] \n" // k10
"smlal v28.8h, v16.8b, v24.8b \n"
"smlal2 v31.8h, v16.16b, v24.16b \n"
"dup v27.16b, %52.b[11] \n" // k11
"smlal v28.8h, v17.8b, v25.8b \n"
"smlal2 v31.8h, v17.16b, v25.16b \n"
"dup v24.16b, %52.b[12] \n" // k12
"smlal v28.8h, v18.8b, v26.8b \n"
"smlal2 v31.8h, v18.16b, v26.16b \n"
"dup v25.16b, %52.b[13] \n" // k13
"smlal v28.8h, v19.8b, v27.8b \n"
"smlal2 v31.8h, v19.16b, v27.16b \n"
"dup v26.16b, %52.b[14] \n" // k14
"smlal v28.8h, v20.8b, v24.8b \n"
"smlal2 v31.8h, v20.16b, v24.16b \n"
"dup v27.16b, %52.b[15] \n" // k15
"smlal v28.8h, v21.8b, v25.8b \n"
"smlal2 v31.8h, v21.16b, v25.16b \n"
"dup v24.16b, %53.b[0] \n" // k00
"smlal v28.8h, v22.8b, v26.8b \n"
"smlal2 v31.8h, v22.16b, v26.16b \n"
"dup v25.16b, %53.b[1] \n" // k01
"smlal v28.8h, v23.8b, v27.8b \n"
"smlal2 v31.8h, v23.16b, v27.16b \n"
"saddw v29.4s, v29.4s, v28.4h \n"
"dup v26.16b, %53.b[2] \n" // k02
"saddw2 v30.4s, v30.4s, v28.8h \n"
"dup v27.16b, %53.b[3] \n" // k03
"st1 {v29.4s, v30.4s}, [%3], #32 \n"
"ld1 {v29.4s, v30.4s}, [%3] \n" // sum2
"saddw v29.4s, v29.4s, v31.4h \n"
"saddw2 v30.4s, v30.4s, v31.8h \n"
"st1 {v29.4s, v30.4s}, [%3], #32 \n"
//########################################### //sum 2
"smull v28.8h, v8.8b, v24.8b \n"
"smull2 v31.8h, v8.16b, v24.16b \n"
"dup v24.16b, %53.b[4] \n" // k04
"smlal v28.8h, v9.8b, v25.8b \n"
"smlal2 v31.8h, v9.16b, v25.16b \n"
"dup v25.16b, %53.b[5] \n" // k05
"smlal v28.8h, v10.8b, v26.8b \n"
"smlal2 v31.8h, v10.16b, v26.16b \n"
"dup v26.16b, %53.b[6] \n" // k06
"smlal v28.8h, v11.8b, v27.8b \n"
"smlal2 v31.8h, v11.16b, v27.16b \n"
"dup v27.16b, %53.b[7] \n" // k07
"smlal v28.8h, v12.8b, v24.8b \n"
"smlal2 v31.8h, v12.16b, v24.16b \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v29.4s, v30.4s}, [%4] \n" // sum3
"smlal v28.8h, v13.8b, v25.8b \n"
"smlal2 v31.8h, v13.16b, v25.16b \n"
"dup v24.16b, %53.b[8] \n" // k08
"smlal v28.8h, v14.8b, v26.8b \n"
"smlal2 v31.8h, v14.16b, v26.16b \n"
"dup v25.16b, %53.b[9] \n" // k09
"smlal v28.8h, v15.8b, v27.8b \n"
"smlal2 v31.8h, v15.16b, v27.16b \n"
"dup v26.16b, %53.b[10] \n" // k10
"smlal v28.8h, v16.8b, v24.8b \n"
"smlal2 v31.8h, v16.16b, v24.16b \n"
"dup v27.16b, %53.b[11] \n" // k11
"smlal v28.8h, v17.8b, v25.8b \n"
"smlal2 v31.8h, v17.16b, v25.16b \n"
"dup v24.16b, %53.b[12] \n" // k12
"smlal v28.8h, v18.8b, v26.8b \n"
"smlal2 v31.8h, v18.16b, v26.16b \n"
"dup v25.16b, %53.b[13] \n" // k13
"smlal v28.8h, v19.8b, v27.8b \n"
"smlal2 v31.8h, v19.16b, v27.16b \n"
"dup v26.16b, %53.b[14] \n" // k14
"smlal v28.8h, v20.8b, v24.8b \n"
"smlal2 v31.8h, v20.16b, v24.16b \n"
"dup v27.16b, %53.b[15] \n" // k15
"smlal v28.8h, v21.8b, v25.8b \n"
"smlal2 v31.8h, v21.16b, v25.16b \n"
"dup v24.16b, %54.b[0] \n" // k00
"smlal v28.8h, v22.8b, v26.8b \n"
"smlal2 v31.8h, v22.16b, v26.16b \n"
"dup v25.16b, %54.b[1] \n" // k01
"smlal v28.8h, v23.8b, v27.8b \n"
"smlal2 v31.8h, v23.16b, v27.16b \n"
"saddw v29.4s, v29.4s, v28.4h \n"
"dup v26.16b, %54.b[2] \n" // k02
"saddw2 v30.4s, v30.4s, v28.8h \n"
"dup v27.16b, %54.b[3] \n" // k03
"st1 {v29.4s, v30.4s}, [%4], #32 \n"
"ld1 {v29.4s, v30.4s}, [%4] \n" // sum3
"saddw v29.4s, v29.4s, v31.4h \n"
"saddw2 v30.4s, v30.4s, v31.8h \n"
"st1 {v29.4s, v30.4s}, [%4], #32 \n"
//########################################### // sum3
"smull v28.8h, v8.8b, v24.8b \n"
"smull2 v31.8h, v8.16b, v24.16b \n"
"dup v24.16b, %54.b[4] \n" // k04
"smlal v28.8h, v9.8b, v25.8b \n"
"smlal2 v31.8h, v9.16b, v25.16b \n"
"dup v25.16b, %54.b[5] \n" // k05
"smlal v28.8h, v10.8b, v26.8b \n"
"smlal2 v31.8h, v10.16b, v26.16b \n"
"dup v26.16b, %54.b[6] \n" // k06
"smlal v28.8h, v11.8b, v27.8b \n"
"smlal2 v31.8h, v11.16b, v27.16b \n"
"dup v27.16b, %54.b[7] \n" // k07
"smlal v28.8h, v12.8b, v24.8b \n"
"smlal2 v31.8h, v12.16b, v24.16b \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v29.4s, v30.4s}, [%5] \n" // sum4
"smlal v28.8h, v13.8b, v25.8b \n"
"smlal2 v31.8h, v13.16b, v25.16b \n"
"dup v24.16b, %54.b[8] \n" // k08
"smlal v28.8h, v14.8b, v26.8b \n"
"smlal2 v31.8h, v14.16b, v26.16b \n"
"dup v25.16b, %54.b[9] \n" // k09
"smlal v28.8h, v15.8b, v27.8b \n"
"smlal2 v31.8h, v15.16b, v27.16b \n"
"dup v26.16b, %54.b[10] \n" // k10
"smlal v28.8h, v16.8b, v24.8b \n"
"smlal2 v31.8h, v16.16b, v24.16b \n"
"dup v27.16b, %54.b[11] \n" // k11
"smlal v28.8h, v17.8b, v25.8b \n"
"smlal2 v31.8h, v17.16b, v25.16b \n"
"dup v24.16b, %54.b[12] \n" // k12
"smlal v28.8h, v18.8b, v26.8b \n"
"smlal2 v31.8h, v18.16b, v26.16b \n"
"dup v25.16b, %54.b[13] \n" // k13
"smlal v28.8h, v19.8b, v27.8b \n"
"smlal2 v31.8h, v19.16b, v27.16b \n"
"dup v26.16b, %54.b[14] \n" // k14
"smlal v28.8h, v20.8b, v24.8b \n"
"smlal2 v31.8h, v20.16b, v24.16b \n"
"dup v27.16b, %54.b[15] \n" // k15
"smlal v28.8h, v21.8b, v25.8b \n"
"smlal2 v31.8h, v21.16b, v25.16b \n"
"dup v24.16b, %55.b[0] \n" // k00
"smlal v28.8h, v22.8b, v26.8b \n"
"smlal2 v31.8h, v22.16b, v26.16b \n"
"dup v25.16b, %55.b[1] \n" // k01
"smlal v28.8h, v23.8b, v27.8b \n"
"smlal2 v31.8h, v23.16b, v27.16b \n"
"dup v26.16b, %55.b[2] \n" // k02
"saddw v29.4s, v29.4s, v28.4h \n"
"dup v27.16b, %55.b[3] \n" // k03
"saddw2 v30.4s, v30.4s, v28.8h \n"
"st1 {v29.4s, v30.4s}, [%5], #32 \n"
"ld1 {v29.4s, v30.4s}, [%5] \n" // sum4
"saddw v29.4s, v29.4s, v31.4h \n"
"saddw2 v30.4s, v30.4s, v31.8h \n"
"st1 {v29.4s, v30.4s}, [%5], #32 \n"
//########################################### // sum4
"smull v28.8h, v8.8b, v24.8b \n"
"smull2 v31.8h, v8.16b, v24.16b \n"
"dup v24.16b, %55.b[4] \n" // k04
"smlal v28.8h, v9.8b, v25.8b \n"
"smlal2 v31.8h, v9.16b, v25.16b \n"
"dup v25.16b, %55.b[5] \n" // k05
"smlal v28.8h, v10.8b, v26.8b \n"
"smlal2 v31.8h, v10.16b, v26.16b \n"
"dup v26.16b, %55.b[6] \n" // k06
"smlal v28.8h, v11.8b, v27.8b \n"
"smlal2 v31.8h, v11.16b, v27.16b \n"
"dup v27.16b, %55.b[7] \n" // k07
"smlal v28.8h, v12.8b, v24.8b \n"
"smlal2 v31.8h, v12.16b, v24.16b \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v29.4s, v30.4s}, [%6] \n" // sum5
"smlal v28.8h, v13.8b, v25.8b \n"
"smlal2 v31.8h, v13.16b, v25.16b \n"
"dup v24.16b, %55.b[8] \n" // k08
"smlal v28.8h, v14.8b, v26.8b \n"
"smlal2 v31.8h, v14.16b, v26.16b \n"
"dup v25.16b, %55.b[9] \n" // k09
"smlal v28.8h, v15.8b, v27.8b \n"
"smlal2 v31.8h, v15.16b, v27.16b \n"
"dup v26.16b, %55.b[10] \n" // k10
"smlal v28.8h, v16.8b, v24.8b \n"
"smlal2 v31.8h, v16.16b, v24.16b \n"
"dup v27.16b, %55.b[11] \n" // k11
"smlal v28.8h, v17.8b, v25.8b \n"
"smlal2 v31.8h, v17.16b, v25.16b \n"
"dup v24.16b, %55.b[12] \n" // k12
"smlal v28.8h, v18.8b, v26.8b \n"
"smlal2 v31.8h, v18.16b, v26.16b \n"
"dup v25.16b, %55.b[13] \n" // k13
"smlal v28.8h, v19.8b, v27.8b \n"
"smlal2 v31.8h, v19.16b, v27.16b \n"
"dup v26.16b, %55.b[14] \n" // k14
"smlal v28.8h, v20.8b, v24.8b \n"
"smlal2 v31.8h, v20.16b, v24.16b \n"
"dup v27.16b, %55.b[15] \n" // k15
"smlal v28.8h, v21.8b, v25.8b \n"
"smlal2 v31.8h, v21.16b, v25.16b \n"
"dup v24.16b, %56.b[0] \n" // k00
"smlal v28.8h, v22.8b, v26.8b \n"
"smlal2 v31.8h, v22.16b, v26.16b \n"
"dup v25.16b, %56.b[1] \n" // k01
"smlal v28.8h, v23.8b, v27.8b \n"
"smlal2 v31.8h, v23.16b, v27.16b \n"
"saddw v29.4s, v29.4s, v28.4h \n"
"dup v26.16b, %56.b[2] \n" // k02
"saddw2 v30.4s, v30.4s, v28.8h \n"
"dup v27.16b, %56.b[3] \n" // k03
"st1 {v29.4s, v30.4s}, [%6], #32 \n"
"ld1 {v29.4s, v30.4s}, [%6] \n" // sum5
"saddw v29.4s, v29.4s, v31.4h \n"
"saddw2 v30.4s, v30.4s, v31.8h \n"
"st1 {v29.4s, v30.4s}, [%6], #32 \n"
//########################################### // sum5
"smull v28.8h, v8.8b, v24.8b \n"
"smull2 v31.8h, v8.16b, v24.16b \n"
"dup v24.16b, %56.b[4] \n" // k04
"smlal v28.8h, v9.8b, v25.8b \n"
"smlal2 v31.8h, v9.16b, v25.16b \n"
"dup v25.16b, %56.b[5] \n" // k05
"smlal v28.8h, v10.8b, v26.8b \n"
"smlal2 v31.8h, v10.16b, v26.16b \n"
"dup v26.16b, %56.b[6] \n" // k06
"smlal v28.8h, v11.8b, v27.8b \n"
"smlal2 v31.8h, v11.16b, v27.16b \n"
"dup v27.16b, %56.b[7] \n" // k07
"smlal v28.8h, v12.8b, v24.8b \n"
"smlal2 v31.8h, v12.16b, v24.16b \n"
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v29.4s, v30.4s}, [%7] \n" // sum6
"smlal v28.8h, v13.8b, v25.8b \n"
"smlal2 v31.8h, v13.16b, v25.16b \n"
"dup v24.16b, %56.b[8] \n" // k08
"smlal v28.8h, v14.8b, v26.8b \n"
"smlal2 v31.8h, v14.16b, v26.16b \n"
"dup v25.16b, %56.b[9] \n" // k09
"smlal v28.8h, v15.8b, v27.8b \n"
"smlal2 v31.8h, v15.16b, v27.16b \n"
"dup v26.16b, %56.b[10] \n" // k10
"smlal v28.8h, v16.8b, v24.8b \n"
"smlal2 v31.8h, v16.16b, v24.16b \n"
"dup v27.16b, %56.b[11] \n" // k11
"smlal v28.8h, v17.8b, v25.8b \n"
"smlal2 v31.8h, v17.16b, v25.16b \n"
"dup v24.16b, %56.b[12] \n" // k12
"smlal v28.8h, v18.8b, v26.8b \n"
"smlal2 v31.8h, v18.16b, v26.16b \n"
"dup v25.16b, %56.b[13] \n" // k13
"smlal v28.8h, v19.8b, v27.8b \n"
"smlal2 v31.8h, v19.16b, v27.16b \n"
"dup v26.16b, %56.b[14] \n" // k14
"smlal v28.8h, v20.8b, v24.8b \n"
"smlal2 v31.8h, v20.16b, v24.16b \n"
"dup v27.16b, %56.b[15] \n" // k15
"smlal v28.8h, v21.8b, v25.8b \n"
"smlal2 v31.8h, v21.16b, v25.16b \n"
"dup v24.16b, %57.b[0] \n" // k00
"smlal v28.8h, v22.8b, v26.8b \n"
"smlal2 v31.8h, v22.16b, v26.16b \n"
"dup v25.16b, %57.b[1] \n" // k01
"smlal v28.8h, v23.8b, v27.8b \n"
"smlal2 v31.8h, v23.16b, v27.16b \n"
"dup v26.16b, %57.b[2] \n" // k02
"saddw v29.4s, v29.4s, v28.4h \n"
"saddw2 v30.4s, v30.4s, v28.8h \n"
"dup v27.16b, %57.b[3] \n" // k03
"st1 {v29.4s, v30.4s}, [%7], #32 \n"
"ld1 {v29.4s, v30.4s}, [%7] \n" // sum6
"saddw v29.4s, v29.4s, v31.4h \n"
"saddw2 v30.4s, v30.4s, v31.8h \n"
"st1 {v29.4s, v30.4s}, [%7], #32 \n"
//########################################### // sum6
"smull v28.8h, v8.8b, v24.8b \n"
"smull2 v31.8h, v8.16b, v24.16b \n"
"dup v24.16b, %57.b[4] \n" // k04
"smlal v28.8h, v9.8b, v25.8b \n"
"smlal2 v31.8h, v9.16b, v25.16b \n"
"dup v25.16b, %57.b[5] \n" // k05
"smlal v28.8h, v10.8b, v26.8b \n"
"smlal2 v31.8h, v10.16b, v26.16b \n"
"dup v26.16b, %57.b[6] \n" // k06
"smlal v28.8h, v11.8b, v27.8b \n"
"smlal2 v31.8h, v11.16b, v27.16b \n"
"dup v27.16b, %57.b[7] \n" // k07
"smlal v28.8h, v12.8b, v24.8b \n"
"smlal2 v31.8h, v12.16b, v24.16b \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v29.4s, v30.4s}, [%8] \n" // sum7
"smlal v28.8h, v13.8b, v25.8b \n"
"smlal2 v31.8h, v13.16b, v25.16b \n"
"dup v24.16b, %57.b[8] \n" // k08
"smlal v28.8h, v14.8b, v26.8b \n"
"smlal2 v31.8h, v14.16b, v26.16b \n"
"dup v25.16b, %57.b[9] \n" // k09
"smlal v28.8h, v15.8b, v27.8b \n"
"smlal2 v31.8h, v15.16b, v27.16b \n"
"dup v26.16b, %57.b[10] \n" // k10
"smlal v28.8h, v16.8b, v24.8b \n"
"smlal2 v31.8h, v16.16b, v24.16b \n"
"dup v27.16b, %57.b[11] \n" // k11
"smlal v28.8h, v17.8b, v25.8b \n"
"smlal2 v31.8h, v17.16b, v25.16b \n"
"dup v24.16b, %57.b[12] \n" // k12
"smlal v28.8h, v18.8b, v26.8b \n"
"smlal2 v31.8h, v18.16b, v26.16b \n"
"dup v25.16b, %57.b[13] \n" // k13
"smlal v28.8h, v19.8b, v27.8b \n"
"smlal2 v31.8h, v19.16b, v27.16b \n"
"dup v26.16b, %57.b[14] \n" // k14
"smlal v28.8h, v20.8b, v24.8b \n"
"smlal2 v31.8h, v20.16b, v24.16b \n"
"dup v27.16b, %57.b[15] \n" // k15
"smlal v28.8h, v21.8b, v25.8b \n"
"smlal2 v31.8h, v21.16b, v25.16b \n"
"prfm pldl1keep, [%9, #128] \n"
"prfm pldl1keep, [%10, #128] \n"
"ld1 {v8.16b}, [%9], #16 \n" // r0"
"smlal v28.8h, v22.8b, v26.8b \n"
"smlal2 v31.8h, v22.16b, v26.16b \n"
"ld1 {v9.16b}, [%10], #16 \n" // r1"
"prfm pldl1keep, [%11, #128] \n"
"prfm pldl1keep, [%12, #128] \n"
"smlal v28.8h, v23.8b, v27.8b \n"
"smlal2 v31.8h, v23.16b, v27.16b \n"
"ld1 {v10.16b}, [%11], #16 \n" // r2"
"ld1 {v11.16b}, [%12], #16 \n" // r3"
"dup v24.16b, %50.b[0] \n" // k00
"saddw v29.4s, v29.4s, v28.4h \n"
"dup v25.16b, %50.b[1] \n" // k01
"saddw2 v30.4s, v30.4s, v28.8h \n"
"dup v26.16b, %50.b[2] \n" // k02
"dup v27.16b, %50.b[3] \n" // k03
"st1 {v29.4s, v30.4s}, [%8], #32 \n"
"ld1 {v29.4s, v30.4s}, [%8] \n" // sum7
"saddw v29.4s, v29.4s, v31.4h \n"
"saddw2 v30.4s, v30.4s, v31.8h \n"
"st1 {v29.4s, v30.4s}, [%8], #32 \n"
//########################################### // sum7
"subs %w0, %w0, #1 \n"
"bne 0b \n"
"sub %9, %9, #16 \n"
"sub %10, %10, #16 \n"
"sub %11, %11, #16 \n"
"sub %12, %12, #16 \n"
: "=r"(nn), // %0
"=r"(outptr0),// %1
"=r"(outptr1),// %2
"=r"(outptr2),// %3
"=r"(outptr3),// %4
"=r"(outptr4),// %5
"=r"(outptr5),// %6
"=r"(outptr6),// %7
"=r"(outptr7),// %8
"=r"(r0), // %9
"=r"(r1), // %10
"=r"(r2), // %11
"=r"(r3), // %12
"=r"(r4), // %13
"=r"(r5), // %14
"=r"(r6), // %15
"=r"(r7), // %16
"=r"(r8), // %17
"=r"(r9), // %18
"=r"(r10), // %19
"=r"(r11), // %20
"=r"(r12), // %21
"=r"(r13), // %22
"=r"(r14), // %23
"=r"(r15) // %24
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(r0),
"10"(r1),
"11"(r2),
"12"(r3),
"13"(r4),
"14"(r5),
"15"(r6),
"16"(r7),
"17"(r8),
"18"(r9),
"19"(r10),
"20"(r11),
"21"(r12),
"22"(r13),
"23"(r14),
"24"(r15),
"w"(_k0), // %50
"w"(_k1), // %51
"w"(_k2), // %52
"w"(_k3), // %53
"w"(_k4), // %54
"w"(_k5), // %55
"w"(_k6), // %56
"w"(_k7) // %57
: "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
if (remain >= 8)
{
remain -= 8;
asm volatile(
"prfm pldl1keep, [%9, #128] \n"
"prfm pldl1keep, [%10, #128] \n"
"prfm pldl1keep, [%11, #128] \n"
"prfm pldl1keep, [%12, #128] \n"
"ld1 {v8.8b}, [%9], #8 \n" // r0"
"ld1 {v9.8b}, [%10], #8 \n" // r1"
"ld1 {v10.8b}, [%11], #8 \n" // r2"
"ld1 {v11.8b}, [%12], #8 \n" // r3"
"dup v24.8b, %50.b[0] \n" // k00
"dup v25.8b, %50.b[1] \n" // k01
"dup v26.8b, %50.b[2] \n" // k02
"dup v27.8b, %50.b[3] \n" // k03
"smull v28.8h, v8.8b, v24.8b \n" // r0
"prfm pldl1keep, [%13, #128] \n"
"prfm pldl1keep, [%14, #128] \n"
"prfm pldl1keep, [%15, #128] \n"
"smlal v28.8h, v9.8b, v25.8b \n"
"prfm pldl1keep, [%16, #128] \n"
"ld1 {v12.8b}, [%13], #8 \n" // r4"
"ld1 {v13.8b}, [%14], #8 \n" // r5"
"smlal v28.8h, v10.8b, v26.8b \n"
"ld1 {v14.8b}, [%15], #8 \n" // r6"
"ld1 {v15.8b}, [%16], #8 \n" // r7"
"dup v24.8b, %50.b[4] \n" // k04
"smlal v28.8h, v11.8b, v27.8b \n"
"dup v25.8b, %50.b[5] \n" // k05
"dup v26.8b, %50.b[6] \n" // k06
"dup v27.8b, %50.b[7] \n" // k07
"smlal v28.8h, v12.8b, v24.8b \n" // r4
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v29.4s, v30.4s}, [%1] \n" // sum0
"prfm pldl1keep, [%17, #128] \n"
"smlal v28.8h, v13.8b, v25.8b \n"
"prfm pldl1keep, [%18, #128] \n"
"prfm pldl1keep, [%19, #128] \n"
"prfm pldl1keep, [%20, #128] \n"
"ld1 {v16.8b}, [%17], #8 \n" // r8"
"smlal v28.8h, v14.8b, v26.8b \n"
"ld1 {v17.8b}, [%18], #8 \n" // r9"
"ld1 {v18.8b}, [%19], #8 \n" // r10"
"ld1 {v19.8b}, [%20], #8 \n" // r11"
"smlal v28.8h, v15.8b, v27.8b \n"
"dup v24.8b, %50.b[8] \n" // k08
"dup v25.8b, %50.b[9] \n" // k09
"dup v26.8b, %50.b[10] \n" // k10
"smlal v28.8h, v16.8b, v24.8b \n" // r8
"dup v27.8b, %50.b[11] \n" // k11
"prfm pldl1keep, [%21, #128] \n"
"prfm pldl1keep, [%22, #128] \n"
"smlal v28.8h, v17.8b, v25.8b \n"
"prfm pldl1keep, [%23, #128] \n"
"prfm pldl1keep, [%24, #128] \n"
"ld1 {v20.8b}, [%21], #8 \n" // r12"
"smlal v28.8h, v18.8b, v26.8b \n"
"ld1 {v21.8b}, [%22], #8 \n" // r13"
"ld1 {v22.8b}, [%23], #8 \n" // r14"
"ld1 {v23.8b}, [%24], #8 \n" // r15"
"smlal v28.8h, v19.8b, v27.8b \n"
"dup v24.8b, %50.b[12] \n" // k12
"dup v25.8b, %50.b[13] \n" // k13
"dup v26.8b, %50.b[14] \n" // k14
"smlal v28.8h, v20.8b, v24.8b \n" // r12
"dup v27.8b, %50.b[15] \n" // k15
"smlal v28.8h, v21.8b, v25.8b \n"
"dup v24.8b, %51.b[0] \n" // k00
"smlal v28.8h, v22.8b, v26.8b \n"
"dup v25.8b, %51.b[1] \n" // k01
"smlal v28.8h, v23.8b, v27.8b \n"
"dup v26.8b, %51.b[2] \n" // k02
"saddw v29.4s, v29.4s, v28.4h \n"
"saddw2 v30.4s, v30.4s, v28.8h \n"
"dup v27.8b, %51.b[3] \n" // k03
"st1 {v29.4s, v30.4s}, [%1], #32 \n" // sum0
//###########################################
"smull v28.8h, v8.8b, v24.8b \n"
"dup v24.8b, %51.b[4] \n" // k04
"smlal v28.8h, v9.8b, v25.8b \n"
"dup v25.8b, %51.b[5] \n" // k05
"smlal v28.8h, v10.8b, v26.8b \n"
"dup v26.8b, %51.b[6] \n" // k06
"smlal v28.8h, v11.8b, v27.8b \n"
"dup v27.8b, %51.b[7] \n" // k07
"smlal v28.8h, v12.8b, v24.8b \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v29.4s, v30.4s}, [%2] \n" // sum1
"smlal v28.8h, v13.8b, v25.8b \n"
"dup v24.8b, %51.b[8] \n" // k08
"smlal v28.8h, v14.8b, v26.8b \n"
"dup v25.8b, %51.b[9] \n" // k09
"smlal v28.8h, v15.8b, v27.8b \n"
"dup v26.8b, %51.b[10] \n" // k10
"smlal v28.8h, v16.8b, v24.8b \n"
"dup v27.8b, %51.b[11] \n" // k11
"smlal v28.8h, v17.8b, v25.8b \n"
"dup v24.8b, %51.b[12] \n" // k12
"smlal v28.8h, v18.8b, v26.8b \n"
"dup v25.8b, %51.b[13] \n" // k13
"smlal v28.8h, v19.8b, v27.8b \n"
"dup v26.8b, %51.b[14] \n" // k14
"smlal v28.8h, v20.8b, v24.8b \n"
"dup v27.8b, %51.b[15] \n" // k15
"smlal v28.8h, v21.8b, v25.8b \n"
"dup v24.8b, %52.b[0] \n" // k00
"smlal v28.8h, v22.8b, v26.8b \n"
"dup v25.8b, %52.b[1] \n" // k01
"smlal v28.8h, v23.8b, v27.8b \n"
"saddw v29.4s, v29.4s, v28.4h \n"
"saddw2 v30.4s, v30.4s, v28.8h \n"
"dup v26.8b, %52.b[2] \n" // k02
"dup v27.8b, %52.b[3] \n" // k03
"st1 {v29.4s, v30.4s}, [%2], #32 \n"
//########################################### // sum1
"smull v28.8h, v8.8b, v24.8b \n"
"dup v24.8b, %52.b[4] \n" // k04
"smlal v28.8h, v9.8b, v25.8b \n"
"dup v25.8b, %52.b[5] \n" // k05
"smlal v28.8h, v10.8b, v26.8b \n"
"dup v26.8b, %52.b[6] \n" // k06
"smlal v28.8h, v11.8b, v27.8b \n"
"dup v27.8b, %52.b[7] \n" // k07
"smlal v28.8h, v12.8b, v24.8b \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v29.4s, v30.4s}, [%3] \n" // sum2
"smlal v28.8h, v13.8b, v25.8b \n"
"dup v24.8b, %52.b[8] \n" // k08
"smlal v28.8h, v14.8b, v26.8b \n"
"dup v25.8b, %52.b[9] \n" // k09
"smlal v28.8h, v15.8b, v27.8b \n"
"dup v26.8b, %52.b[10] \n" // k10
"smlal v28.8h, v16.8b, v24.8b \n"
"dup v27.8b, %52.b[11] \n" // k11
"smlal v28.8h, v17.8b, v25.8b \n"
"dup v24.8b, %52.b[12] \n" // k12
"smlal v28.8h, v18.8b, v26.8b \n"
"dup v25.8b, %52.b[13] \n" // k13
"smlal v28.8h, v19.8b, v27.8b \n"
"dup v26.8b, %52.b[14] \n" // k14
"smlal v28.8h, v20.8b, v24.8b \n"
"dup v27.8b, %52.b[15] \n" // k15
"smlal v28.8h, v21.8b, v25.8b \n"
"dup v24.8b, %53.b[0] \n" // k00
"smlal v28.8h, v22.8b, v26.8b \n"
"dup v25.8b, %53.b[1] \n" // k01
"smlal v28.8h, v23.8b, v27.8b \n"
"saddw v29.4s, v29.4s, v28.4h \n"
"dup v26.8b, %53.b[2] \n" // k02
"saddw2 v30.4s, v30.4s, v28.8h \n"
"dup v27.8b, %53.b[3] \n" // k03
"st1 {v29.4s, v30.4s}, [%3], #32 \n"
//########################################### //sum 2
"smull v28.8h, v8.8b, v24.8b \n"
"dup v24.8b, %53.b[4] \n" // k04
"smlal v28.8h, v9.8b, v25.8b \n"
"dup v25.8b, %53.b[5] \n" // k05
"smlal v28.8h, v10.8b, v26.8b \n"
"dup v26.8b, %53.b[6] \n" // k06
"smlal v28.8h, v11.8b, v27.8b \n"
"dup v27.8b, %53.b[7] \n" // k07
"smlal v28.8h, v12.8b, v24.8b \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v29.4s, v30.4s}, [%4] \n" // sum3
"smlal v28.8h, v13.8b, v25.8b \n"
"dup v24.8b, %53.b[8] \n" // k08
"smlal v28.8h, v14.8b, v26.8b \n"
"dup v25.8b, %53.b[9] \n" // k09
"smlal v28.8h, v15.8b, v27.8b \n"
"dup v26.8b, %53.b[10] \n" // k10
"smlal v28.8h, v16.8b, v24.8b \n"
"dup v27.8b, %53.b[11] \n" // k11
"smlal v28.8h, v17.8b, v25.8b \n"
"dup v24.8b, %53.b[12] \n" // k12
"smlal v28.8h, v18.8b, v26.8b \n"
"dup v25.8b, %53.b[13] \n" // k13
"smlal v28.8h, v19.8b, v27.8b \n"
"dup v26.8b, %53.b[14] \n" // k14
"smlal v28.8h, v20.8b, v24.8b \n"
"dup v27.8b, %53.b[15] \n" // k15
"smlal v28.8h, v21.8b, v25.8b \n"
"dup v24.8b, %54.b[0] \n" // k00
"smlal v28.8h, v22.8b, v26.8b \n"
"dup v25.8b, %54.b[1] \n" // k01
"smlal v28.8h, v23.8b, v27.8b \n"
"saddw v29.4s, v29.4s, v28.4h \n"
"dup v26.8b, %54.b[2] \n" // k02
"saddw2 v30.4s, v30.4s, v28.8h \n"
"dup v27.8b, %54.b[3] \n" // k03
"st1 {v29.4s, v30.4s}, [%4], #32 \n"
//########################################### // sum3
"smull v28.8h, v8.8b, v24.8b \n"
"dup v24.8b, %54.b[4] \n" // k04
"smlal v28.8h, v9.8b, v25.8b \n"
"dup v25.8b, %54.b[5] \n" // k05
"smlal v28.8h, v10.8b, v26.8b \n"
"dup v26.8b, %54.b[6] \n" // k06
"smlal v28.8h, v11.8b, v27.8b \n"
"dup v27.8b, %54.b[7] \n" // k07
"smlal v28.8h, v12.8b, v24.8b \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v29.4s, v30.4s}, [%5] \n" // sum4
"smlal v28.8h, v13.8b, v25.8b \n"
"dup v24.8b, %54.b[8] \n" // k08
"smlal v28.8h, v14.8b, v26.8b \n"
"dup v25.8b, %54.b[9] \n" // k09
"smlal v28.8h, v15.8b, v27.8b \n"
"dup v26.8b, %54.b[10] \n" // k10
"smlal v28.8h, v16.8b, v24.8b \n"
"dup v27.8b, %54.b[11] \n" // k11
"smlal v28.8h, v17.8b, v25.8b \n"
"dup v24.8b, %54.b[12] \n" // k12
"smlal v28.8h, v18.8b, v26.8b \n"
"dup v25.8b, %54.b[13] \n" // k13
"smlal v28.8h, v19.8b, v27.8b \n"
"dup v26.8b, %54.b[14] \n" // k14
"smlal v28.8h, v20.8b, v24.8b \n"
"dup v27.8b, %54.b[15] \n" // k15
"smlal v28.8h, v21.8b, v25.8b \n"
"dup v24.8b, %55.b[0] \n" // k00
"smlal v28.8h, v22.8b, v26.8b \n"
"dup v25.8b, %55.b[1] \n" // k01
"smlal v28.8h, v23.8b, v27.8b \n"
"dup v26.8b, %55.b[2] \n" // k02
"saddw v29.4s, v29.4s, v28.4h \n"
"dup v27.8b, %55.b[3] \n" // k03
"saddw2 v30.4s, v30.4s, v28.8h \n"
"st1 {v29.4s, v30.4s}, [%5], #32 \n"
//########################################### // sum4
"smull v28.8h, v8.8b, v24.8b \n"
"dup v24.8b, %55.b[4] \n" // k04
"smlal v28.8h, v9.8b, v25.8b \n"
"dup v25.8b, %55.b[5] \n" // k05
"smlal v28.8h, v10.8b, v26.8b \n"
"dup v26.8b, %55.b[6] \n" // k06
"smlal v28.8h, v11.8b, v27.8b \n"
"dup v27.8b, %55.b[7] \n" // k07
"smlal v28.8h, v12.8b, v24.8b \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v29.4s, v30.4s}, [%6] \n" // sum5
"smlal v28.8h, v13.8b, v25.8b \n"
"dup v24.8b, %55.b[8] \n" // k08
"smlal v28.8h, v14.8b, v26.8b \n"
"dup v25.8b, %55.b[9] \n" // k09
"smlal v28.8h, v15.8b, v27.8b \n"
"dup v26.8b, %55.b[10] \n" // k10
"smlal v28.8h, v16.8b, v24.8b \n"
"dup v27.8b, %55.b[11] \n" // k11
"smlal v28.8h, v17.8b, v25.8b \n"
"dup v24.8b, %55.b[12] \n" // k12
"smlal v28.8h, v18.8b, v26.8b \n"
"dup v25.8b, %55.b[13] \n" // k13
"smlal v28.8h, v19.8b, v27.8b \n"
"dup v26.8b, %55.b[14] \n" // k14
"smlal v28.8h, v20.8b, v24.8b \n"
"dup v27.8b, %55.b[15] \n" // k15
"smlal v28.8h, v21.8b, v25.8b \n"
"dup v24.8b, %56.b[0] \n" // k00
"smlal v28.8h, v22.8b, v26.8b \n"
"dup v25.8b, %56.b[1] \n" // k01
"smlal v28.8h, v23.8b, v27.8b \n"
"saddw v29.4s, v29.4s, v28.4h \n"
"dup v26.8b, %56.b[2] \n" // k02
"saddw2 v30.4s, v30.4s, v28.8h \n"
"dup v27.8b, %56.b[3] \n" // k03
"st1 {v29.4s, v30.4s}, [%6], #32 \n"
//########################################### // sum5
"smull v28.8h, v8.8b, v24.8b \n"
"dup v24.8b, %56.b[4] \n" // k04
"smlal v28.8h, v9.8b, v25.8b \n"
"dup v25.8b, %56.b[5] \n" // k05
"smlal v28.8h, v10.8b, v26.8b \n"
"dup v26.8b, %56.b[6] \n" // k06
"smlal v28.8h, v11.8b, v27.8b \n"
"dup v27.8b, %56.b[7] \n" // k07
"smlal v28.8h, v12.8b, v24.8b \n"
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v29.4s, v30.4s}, [%7] \n" // sum6
"smlal v28.8h, v13.8b, v25.8b \n"
"dup v24.8b, %56.b[8] \n" // k08
"smlal v28.8h, v14.8b, v26.8b \n"
"dup v25.8b, %56.b[9] \n" // k09
"smlal v28.8h, v15.8b, v27.8b \n"
"dup v26.8b, %56.b[10] \n" // k10
"smlal v28.8h, v16.8b, v24.8b \n"
"dup v27.8b, %56.b[11] \n" // k11
"smlal v28.8h, v17.8b, v25.8b \n"
"dup v24.8b, %56.b[12] \n" // k12
"smlal v28.8h, v18.8b, v26.8b \n"
"dup v25.8b, %56.b[13] \n" // k13
"smlal v28.8h, v19.8b, v27.8b \n"
"dup v26.8b, %56.b[14] \n" // k14
"smlal v28.8h, v20.8b, v24.8b \n"
"dup v27.8b, %56.b[15] \n" // k15
"smlal v28.8h, v21.8b, v25.8b \n"
"dup v24.8b, %57.b[0] \n" // k00
"smlal v28.8h, v22.8b, v26.8b \n"
"dup v25.8b, %57.b[1] \n" // k01
"smlal v28.8h, v23.8b, v27.8b \n"
"dup v26.8b, %57.b[2] \n" // k02
"saddw v29.4s, v29.4s, v28.4h \n"
"saddw2 v30.4s, v30.4s, v28.8h \n"
"dup v27.8b, %57.b[3] \n" // k03
"st1 {v29.4s, v30.4s}, [%7], #32 \n"
//########################################### // sum6
"smull v28.8h, v8.8b, v24.8b \n"
"dup v24.8b, %57.b[4] \n" // k04
"smlal v28.8h, v9.8b, v25.8b \n"
"dup v25.8b, %57.b[5] \n" // k05
"smlal v28.8h, v10.8b, v26.8b \n"
"dup v26.8b, %57.b[6] \n" // k06
"smlal v28.8h, v11.8b, v27.8b \n"
"dup v27.8b, %57.b[7] \n" // k07
"smlal v28.8h, v12.8b, v24.8b \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v29.4s, v30.4s}, [%8] \n" // sum7
"smlal v28.8h, v13.8b, v25.8b \n"
"dup v24.8b, %57.b[8] \n" // k08
"smlal v28.8h, v14.8b, v26.8b \n"
"dup v25.8b, %57.b[9] \n" // k09
"smlal v28.8h, v15.8b, v27.8b \n"
"dup v26.8b, %57.b[10] \n" // k10
"smlal v28.8h, v16.8b, v24.8b \n"
"dup v27.8b, %57.b[11] \n" // k11
"smlal v28.8h, v17.8b, v25.8b \n"
"dup v24.8b, %57.b[12] \n" // k12
"smlal v28.8h, v18.8b, v26.8b \n"
"dup v25.8b, %57.b[13] \n" // k13
"smlal v28.8h, v19.8b, v27.8b \n"
"dup v26.8b, %57.b[14] \n" // k14
"smlal v28.8h, v20.8b, v24.8b \n"
"dup v27.8b, %57.b[15] \n" // k15
"smlal v28.8h, v21.8b, v25.8b \n"
"prfm pldl1keep, [%9, #128] \n"
"prfm pldl1keep, [%10, #128] \n"
"ld1 {v8.8b}, [%9], #8 \n" // r0"
"smlal v28.8h, v22.8b, v26.8b \n"
"ld1 {v9.8b}, [%10], #8 \n" // r1"
"prfm pldl1keep, [%11, #128] \n"
"prfm pldl1keep, [%12, #128] \n"
"smlal v28.8h, v23.8b, v27.8b \n"
"ld1 {v10.8b}, [%11], #8 \n" // r2"
"ld1 {v11.8b}, [%12], #8 \n" // r3"
"dup v24.8b, %50.b[0] \n" // k00
"saddw v29.4s, v29.4s, v28.4h \n"
"dup v25.8b, %50.b[1] \n" // k01
"saddw2 v30.4s, v30.4s, v28.8h \n"
"dup v26.8b, %50.b[2] \n" // k02
"dup v27.8b, %50.b[3] \n" // k03
"st1 {v29.4s, v30.4s}, [%8], #32 \n"
//########################################### // sum7
: "=r"(nn), // %0
"=r"(outptr0),// %1
"=r"(outptr1),// %2
"=r"(outptr2),// %3
"=r"(outptr3),// %4
"=r"(outptr4),// %5
"=r"(outptr5),// %6
"=r"(outptr6),// %7
"=r"(outptr7),// %8
"=r"(r0), // %9
"=r"(r1), // %10
"=r"(r2), // %11
"=r"(r3), // %12
"=r"(r4), // %13
"=r"(r5), // %14
"=r"(r6), // %15
"=r"(r7), // %16
"=r"(r8), // %17
"=r"(r9), // %18
"=r"(r10), // %19
"=r"(r11), // %20
"=r"(r12), // %21
"=r"(r13), // %22
"=r"(r14), // %23
"=r"(r15) // %24
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(r0),
"10"(r1),
"11"(r2),
"12"(r3),
"13"(r4),
"14"(r5),
"15"(r6),
"16"(r7),
"17"(r8),
"18"(r9),
"19"(r10),
"20"(r11),
"21"(r12),
"22"(r13),
"23"(r14),
"24"(r15),
"w"(_k0), // %50
"w"(_k1), // %51
"w"(_k2), // %52
"w"(_k3), // %53
"w"(_k4), // %54
"w"(_k5), // %55
"w"(_k6), // %56
"w"(_k7) // %57
: "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
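// Tail pass: when at least 4 output positions are left, run one narrower pass
// that reuses the 16-channel unroll but keeps only a single 4-lane int32
// accumulator per output channel; the input pointers are rewound by 4 at the
// end because the 8-byte loads overshoot the 4 positions actually consumed.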
if (remain >= 4)
{
remain -= 4;
asm volatile(
"prfm pldl1keep, [%9, #128] \n"
"prfm pldl1keep, [%10, #128] \n"
"prfm pldl1keep, [%11, #128] \n"
"prfm pldl1keep, [%12, #128] \n"
"ld1 {v8.8b}, [%9], #8 \n" // r0"
"ld1 {v9.8b}, [%10], #8 \n" // r1"
"ld1 {v10.8b}, [%11], #8 \n" // r2"
"ld1 {v11.8b}, [%12], #8 \n" // r3"
"dup v24.8b, %50.b[0] \n" // k00
"dup v25.8b, %50.b[1] \n" // k01
"dup v26.8b, %50.b[2] \n" // k02
"dup v27.8b, %50.b[3] \n" // k03
"smull v28.8h, v8.8b, v24.8b \n" // r0
"prfm pldl1keep, [%13, #128] \n"
"prfm pldl1keep, [%14, #128] \n"
"prfm pldl1keep, [%15, #128] \n"
"smlal v28.8h, v9.8b, v25.8b \n"
"prfm pldl1keep, [%16, #128] \n"
"ld1 {v12.8b}, [%13], #8 \n" // r4"
"ld1 {v13.8b}, [%14], #8 \n" // r5"
"smlal v28.8h, v10.8b, v26.8b \n"
"ld1 {v14.8b}, [%15], #8 \n" // r6"
"ld1 {v15.8b}, [%16], #8 \n" // r7"
"dup v24.8b, %50.b[4] \n" // k04
"smlal v28.8h, v11.8b, v27.8b \n"
"dup v25.8b, %50.b[5] \n" // k05
"dup v26.8b, %50.b[6] \n" // k06
"dup v27.8b, %50.b[7] \n" // k07
"smlal v28.8h, v12.8b, v24.8b \n" // r4
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v29.4s}, [%1] \n" // sum0
"prfm pldl1keep, [%17, #128] \n"
"smlal v28.8h, v13.8b, v25.8b \n"
"prfm pldl1keep, [%18, #128] \n"
"prfm pldl1keep, [%19, #128] \n"
"prfm pldl1keep, [%20, #128] \n"
"ld1 {v16.8b}, [%17], #8 \n" // r8"
"smlal v28.8h, v14.8b, v26.8b \n"
"ld1 {v17.8b}, [%18], #8 \n" // r9"
"ld1 {v18.8b}, [%19], #8 \n" // r10"
"ld1 {v19.8b}, [%20], #8 \n" // r11"
"smlal v28.8h, v15.8b, v27.8b \n"
"dup v24.8b, %50.b[8] \n" // k08
"dup v25.8b, %50.b[9] \n" // k09
"dup v26.8b, %50.b[10] \n" // k10
"smlal v28.8h, v16.8b, v24.8b \n" // r8
"dup v27.8b, %50.b[11] \n" // k11
"prfm pldl1keep, [%21, #128] \n"
"prfm pldl1keep, [%22, #128] \n"
"smlal v28.8h, v17.8b, v25.8b \n"
"prfm pldl1keep, [%23, #128] \n"
"prfm pldl1keep, [%24, #128] \n"
"ld1 {v20.8b}, [%21], #8 \n" // r12"
"smlal v28.8h, v18.8b, v26.8b \n"
"ld1 {v21.8b}, [%22], #8 \n" // r13"
"ld1 {v22.8b}, [%23], #8 \n" // r14"
"ld1 {v23.8b}, [%24], #8 \n" // r15"
"smlal v28.8h, v19.8b, v27.8b \n"
"dup v24.8b, %50.b[12] \n" // k12
"dup v25.8b, %50.b[13] \n" // k13
"dup v26.8b, %50.b[14] \n" // k14
"smlal v28.8h, v20.8b, v24.8b \n" // r12
"dup v27.8b, %50.b[15] \n" // k15
"smlal v28.8h, v21.8b, v25.8b \n"
"dup v24.8b, %51.b[0] \n" // k00
"smlal v28.8h, v22.8b, v26.8b \n"
"dup v25.8b, %51.b[1] \n" // k01
"smlal v28.8h, v23.8b, v27.8b \n"
"dup v26.8b, %51.b[2] \n" // k02
"saddw v29.4s, v29.4s, v28.4h \n"
"dup v27.8b, %51.b[3] \n" // k03
"st1 {v29.4s}, [%1], #16 \n" // sum0
//###########################################
"smull v28.8h, v8.8b, v24.8b \n"
"dup v24.8b, %51.b[4] \n" // k04
"smlal v28.8h, v9.8b, v25.8b \n"
"dup v25.8b, %51.b[5] \n" // k05
"smlal v28.8h, v10.8b, v26.8b \n"
"dup v26.8b, %51.b[6] \n" // k06
"smlal v28.8h, v11.8b, v27.8b \n"
"dup v27.8b, %51.b[7] \n" // k07
"smlal v28.8h, v12.8b, v24.8b \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v29.4s}, [%2] \n" // sum1
"smlal v28.8h, v13.8b, v25.8b \n"
"dup v24.8b, %51.b[8] \n" // k08
"smlal v28.8h, v14.8b, v26.8b \n"
"dup v25.8b, %51.b[9] \n" // k09
"smlal v28.8h, v15.8b, v27.8b \n"
"dup v26.8b, %51.b[10] \n" // k10
"smlal v28.8h, v16.8b, v24.8b \n"
"dup v27.8b, %51.b[11] \n" // k11
"smlal v28.8h, v17.8b, v25.8b \n"
"dup v24.8b, %51.b[12] \n" // k12
"smlal v28.8h, v18.8b, v26.8b \n"
"dup v25.8b, %51.b[13] \n" // k13
"smlal v28.8h, v19.8b, v27.8b \n"
"dup v26.8b, %51.b[14] \n" // k14
"smlal v28.8h, v20.8b, v24.8b \n"
"dup v27.8b, %51.b[15] \n" // k15
"smlal v28.8h, v21.8b, v25.8b \n"
"dup v24.8b, %52.b[0] \n" // k00
"smlal v28.8h, v22.8b, v26.8b \n"
"dup v25.8b, %52.b[1] \n" // k01
"smlal v28.8h, v23.8b, v27.8b \n"
"dup v26.8b, %52.b[2] \n" // k02
"saddw v29.4s, v29.4s, v28.4h \n"
"dup v27.8b, %52.b[3] \n" // k03
"st1 {v29.4s}, [%2], #16 \n"
//########################################### // sum1
"smull v28.8h, v8.8b, v24.8b \n"
"dup v24.8b, %52.b[4] \n" // k04
"smlal v28.8h, v9.8b, v25.8b \n"
"dup v25.8b, %52.b[5] \n" // k05
"smlal v28.8h, v10.8b, v26.8b \n"
"dup v26.8b, %52.b[6] \n" // k06
"smlal v28.8h, v11.8b, v27.8b \n"
"dup v27.8b, %52.b[7] \n" // k07
"smlal v28.8h, v12.8b, v24.8b \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v29.4s}, [%3] \n" // sum2
"smlal v28.8h, v13.8b, v25.8b \n"
"dup v24.8b, %52.b[8] \n" // k08
"smlal v28.8h, v14.8b, v26.8b \n"
"dup v25.8b, %52.b[9] \n" // k09
"smlal v28.8h, v15.8b, v27.8b \n"
"dup v26.8b, %52.b[10] \n" // k10
"smlal v28.8h, v16.8b, v24.8b \n"
"dup v27.8b, %52.b[11] \n" // k11
"smlal v28.8h, v17.8b, v25.8b \n"
"dup v24.8b, %52.b[12] \n" // k12
"smlal v28.8h, v18.8b, v26.8b \n"
"dup v25.8b, %52.b[13] \n" // k13
"smlal v28.8h, v19.8b, v27.8b \n"
"dup v26.8b, %52.b[14] \n" // k14
"smlal v28.8h, v20.8b, v24.8b \n"
"dup v27.8b, %52.b[15] \n" // k15
"smlal v28.8h, v21.8b, v25.8b \n"
"dup v24.8b, %53.b[0] \n" // k00
"smlal v28.8h, v22.8b, v26.8b \n"
"dup v25.8b, %53.b[1] \n" // k01
"smlal v28.8h, v23.8b, v27.8b \n"
"dup v26.8b, %53.b[2] \n" // k02
"saddw v29.4s, v29.4s, v28.4h \n"
"dup v27.8b, %53.b[3] \n" // k03
"st1 {v29.4s}, [%3], #16 \n"
//########################################### //sum 2
"smull v28.8h, v8.8b, v24.8b \n"
"dup v24.8b, %53.b[4] \n" // k04
"smlal v28.8h, v9.8b, v25.8b \n"
"dup v25.8b, %53.b[5] \n" // k05
"smlal v28.8h, v10.8b, v26.8b \n"
"dup v26.8b, %53.b[6] \n" // k06
"smlal v28.8h, v11.8b, v27.8b \n"
"dup v27.8b, %53.b[7] \n" // k07
"smlal v28.8h, v12.8b, v24.8b \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v29.4s}, [%4] \n" // sum3
"smlal v28.8h, v13.8b, v25.8b \n"
"dup v24.8b, %53.b[8] \n" // k08
"smlal v28.8h, v14.8b, v26.8b \n"
"dup v25.8b, %53.b[9] \n" // k09
"smlal v28.8h, v15.8b, v27.8b \n"
"dup v26.8b, %53.b[10] \n" // k10
"smlal v28.8h, v16.8b, v24.8b \n"
"dup v27.8b, %53.b[11] \n" // k11
"smlal v28.8h, v17.8b, v25.8b \n"
"dup v24.8b, %53.b[12] \n" // k12
"smlal v28.8h, v18.8b, v26.8b \n"
"dup v25.8b, %53.b[13] \n" // k13
"smlal v28.8h, v19.8b, v27.8b \n"
"dup v26.8b, %53.b[14] \n" // k14
"smlal v28.8h, v20.8b, v24.8b \n"
"dup v27.8b, %53.b[15] \n" // k15
"smlal v28.8h, v21.8b, v25.8b \n"
"dup v24.8b, %54.b[0] \n" // k00
"smlal v28.8h, v22.8b, v26.8b \n"
"dup v25.8b, %54.b[1] \n" // k01
"smlal v28.8h, v23.8b, v27.8b \n"
"dup v26.8b, %54.b[2] \n" // k02
"saddw v29.4s, v29.4s, v28.4h \n"
"dup v27.8b, %54.b[3] \n" // k03
"st1 {v29.4s}, [%4], #16 \n"
//########################################### // sum3
"smull v28.8h, v8.8b, v24.8b \n"
"dup v24.8b, %54.b[4] \n" // k04
"smlal v28.8h, v9.8b, v25.8b \n"
"dup v25.8b, %54.b[5] \n" // k05
"smlal v28.8h, v10.8b, v26.8b \n"
"dup v26.8b, %54.b[6] \n" // k06
"smlal v28.8h, v11.8b, v27.8b \n"
"dup v27.8b, %54.b[7] \n" // k07
"smlal v28.8h, v12.8b, v24.8b \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v29.4s}, [%5] \n" // sum4
"smlal v28.8h, v13.8b, v25.8b \n"
"dup v24.8b, %54.b[8] \n" // k08
"smlal v28.8h, v14.8b, v26.8b \n"
"dup v25.8b, %54.b[9] \n" // k09
"smlal v28.8h, v15.8b, v27.8b \n"
"dup v26.8b, %54.b[10] \n" // k10
"smlal v28.8h, v16.8b, v24.8b \n"
"dup v27.8b, %54.b[11] \n" // k11
"smlal v28.8h, v17.8b, v25.8b \n"
"dup v24.8b, %54.b[12] \n" // k12
"smlal v28.8h, v18.8b, v26.8b \n"
"dup v25.8b, %54.b[13] \n" // k13
"smlal v28.8h, v19.8b, v27.8b \n"
"dup v26.8b, %54.b[14] \n" // k14
"smlal v28.8h, v20.8b, v24.8b \n"
"dup v27.8b, %54.b[15] \n" // k15
"smlal v28.8h, v21.8b, v25.8b \n"
"dup v24.8b, %55.b[0] \n" // k00
"smlal v28.8h, v22.8b, v26.8b \n"
"dup v25.8b, %55.b[1] \n" // k01
"smlal v28.8h, v23.8b, v27.8b \n"
"dup v26.8b, %55.b[2] \n" // k02
"saddw v29.4s, v29.4s, v28.4h \n"
"dup v27.8b, %55.b[3] \n" // k03
"st1 {v29.4s}, [%5], #16 \n"
//########################################### // sum4
"smull v28.8h, v8.8b, v24.8b \n"
"dup v24.8b, %55.b[4] \n" // k04
"smlal v28.8h, v9.8b, v25.8b \n"
"dup v25.8b, %55.b[5] \n" // k05
"smlal v28.8h, v10.8b, v26.8b \n"
"dup v26.8b, %55.b[6] \n" // k06
"smlal v28.8h, v11.8b, v27.8b \n"
"dup v27.8b, %55.b[7] \n" // k07
"smlal v28.8h, v12.8b, v24.8b \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v29.4s}, [%6] \n" // sum5
"smlal v28.8h, v13.8b, v25.8b \n"
"dup v24.8b, %55.b[8] \n" // k08
"smlal v28.8h, v14.8b, v26.8b \n"
"dup v25.8b, %55.b[9] \n" // k09
"smlal v28.8h, v15.8b, v27.8b \n"
"dup v26.8b, %55.b[10] \n" // k10
"smlal v28.8h, v16.8b, v24.8b \n"
"dup v27.8b, %55.b[11] \n" // k11
"smlal v28.8h, v17.8b, v25.8b \n"
"dup v24.8b, %55.b[12] \n" // k12
"smlal v28.8h, v18.8b, v26.8b \n"
"dup v25.8b, %55.b[13] \n" // k13
"smlal v28.8h, v19.8b, v27.8b \n"
"dup v26.8b, %55.b[14] \n" // k14
"smlal v28.8h, v20.8b, v24.8b \n"
"dup v27.8b, %55.b[15] \n" // k15
"smlal v28.8h, v21.8b, v25.8b \n"
"dup v24.8b, %56.b[0] \n" // k00
"smlal v28.8h, v22.8b, v26.8b \n"
"dup v25.8b, %56.b[1] \n" // k01
"smlal v28.8h, v23.8b, v27.8b \n"
"dup v26.8b, %56.b[2] \n" // k02
"saddw v29.4s, v29.4s, v28.4h \n"
"dup v27.8b, %56.b[3] \n" // k03
"st1 {v29.4s}, [%6], #16 \n"
//########################################### // sum5
"smull v28.8h, v8.8b, v24.8b \n"
"dup v24.8b, %56.b[4] \n" // k04
"smlal v28.8h, v9.8b, v25.8b \n"
"dup v25.8b, %56.b[5] \n" // k05
"smlal v28.8h, v10.8b, v26.8b \n"
"dup v26.8b, %56.b[6] \n" // k06
"smlal v28.8h, v11.8b, v27.8b \n"
"dup v27.8b, %56.b[7] \n" // k07
"smlal v28.8h, v12.8b, v24.8b \n"
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v29.4s}, [%7] \n" // sum6
"smlal v28.8h, v13.8b, v25.8b \n"
"dup v24.8b, %56.b[8] \n" // k08
"smlal v28.8h, v14.8b, v26.8b \n"
"dup v25.8b, %56.b[9] \n" // k09
"smlal v28.8h, v15.8b, v27.8b \n"
"dup v26.8b, %56.b[10] \n" // k10
"smlal v28.8h, v16.8b, v24.8b \n"
"dup v27.8b, %56.b[11] \n" // k11
"smlal v28.8h, v17.8b, v25.8b \n"
"dup v24.8b, %56.b[12] \n" // k12
"smlal v28.8h, v18.8b, v26.8b \n"
"dup v25.8b, %56.b[13] \n" // k13
"smlal v28.8h, v19.8b, v27.8b \n"
"dup v26.8b, %56.b[14] \n" // k14
"smlal v28.8h, v20.8b, v24.8b \n"
"dup v27.8b, %56.b[15] \n" // k15
"smlal v28.8h, v21.8b, v25.8b \n"
"dup v24.8b, %57.b[0] \n" // k00
"smlal v28.8h, v22.8b, v26.8b \n"
"dup v25.8b, %57.b[1] \n" // k01
"smlal v28.8h, v23.8b, v27.8b \n"
"dup v26.8b, %57.b[2] \n" // k02
"saddw v29.4s, v29.4s, v28.4h \n"
"saddw2 v30.4s, v30.4s, v28.8h \n"
"dup v27.8b, %57.b[3] \n" // k03
"st1 {v29.4s}, [%7], #16 \n"
//########################################### // sum6
"smull v28.8h, v8.8b, v24.8b \n"
"dup v24.8b, %57.b[4] \n" // k04
"smlal v28.8h, v9.8b, v25.8b \n"
"dup v25.8b, %57.b[5] \n" // k05
"smlal v28.8h, v10.8b, v26.8b \n"
"dup v26.8b, %57.b[6] \n" // k06
"smlal v28.8h, v11.8b, v27.8b \n"
"dup v27.8b, %57.b[7] \n" // k07
"smlal v28.8h, v12.8b, v24.8b \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v29.4s}, [%8] \n" // sum7
"smlal v28.8h, v13.8b, v25.8b \n"
"dup v24.8b, %57.b[8] \n" // k08
"smlal v28.8h, v14.8b, v26.8b \n"
"dup v25.8b, %57.b[9] \n" // k09
"smlal v28.8h, v15.8b, v27.8b \n"
"dup v26.8b, %57.b[10] \n" // k10
"smlal v28.8h, v16.8b, v24.8b \n"
"dup v27.8b, %57.b[11] \n" // k11
"smlal v28.8h, v17.8b, v25.8b \n"
"dup v24.8b, %57.b[12] \n" // k12
"smlal v28.8h, v18.8b, v26.8b \n"
"dup v25.8b, %57.b[13] \n" // k13
"smlal v28.8h, v19.8b, v27.8b \n"
"dup v26.8b, %57.b[14] \n" // k14
"smlal v28.8h, v20.8b, v24.8b \n"
"dup v27.8b, %57.b[15] \n" // k15
"smlal v28.8h, v21.8b, v25.8b \n"
"sub %9, %9, #4 \n"
"smlal v28.8h, v22.8b, v26.8b \n"
"sub %10, %10, #4 \n"
"sub %11, %11, #4 \n"
"sub %12, %12, #4 \n"
"smlal v28.8h, v23.8b, v27.8b \n"
"sub %13, %13, #4 \n"
"sub %14, %14, #4 \n"
"sub %15, %15, #4 \n"
"sub %16, %16, #4 \n"
"saddw v29.4s, v29.4s, v28.4h \n"
"sub %17, %17, #4 \n"
"sub %18, %18, #4 \n"
"sub %19, %19, #4 \n"
"sub %20, %20, #4 \n"
"st1 {v29.4s}, [%8], #16 \n"
//########################################### // sum7
"sub %21, %21, #4 \n"
"sub %22, %22, #4 \n"
"sub %23, %23, #4 \n"
"sub %24, %24, #4 \n"
: "=r"(nn), // %0
"=r"(outptr0),// %1
"=r"(outptr1),// %2
"=r"(outptr2),// %3
"=r"(outptr3),// %4
"=r"(outptr4),// %5
"=r"(outptr5),// %6
"=r"(outptr6),// %7
"=r"(outptr7),// %8
"=r"(r0), // %9
"=r"(r1), // %10
"=r"(r2), // %11
"=r"(r3), // %12
"=r"(r4), // %13
"=r"(r5), // %14
"=r"(r6), // %15
"=r"(r7), // %16
"=r"(r8), // %17
"=r"(r9), // %18
"=r"(r10), // %19
"=r"(r11), // %20
"=r"(r12), // %21
"=r"(r13), // %22
"=r"(r14), // %23
"=r"(r15) // %24
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(r0),
"10"(r1),
"11"(r2),
"12"(r3),
"13"(r4),
"14"(r5),
"15"(r6),
"16"(r7),
"17"(r8),
"18"(r9),
"19"(r10),
"20"(r11),
"21"(r12),
"22"(r13),
"23"(r14),
"24"(r15),
"w"(_k0), // %50
"w"(_k1), // %51
"w"(_k2), // %52
"w"(_k3), // %53
"w"(_k4), // %54
"w"(_k5), // %55
"w"(_k6), // %56
"w"(_k7) // %57
: "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
for (; remain>0; remain--)
{
// TODO neon optimize
int sum0 = (int)*r0 * kernel0[0] + *r1 * kernel0[1] + *r2 * kernel0[2] + *r3 * kernel0[3] + *r4 * kernel0[4] + *r5 * kernel0[5] + *r6 * kernel0[6] + *r7 * kernel0[7] + *r8 * kernel0[8] + *r9 * kernel0[9] + *r10 * kernel0[10] + *r11 * kernel0[11] + *r12 * kernel0[12] + *r13 * kernel0[13] + *r14 * kernel0[14] + *r15 * kernel0[15];
int sum1 = (int)*r0 * kernel1[0] + *r1 * kernel1[1] + *r2 * kernel1[2] + *r3 * kernel1[3] + *r4 * kernel1[4] + *r5 * kernel1[5] + *r6 * kernel1[6] + *r7 * kernel1[7] + *r8 * kernel1[8] + *r9 * kernel1[9] + *r10 * kernel1[10] + *r11 * kernel1[11] + *r12 * kernel1[12] + *r13 * kernel1[13] + *r14 * kernel1[14] + *r15 * kernel1[15];
int sum2 = (int)*r0 * kernel2[0] + *r1 * kernel2[1] + *r2 * kernel2[2] + *r3 * kernel2[3] + *r4 * kernel2[4] + *r5 * kernel2[5] + *r6 * kernel2[6] + *r7 * kernel2[7] + *r8 * kernel2[8] + *r9 * kernel2[9] + *r10 * kernel2[10] + *r11 * kernel2[11] + *r12 * kernel2[12] + *r13 * kernel2[13] + *r14 * kernel2[14] + *r15 * kernel2[15];
int sum3 = (int)*r0 * kernel3[0] + *r1 * kernel3[1] + *r2 * kernel3[2] + *r3 * kernel3[3] + *r4 * kernel3[4] + *r5 * kernel3[5] + *r6 * kernel3[6] + *r7 * kernel3[7] + *r8 * kernel3[8] + *r9 * kernel3[9] + *r10 * kernel3[10] + *r11 * kernel3[11] + *r12 * kernel3[12] + *r13 * kernel3[13] + *r14 * kernel3[14] + *r15 * kernel3[15];
int sum4 = (int)*r0 * kernel4[0] + *r1 * kernel4[1] + *r2 * kernel4[2] + *r3 * kernel4[3] + *r4 * kernel4[4] + *r5 * kernel4[5] + *r6 * kernel4[6] + *r7 * kernel4[7] + *r8 * kernel4[8] + *r9 * kernel4[9] + *r10 * kernel4[10] + *r11 * kernel4[11] + *r12 * kernel4[12] + *r13 * kernel4[13] + *r14 * kernel4[14] + *r15 * kernel4[15];
int sum5 = (int)*r0 * kernel5[0] + *r1 * kernel5[1] + *r2 * kernel5[2] + *r3 * kernel5[3] + *r4 * kernel5[4] + *r5 * kernel5[5] + *r6 * kernel5[6] + *r7 * kernel5[7] + *r8 * kernel5[8] + *r9 * kernel5[9] + *r10 * kernel5[10] + *r11 * kernel5[11] + *r12 * kernel5[12] + *r13 * kernel5[13] + *r14 * kernel5[14] + *r15 * kernel5[15];
int sum6 = (int)*r0 * kernel6[0] + *r1 * kernel6[1] + *r2 * kernel6[2] + *r3 * kernel6[3] + *r4 * kernel6[4] + *r5 * kernel6[5] + *r6 * kernel6[6] + *r7 * kernel6[7] + *r8 * kernel6[8] + *r9 * kernel6[9] + *r10 * kernel6[10] + *r11 * kernel6[11] + *r12 * kernel6[12] + *r13 * kernel6[13] + *r14 * kernel6[14] + *r15 * kernel6[15];
int sum7 = (int)*r0 * kernel7[0] + *r1 * kernel7[1] + *r2 * kernel7[2] + *r3 * kernel7[3] + *r4 * kernel7[4] + *r5 * kernel7[5] + *r6 * kernel7[6] + *r7 * kernel7[7] + *r8 * kernel7[8] + *r9 * kernel7[9] + *r10 * kernel7[10] + *r11 * kernel7[11] + *r12 * kernel7[12] + *r13 * kernel7[13] + *r14 * kernel7[14] + *r15 * kernel7[15];
*outptr0 += sum0;
*outptr1 += sum1;
*outptr2 += sum2;
*outptr3 += sum3;
*outptr4 += sum4;
*outptr5 += sum5;
*outptr6 += sum6;
*outptr7 += sum7;
r0++;
r1++;
r2++;
r3++;
r4++;
r5++;
r6++;
r7++;
r8++;
r9++;
r10++;
r11++;
r12++;
r13++;
r14++;
r15++;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
outptr4++;
outptr5++;
outptr6++;
outptr7++;
}
}
#else // GCC limits the number of inline asm operands to fewer than 30
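// Alternative path for compilers that cap the number of inline-asm operands:
// the eight kernel rows are first copied into v0-v7 by a small separate asm
// block, so the main loop only needs the output and input row pointers.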
for (; q+7<inch; q+=8)
{
int* outptr0 = out0;
int* outptr1 = out1;
int* outptr2 = out2;
int* outptr3 = out3;
int* outptr4 = out4;
int* outptr5 = out5;
int* outptr6 = out6;
int* outptr7 = out7;
const signed char* kernel0 = (const signed char*)kernel + p*inch + q;
const signed char* kernel1 = (const signed char*)kernel + (p+1)*inch + q;
const signed char* kernel2 = (const signed char*)kernel + (p+2)*inch + q;
const signed char* kernel3 = (const signed char*)kernel + (p+3)*inch + q;
const signed char* kernel4 = (const signed char*)kernel + (p+4)*inch + q;
const signed char* kernel5 = (const signed char*)kernel + (p+5)*inch + q;
const signed char* kernel6 = (const signed char*)kernel + (p+6)*inch + q;
const signed char* kernel7 = (const signed char*)kernel + (p+7)*inch + q;
const signed char* r0 = bottom_blob.channel(q);
const signed char* r1 = bottom_blob.channel(q+1);
const signed char* r2 = bottom_blob.channel(q+2);
const signed char* r3 = bottom_blob.channel(q+3);
const signed char* r4 = bottom_blob.channel(q+4);
const signed char* r5 = bottom_blob.channel(q+5);
const signed char* r6 = bottom_blob.channel(q+6);
const signed char* r7 = bottom_blob.channel(q+7);
int size = outw * outh;
int nn = size >> 4;
int remain = size & 15;
asm volatile(
"ld1 {v0.16b}, [%0] \n"
"ld1 {v1.16b}, [%1] \n"
"ld1 {v2.16b}, [%2] \n"
"ld1 {v3.16b}, [%3] \n"
"ld1 {v4.16b}, [%4] \n"
"ld1 {v5.16b}, [%5] \n"
"ld1 {v6.16b}, [%6] \n"
"ld1 {v7.16b}, [%7] \n"
:
: "r"(kernel0),
"r"(kernel1),
"r"(kernel2),
"r"(kernel3),
"r"(kernel4),
"r"(kernel5),
"r"(kernel6),
"r"(kernel7)
: "cc", "memory"
);
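// From here on v0-v7 hold the kernel rows for output channels 0-7; the loop
// below broadcasts individual lanes of them into v16-v23 as needed.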
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%18, #128] \n"
"prfm pldl1keep, [%19, #128] \n"
"prfm pldl1keep, [%20, #128] \n"
"prfm pldl1keep, [%21, #128] \n"
"prfm pldl1keep, [%22, #128] \n"
"prfm pldl1keep, [%23, #128] \n"
"prfm pldl1keep, [%24, #128] \n"
"prfm pldl1keep, [%25, #128] \n"
"ld1 {v8.16b}, [%18], #16 \n" // r0"
"ld1 {v9.16b}, [%19], #16 \n" // r1"
"ld1 {v10.16b}, [%20], #16 \n" // r2"
"ld1 {v11.16b}, [%21], #16 \n" // r3"
"ld1 {v12.16b}, [%22], #16 \n" // r4"
"ld1 {v13.16b}, [%23], #16 \n" // r5"
"ld1 {v14.16b}, [%24], #16 \n" // r6"
"ld1 {v15.16b}, [%25], #16 \n" // r7"
"0: \n"
"dup v16.16b, v0.16b[0] \n" // k00
"dup v17.16b, v0.16b[1] \n" // k01
"dup v18.16b, v0.16b[2] \n" // k02
"dup v19.16b, v0.16b[3] \n" // k03
"dup v20.16b, v0.16b[4] \n" // k04
"dup v21.16b, v0.16b[5] \n" // k05
"dup v22.16b, v0.16b[6] \n" // k06
"dup v23.16b, v0.16b[7] \n" // k07
"smull v24.8h, v8.8b, v16.8b \n" // r0 * k0
"smull2 v25.8h, v8.16b, v16.16b \n"
"smlal v24.8h, v9.8b, v17.8b \n" // r0 * k1
"smlal2 v25.8h, v9.16b, v17.16b \n"
"dup v16.16b, v1.16b[0] \n" // k00
"smlal v24.8h, v10.8b, v18.8b \n" // r0 * k2
"smlal2 v25.8h, v10.16b, v18.16b \n"
"dup v17.16b, v1.16b[1] \n" // k01
"smlal v24.8h, v11.8b, v19.8b \n" // r0 * k3
"smlal2 v25.8h, v11.16b, v19.16b \n"
"dup v18.16b, v1.16b[2] \n" // k02
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v26.4s, v27.4s}, [%1] \n" // sum0
"smlal v24.8h, v12.8b, v20.8b \n" // r0 * k4
"smlal2 v25.8h, v12.16b, v20.16b \n"
"dup v19.16b, v1.16b[3] \n" // k03
"smlal v24.8h, v13.8b, v21.8b \n" // r0 * k5
"smlal2 v25.8h, v13.16b, v21.16b \n"
"dup v20.16b, v1.16b[4] \n" // k04
"smlal v24.8h, v14.8b, v22.8b \n" // r0 * k6
"smlal2 v25.8h, v14.16b, v22.16b \n"
"dup v21.16b, v1.16b[5] \n" // k05
"smlal v24.8h, v15.8b, v23.8b \n" // r0 * k7
"smlal2 v25.8h, v15.16b, v23.16b \n"
"saddw v26.4s, v26.4s, v24.4h \n"
"saddw2 v27.4s, v27.4s, v24.8h \n"
"st1 {v26.4s, v27.4s}, [%1], #32 \n"
"ld1 {v28.4s, v29.4s}, [%1] \n" // sum0n
"dup v22.16b, v1.16b[6] \n" // k06
"dup v23.16b, v1.16b[7] \n" // k07
"saddw v28.4s, v28.4s, v25.4h \n"
"saddw2 v29.4s, v29.4s, v25.8h \n"
"st1 {v28.4s, v29.4s}, [%1], #32 \n"
//###########################################
"smull v24.8h, v8.8b, v16.8b \n" // r0 * k0
"smull2 v25.8h, v8.16b, v16.16b \n"
"smlal v24.8h, v9.8b, v17.8b \n" // r0 * k1
"smlal2 v25.8h, v9.16b, v17.16b \n"
"dup v16.16b, v2.16b[0] \n" // k00
"smlal v24.8h, v10.8b, v18.8b \n" // r0 * k2
"smlal2 v25.8h, v10.16b, v18.16b \n"
"dup v17.16b, v2.16b[1] \n" // k01
"smlal v24.8h, v11.8b, v19.8b \n" // r0 * k3
"smlal2 v25.8h, v11.16b, v19.16b \n"
"dup v18.16b, v2.16b[2] \n" // k02
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v26.4s, v27.4s}, [%2] \n" // sum1
"smlal v24.8h, v12.8b, v20.8b \n" // r0 * k4
"smlal2 v25.8h, v12.16b, v20.16b \n"
"dup v19.16b, v2.16b[3] \n" // k03
"smlal v24.8h, v13.8b, v21.8b \n" // r0 * k5
"smlal2 v25.8h, v13.16b, v21.16b \n"
"dup v20.16b, v2.16b[4] \n" // k04
"smlal v24.8h, v14.8b, v22.8b \n" // r0 * k6
"smlal2 v25.8h, v14.16b, v22.16b \n"
"dup v21.16b, v2.16b[5] \n" // k05
"smlal v24.8h, v15.8b, v23.8b \n" // r0 * k7
"smlal2 v25.8h, v15.16b, v23.16b \n"
"saddw v26.4s, v26.4s, v24.4h \n"
"saddw2 v27.4s, v27.4s, v24.8h \n"
"st1 {v26.4s, v27.4s}, [%2], #32 \n"
"ld1 {v28.4s, v29.4s}, [%2] \n" // sum1n
"dup v22.16b, v2.16b[6] \n" // k06
"dup v23.16b, v2.16b[7] \n" // k07
"saddw v28.4s, v28.4s, v25.4h \n"
"saddw2 v29.4s, v29.4s, v25.8h \n"
"st1 {v28.4s, v29.4s}, [%2], #32 \n"
//###########################################
"smull v24.8h, v8.8b, v16.8b \n" // r0 * k0
"smull2 v25.8h, v8.16b, v16.16b \n"
"smlal v24.8h, v9.8b, v17.8b \n" // r0 * k1
"smlal2 v25.8h, v9.16b, v17.16b \n"
"dup v16.16b, v3.16b[0] \n" // k00
"smlal v24.8h, v10.8b, v18.8b \n" // r0 * k2
"smlal2 v25.8h, v10.16b, v18.16b \n"
"dup v17.16b, v3.16b[1] \n" // k01
"smlal v24.8h, v11.8b, v19.8b \n" // r0 * k3
"smlal2 v25.8h, v11.16b, v19.16b \n"
"dup v18.16b, v3.16b[2] \n" // k02
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v26.4s, v27.4s}, [%3] \n" // sum2
"smlal v24.8h, v12.8b, v20.8b \n" // r0 * k4
"smlal2 v25.8h, v12.16b, v20.16b \n"
"dup v19.16b, v3.16b[3] \n" // k03
"smlal v24.8h, v13.8b, v21.8b \n" // r0 * k5
"smlal2 v25.8h, v13.16b, v21.16b \n"
"dup v20.16b, v3.16b[4] \n" // k04
"smlal v24.8h, v14.8b, v22.8b \n" // r0 * k6
"smlal2 v25.8h, v14.16b, v22.16b \n"
"dup v21.16b, v3.16b[5] \n" // k05
"smlal v24.8h, v15.8b, v23.8b \n" // r0 * k7
"smlal2 v25.8h, v15.16b, v23.16b \n"
"saddw v26.4s, v26.4s, v24.4h \n"
"saddw2 v27.4s, v27.4s, v24.8h \n"
"st1 {v26.4s, v27.4s}, [%3], #32 \n"
"ld1 {v28.4s, v29.4s}, [%3] \n" // sum2n
"dup v22.16b, v3.16b[6] \n" // k06
"dup v23.16b, v3.16b[7] \n" // k07
"saddw v28.4s, v28.4s, v25.4h \n"
"saddw2 v29.4s, v29.4s, v25.8h \n"
"st1 {v28.4s, v29.4s}, [%3], #32 \n"
//##########################################
"smull v24.8h, v8.8b, v16.8b \n" // r0 * k0
"smull2 v25.8h, v8.16b, v16.16b \n"
"smlal v24.8h, v9.8b, v17.8b \n" // r0 * k1
"smlal2 v25.8h, v9.16b, v17.16b \n"
"dup v16.16b, v4.16b[0] \n" // k00
"smlal v24.8h, v10.8b, v18.8b \n" // r0 * k2
"smlal2 v25.8h, v10.16b, v18.16b \n"
"dup v17.16b, v4.16b[1] \n" // k01
"smlal v24.8h, v11.8b, v19.8b \n" // r0 * k3
"smlal2 v25.8h, v11.16b, v19.16b \n"
"dup v18.16b, v4.16b[2] \n" // k02
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v26.4s, v27.4s}, [%4] \n" // sum3
"smlal v24.8h, v12.8b, v20.8b \n" // r0 * k4
"smlal2 v25.8h, v12.16b, v20.16b \n"
"dup v19.16b, v4.16b[3] \n" // k03
"smlal v24.8h, v13.8b, v21.8b \n" // r0 * k5
"smlal2 v25.8h, v13.16b, v21.16b \n"
"dup v20.16b, v4.16b[4] \n" // k04
"smlal v24.8h, v14.8b, v22.8b \n" // r0 * k6
"smlal2 v25.8h, v14.16b, v22.16b \n"
"dup v21.16b, v4.16b[5] \n" // k05
"smlal v24.8h, v15.8b, v23.8b \n" // r0 * k7
"smlal2 v25.8h, v15.16b, v23.16b \n"
"saddw v26.4s, v26.4s, v24.4h \n"
"saddw2 v27.4s, v27.4s, v24.8h \n"
"st1 {v26.4s, v27.4s}, [%4], #32 \n"
"ld1 {v28.4s, v29.4s}, [%4] \n" // sum3n
"dup v22.16b, v4.16b[6] \n" // k06
"dup v23.16b, v4.16b[7] \n" // k07
"saddw v28.4s, v28.4s, v25.4h \n"
"saddw2 v29.4s, v29.4s, v25.8h \n"
"st1 {v28.4s, v29.4s}, [%4], #32 \n"
//##########################################
"smull v24.8h, v8.8b, v16.8b \n" // r0 * k0
"smull2 v25.8h, v8.16b, v16.16b \n"
"smlal v24.8h, v9.8b, v17.8b \n" // r0 * k1
"smlal2 v25.8h, v9.16b, v17.16b \n"
"dup v16.16b, v5.16b[0] \n" // k00
"smlal v24.8h, v10.8b, v18.8b \n" // r0 * k2
"smlal2 v25.8h, v10.16b, v18.16b \n"
"dup v17.16b, v5.16b[1] \n" // k01
"smlal v24.8h, v11.8b, v19.8b \n" // r0 * k3
"smlal2 v25.8h, v11.16b, v19.16b \n"
"dup v18.16b, v5.16b[2] \n" // k02
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v26.4s, v27.4s}, [%5] \n" // sum4
"smlal v24.8h, v12.8b, v20.8b \n" // r0 * k4
"smlal2 v25.8h, v12.16b, v20.16b \n"
"dup v19.16b, v5.16b[3] \n" // k03
"smlal v24.8h, v13.8b, v21.8b \n" // r0 * k5
"smlal2 v25.8h, v13.16b, v21.16b \n"
"dup v20.16b, v5.16b[4] \n" // k04
"smlal v24.8h, v14.8b, v22.8b \n" // r0 * k6
"smlal2 v25.8h, v14.16b, v22.16b \n"
"dup v21.16b, v5.16b[5] \n" // k05
"smlal v24.8h, v15.8b, v23.8b \n" // r0 * k7
"smlal2 v25.8h, v15.16b, v23.16b \n"
"saddw v26.4s, v26.4s, v24.4h \n"
"saddw2 v27.4s, v27.4s, v24.8h \n"
"st1 {v26.4s, v27.4s}, [%5], #32 \n"
"ld1 {v28.4s, v29.4s}, [%5] \n" // sum4n
"dup v22.16b, v5.16b[6] \n" // k06
"dup v23.16b, v5.16b[7] \n" // k07
"saddw v28.4s, v28.4s, v25.4h \n"
"saddw2 v29.4s, v29.4s, v25.8h \n"
"st1 {v28.4s, v29.4s}, [%5], #32 \n"
//##########################################
"smull v24.8h, v8.8b, v16.8b \n" // r0 * k0
"smull2 v25.8h, v8.16b, v16.16b \n"
"smlal v24.8h, v9.8b, v17.8b \n" // r0 * k1
"smlal2 v25.8h, v9.16b, v17.16b \n"
"dup v16.16b, v6.16b[0] \n" // k00
"smlal v24.8h, v10.8b, v18.8b \n" // r0 * k2
"smlal2 v25.8h, v10.16b, v18.16b \n"
"dup v17.16b, v6.16b[1] \n" // k01
"smlal v24.8h, v11.8b, v19.8b \n" // r0 * k3
"smlal2 v25.8h, v11.16b, v19.16b \n"
"dup v18.16b, v6.16b[2] \n" // k02
"smlal v24.8h, v12.8b, v20.8b \n" // r0 * k4
"smlal2 v25.8h, v12.16b, v20.16b \n"
"dup v19.16b, v6.16b[3] \n" // k03
"smlal v24.8h, v13.8b, v21.8b \n" // r0 * k5
"smlal2 v25.8h, v13.16b, v21.16b \n"
"dup v20.16b, v6.16b[4] \n" // k04
"smlal v24.8h, v14.8b, v22.8b \n" // r0 * k6
"smlal2 v25.8h, v14.16b, v22.16b \n"
"dup v21.16b, v6.16b[5] \n" // k05
"smlal v24.8h, v15.8b, v23.8b \n" // r0 * k7
"smlal2 v25.8h, v15.16b, v23.16b \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v26.4s, v27.4s}, [%6] \n" // sum5
"saddw v26.4s, v26.4s, v24.4h \n"
"saddw2 v27.4s, v27.4s, v24.8h \n"
"st1 {v26.4s, v27.4s}, [%6], #32 \n"
"ld1 {v28.4s, v29.4s}, [%6] \n" // sum5n
"dup v22.16b, v6.16b[6] \n" // k06
"dup v23.16b, v6.16b[7] \n" // k07
"saddw v28.4s, v28.4s, v25.4h \n"
"saddw2 v29.4s, v29.4s, v25.8h \n"
"st1 {v28.4s, v29.4s}, [%6], #32 \n"
//##########################################
"smull v24.8h, v8.8b, v16.8b \n" // r0 * k0
"smull2 v25.8h, v8.16b, v16.16b \n"
"smlal v24.8h, v9.8b, v17.8b \n" // r0 * k1
"smlal2 v25.8h, v9.16b, v17.16b \n"
"dup v16.16b, v7.16b[0] \n" // k00
"smlal v24.8h, v10.8b, v18.8b \n" // r0 * k2
"smlal2 v25.8h, v10.16b, v18.16b \n"
"dup v17.16b, v7.16b[1] \n" // k01
"smlal v24.8h, v11.8b, v19.8b \n" // r0 * k3
"smlal2 v25.8h, v11.16b, v19.16b \n"
"dup v18.16b, v7.16b[2] \n" // k02
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v26.4s, v27.4s}, [%7] \n" // sum6
"smlal v24.8h, v12.8b, v20.8b \n" // r0 * k4
"smlal2 v25.8h, v12.16b, v20.16b \n"
"dup v19.16b, v7.16b[3] \n" // k03
"smlal v24.8h, v13.8b, v21.8b \n" // r0 * k5
"smlal2 v25.8h, v13.16b, v21.16b \n"
"dup v20.16b, v7.16b[4] \n" // k04
"smlal v24.8h, v14.8b, v22.8b \n" // r0 * k6
"smlal2 v25.8h, v14.16b, v22.16b \n"
"dup v21.16b, v7.16b[5] \n" // k05
"smlal v24.8h, v15.8b, v23.8b \n" // r0 * k7
"smlal2 v25.8h, v15.16b, v23.16b \n"
"saddw v26.4s, v26.4s, v24.4h \n"
"saddw2 v27.4s, v27.4s, v24.8h \n"
"st1 {v26.4s, v27.4s}, [%7], #32 \n"
"ld1 {v28.4s, v29.4s}, [%7] \n" // sum6n
"dup v22.16b, v7.16b[6] \n" // k06
"dup v23.16b, v7.16b[7] \n" // k07
"saddw v28.4s, v28.4s, v25.4h \n"
"saddw2 v29.4s, v29.4s, v25.8h \n"
"st1 {v28.4s, v29.4s}, [%7], #32 \n"
//##########################################
"smull v24.8h, v8.8b, v16.8b \n" // r0 * k0
"smull2 v25.8h, v8.16b, v16.16b \n"
"prfm pldl1keep, [%18, #128] \n"
"prfm pldl1keep, [%19, #128] \n"
"smlal v24.8h, v9.8b, v17.8b \n" // r0 * k1
"smlal2 v25.8h, v9.16b, v17.16b \n"
"prfm pldl1keep, [%20, #128] \n"
"prfm pldl1keep, [%21, #128] \n"
"smlal v24.8h, v10.8b, v18.8b \n" // r0 * k2
"smlal2 v25.8h, v10.16b, v18.16b \n"
"prfm pldl1keep, [%22, #128] \n"
"prfm pldl1keep, [%23, #128] \n"
"smlal v24.8h, v11.8b, v19.8b \n" // r0 * k3
"smlal2 v25.8h, v11.16b, v19.16b \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v26.4s, v27.4s}, [%8] \n" // sum7
"smlal v24.8h, v12.8b, v20.8b \n" // r0 * k4
"smlal2 v25.8h, v12.16b, v20.16b \n"
"prfm pldl1keep, [%24, #128] \n"
"prfm pldl1keep, [%25, #128] \n"
"smlal v24.8h, v13.8b, v21.8b \n" // r0 * k5
"smlal2 v25.8h, v13.16b, v21.16b \n"
"ld1 {v8.16b}, [%18], #16 \n" // r0"
"ld1 {v9.16b}, [%19], #16 \n" // r1"
"smlal v24.8h, v14.8b, v22.8b \n" // r0 * k6
"smlal2 v25.8h, v14.16b, v22.16b \n"
"ld1 {v10.16b}, [%20], #16 \n" // r2"
"ld1 {v11.16b}, [%21], #16 \n" // r3"
"smlal v24.8h, v15.8b, v23.8b \n" // r0 * k7
"smlal2 v25.8h, v15.16b, v23.16b \n"
"ld1 {v12.16b}, [%22], #16 \n" // r4"
"ld1 {v13.16b}, [%23], #16 \n" // r5"
"saddw v26.4s, v26.4s, v24.4h \n"
"saddw2 v27.4s, v27.4s, v24.8h \n"
"st1 {v26.4s, v27.4s}, [%8], #32 \n"
"ld1 {v28.4s, v29.4s}, [%8] \n" // sum7n
"ld1 {v14.16b}, [%24], #16 \n" // r6"
"ld1 {v15.16b}, [%25], #16 \n" // r7"
"saddw v28.4s, v28.4s, v25.4h \n"
"saddw2 v29.4s, v29.4s, v25.8h \n"
"st1 {v28.4s, v29.4s}, [%8], #32 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
"sub %18, %18, #16 \n"
"sub %19, %19, #16 \n"
"sub %20, %20, #16 \n"
"sub %21, %21, #16 \n"
"sub %22, %22, #16 \n"
"sub %23, %23, #16 \n"
"sub %24, %24, #16 \n"
"sub %25, %25, #16 \n"
//##########################################
: "=r"(nn), // %0
"=r"(outptr0),// %1
"=r"(outptr1),// %2
"=r"(outptr2),// %3
"=r"(outptr3),// %4
"=r"(outptr4),// %5
"=r"(outptr5),// %6
"=r"(outptr6),// %7
"=r"(outptr7) // %8
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"r"(r0), // %18
"r"(r1), // %19
"r"(r2), // %20
"r"(r3), // %21
"r"(r4), // %22
"r"(r5), // %23
"r"(r6), // %24
"r"(r7) // %25
: "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29"
);
}
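// Tail handling: the blocks below fire only when exactly 8 or exactly 4 output
// positions remain after the 16-wide main loop; any other remainder falls
// through to the scalar loop at the end.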
if (remain == 8)
{
remain -= 8;
asm volatile(
"prfm pldl1keep, [%18, #128] \n"
"prfm pldl1keep, [%19, #128] \n"
"prfm pldl1keep, [%20, #128] \n"
"prfm pldl1keep, [%21, #128] \n"
"prfm pldl1keep, [%22, #128] \n"
"prfm pldl1keep, [%23, #128] \n"
"prfm pldl1keep, [%24, #128] \n"
"prfm pldl1keep, [%25, #128] \n"
"ld1 {v8.8b}, [%18], #8 \n" // r0"
"ld1 {v9.8b}, [%19], #8 \n" // r1"
"ld1 {v10.8b}, [%20], #8 \n" // r2"
"ld1 {v11.8b}, [%21], #8 \n" // r3"
"ld1 {v12.8b}, [%22], #8 \n" // r4"
"ld1 {v13.8b}, [%23], #8 \n" // r5"
"ld1 {v14.8b}, [%24], #8 \n" // r6"
"ld1 {v15.8b}, [%25], #8 \n" // r7"
"dup v16.8b, v0.16b[0] \n" // k00
"dup v17.8b, v0.16b[1] \n" // k01
"dup v18.8b, v0.16b[2] \n" // k02
"dup v19.8b, v0.16b[3] \n" // k03
"dup v20.8b, v0.16b[4] \n" // k04
"dup v21.8b, v0.16b[5] \n" // k05
"dup v22.8b, v0.16b[6] \n" // k06
"dup v23.8b, v0.16b[7] \n" // k07
"smull v24.8h, v8.8b, v16.8b \n" // r0 * k0
"smlal v24.8h, v9.8b, v17.8b \n" // r0 * k1
"smlal v24.8h, v10.8b, v18.8b \n" // r0 * k2
"smlal v24.8h, v11.8b, v19.8b \n" // r0 * k3
"smlal v24.8h, v12.8b, v20.8b \n" // r0 * k4
"smlal v24.8h, v13.8b, v21.8b \n" // r0 * k5
"smlal v24.8h, v14.8b, v22.8b \n" // r0 * k6
"smlal v24.8h, v15.8b, v23.8b \n" // r0 * k7
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v26.4s, v27.4s}, [%1] \n" // sum0
"saddw v26.4s, v26.4s, v24.4h \n"
"saddw2 v27.4s, v27.4s, v24.8h \n"
"st1 {v26.4s, v27.4s}, [%1], #32 \n"
//###########################################
"dup v16.8b, v1.16b[0] \n" // k00
"dup v17.8b, v1.16b[1] \n" // k01
"dup v18.8b, v1.16b[2] \n" // k02
"dup v19.8b, v1.16b[3] \n" // k03
"dup v20.8b, v1.16b[4] \n" // k04
"dup v21.8b, v1.16b[5] \n" // k05
"dup v22.8b, v1.16b[6] \n" // k06
"dup v23.8b, v1.16b[7] \n" // k07
"smull v24.8h, v8.8b, v16.8b \n" // r0 * k0
"smlal v24.8h, v9.8b, v17.8b \n" // r0 * k1
"smlal v24.8h, v10.8b, v18.8b \n" // r0 * k2
"smlal v24.8h, v11.8b, v19.8b \n" // r0 * k3
"smlal v24.8h, v12.8b, v20.8b \n" // r0 * k4
"smlal v24.8h, v13.8b, v21.8b \n" // r0 * k5
"smlal v24.8h, v14.8b, v22.8b \n" // r0 * k6
"smlal v24.8h, v15.8b, v23.8b \n" // r0 * k7
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v26.4s, v27.4s}, [%2] \n" // sum1
"saddw v26.4s, v26.4s, v24.4h \n"
"saddw2 v27.4s, v27.4s, v24.8h \n"
"st1 {v26.4s, v27.4s}, [%2], #32 \n"
//###########################################
"dup v16.8b, v2.16b[0] \n" // k00
"dup v17.8b, v2.16b[1] \n" // k01
"dup v18.8b, v2.16b[2] \n" // k02
"dup v19.8b, v2.16b[3] \n" // k03
"dup v20.8b, v2.16b[4] \n" // k04
"dup v21.8b, v2.16b[5] \n" // k05
"dup v22.8b, v2.16b[6] \n" // k06
"dup v23.8b, v2.16b[7] \n" // k07
"smull v24.8h, v8.8b, v16.8b \n" // r0 * k0
"smlal v24.8h, v9.8b, v17.8b \n" // r0 * k1
"smlal v24.8h, v10.8b, v18.8b \n" // r0 * k2
"smlal v24.8h, v11.8b, v19.8b \n" // r0 * k3
"smlal v24.8h, v12.8b, v20.8b \n" // r0 * k4
"smlal v24.8h, v13.8b, v21.8b \n" // r0 * k5
"smlal v24.8h, v14.8b, v22.8b \n" // r0 * k6
"smlal v24.8h, v15.8b, v23.8b \n" // r0 * k7
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v26.4s, v27.4s}, [%3] \n" // sum2
"saddw v26.4s, v26.4s, v24.4h \n"
"saddw2 v27.4s, v27.4s, v24.8h \n"
"st1 {v26.4s, v27.4s}, [%3], #32 \n"
//##########################################
"dup v16.8b, v3.16b[0] \n" // k00
"dup v17.8b, v3.16b[1] \n" // k01
"dup v18.8b, v3.16b[2] \n" // k02
"dup v19.8b, v3.16b[3] \n" // k03
"dup v20.8b, v3.16b[4] \n" // k04
"dup v21.8b, v3.16b[5] \n" // k05
"dup v22.8b, v3.16b[6] \n" // k06
"dup v23.8b, v3.16b[7] \n" // k07
"smull v24.8h, v8.8b, v16.8b \n" // r0 * k0
"smlal v24.8h, v9.8b, v17.8b \n" // r0 * k1
"smlal v24.8h, v10.8b, v18.8b \n" // r0 * k2
"smlal v24.8h, v11.8b, v19.8b \n" // r0 * k3
"smlal v24.8h, v12.8b, v20.8b \n" // r0 * k4
"smlal v24.8h, v13.8b, v21.8b \n" // r0 * k5
"smlal v24.8h, v14.8b, v22.8b \n" // r0 * k6
"smlal v24.8h, v15.8b, v23.8b \n" // r0 * k7
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v26.4s, v27.4s}, [%4] \n" // sum3
"saddw v26.4s, v26.4s, v24.4h \n"
"saddw2 v27.4s, v27.4s, v24.8h \n"
"st1 {v26.4s, v27.4s}, [%4], #32 \n"
//##########################################
"dup v16.8b, v4.16b[0] \n" // k00
"dup v17.8b, v4.16b[1] \n" // k01
"dup v18.8b, v4.16b[2] \n" // k02
"dup v19.8b, v4.16b[3] \n" // k03
"dup v20.8b, v4.16b[4] \n" // k04
"dup v21.8b, v4.16b[5] \n" // k05
"dup v22.8b, v4.16b[6] \n" // k06
"dup v23.8b, v4.16b[7] \n" // k07
"smull v24.8h, v8.8b, v16.8b \n" // r0 * k0
"smlal v24.8h, v9.8b, v17.8b \n" // r0 * k1
"smlal v24.8h, v10.8b, v18.8b \n" // r0 * k2
"smlal v24.8h, v11.8b, v19.8b \n" // r0 * k3
"smlal v24.8h, v12.8b, v20.8b \n" // r0 * k4
"smlal v24.8h, v13.8b, v21.8b \n" // r0 * k5
"smlal v24.8h, v14.8b, v22.8b \n" // r0 * k6
"smlal v24.8h, v15.8b, v23.8b \n" // r0 * k7
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v26.4s, v27.4s}, [%5] \n" // sum4
"saddw v26.4s, v26.4s, v24.4h \n"
"saddw2 v27.4s, v27.4s, v24.8h \n"
"st1 {v26.4s, v27.4s}, [%5], #32 \n"
//##########################################
"dup v16.8b, v5.16b[0] \n" // k00
"dup v17.8b, v5.16b[1] \n" // k01
"dup v18.8b, v5.16b[2] \n" // k02
"dup v19.8b, v5.16b[3] \n" // k03
"dup v20.8b, v5.16b[4] \n" // k04
"dup v21.8b, v5.16b[5] \n" // k05
"dup v22.8b, v5.16b[6] \n" // k06
"dup v23.8b, v5.16b[7] \n" // k07
"smull v24.8h, v8.8b, v16.8b \n" // r0 * k0
"smlal v24.8h, v9.8b, v17.8b \n" // r0 * k1
"smlal v24.8h, v10.8b, v18.8b \n" // r0 * k2
"smlal v24.8h, v11.8b, v19.8b \n" // r0 * k3
"smlal v24.8h, v12.8b, v20.8b \n" // r0 * k4
"smlal v24.8h, v13.8b, v21.8b \n" // r0 * k5
"smlal v24.8h, v14.8b, v22.8b \n" // r0 * k6
"smlal v24.8h, v15.8b, v23.8b \n" // r0 * k7
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v26.4s, v27.4s}, [%6] \n" // sum5
"saddw v26.4s, v26.4s, v24.4h \n"
"saddw2 v27.4s, v27.4s, v24.8h \n"
"st1 {v26.4s, v27.4s}, [%6], #32 \n"
//##########################################
"dup v16.8b, v6.16b[0] \n" // k00
"dup v17.8b, v6.16b[1] \n" // k01
"dup v18.8b, v6.16b[2] \n" // k02
"dup v19.8b, v6.16b[3] \n" // k03
"dup v20.8b, v6.16b[4] \n" // k04
"dup v21.8b, v6.16b[5] \n" // k05
"dup v22.8b, v6.16b[6] \n" // k06
"dup v23.8b, v6.16b[7] \n" // k07
"smull v24.8h, v8.8b, v16.8b \n" // r0 * k0
"smlal v24.8h, v9.8b, v17.8b \n" // r0 * k1
"smlal v24.8h, v10.8b, v18.8b \n" // r0 * k2
"smlal v24.8h, v11.8b, v19.8b \n" // r0 * k3
"smlal v24.8h, v12.8b, v20.8b \n" // r0 * k4
"smlal v24.8h, v13.8b, v21.8b \n" // r0 * k5
"smlal v24.8h, v14.8b, v22.8b \n" // r0 * k6
"smlal v24.8h, v15.8b, v23.8b \n" // r0 * k7
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v26.4s, v27.4s}, [%7] \n" // sum6
"saddw v26.4s, v26.4s, v24.4h \n"
"saddw2 v27.4s, v27.4s, v24.8h \n"
"st1 {v26.4s, v27.4s}, [%7], #32 \n"
//##########################################
"dup v16.8b, v7.16b[0] \n" // k00
"dup v17.8b, v7.16b[1] \n" // k01
"dup v18.8b, v7.16b[2] \n" // k02
"dup v19.8b, v7.16b[3] \n" // k03
"dup v20.8b, v7.16b[4] \n" // k04
"dup v21.8b, v7.16b[5] \n" // k05
"dup v22.8b, v7.16b[6] \n" // k06
"dup v23.8b, v7.16b[7] \n" // k07
"smull v24.8h, v8.8b, v16.8b \n" // r0 * k0
"smlal v24.8h, v9.8b, v17.8b \n" // r0 * k1
"smlal v24.8h, v10.8b, v18.8b \n" // r0 * k2
"smlal v24.8h, v11.8b, v19.8b \n" // r0 * k3
"smlal v24.8h, v12.8b, v20.8b \n" // r0 * k4
"smlal v24.8h, v13.8b, v21.8b \n" // r0 * k5
"smlal v24.8h, v14.8b, v22.8b \n" // r0 * k6
"smlal v24.8h, v15.8b, v23.8b \n" // r0 * k7
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v26.4s, v27.4s}, [%8] \n" // sum7
"saddw v26.4s, v26.4s, v24.4h \n"
"saddw2 v27.4s, v27.4s, v24.8h \n"
"st1 {v26.4s, v27.4s}, [%8], #32 \n"
//##########################################
: "=r"(nn), // %0
"=r"(outptr0),// %1
"=r"(outptr1),// %2
"=r"(outptr2),// %3
"=r"(outptr3),// %4
"=r"(outptr4),// %5
"=r"(outptr5),// %6
"=r"(outptr6),// %7
"=r"(outptr7) // %8
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"r"(r0), // %18
"r"(r1), // %19
"r"(r2), // %20
"r"(r3), // %21
"r"(r4), // %22
"r"(r5), // %23
"r"(r6), // %24
"r"(r7) // %25
: "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29"
);
}
if (remain == 4)
{
remain -= 4;
asm volatile(
"ld1 {v8.8b}, [%18], #8 \n" // r0"
"ld1 {v9.8b}, [%19], #8 \n" // r1"
"ld1 {v10.8b}, [%20], #8 \n" // r2"
"ld1 {v11.8b}, [%21], #8 \n" // r3"
"ld1 {v12.8b}, [%22], #8 \n" // r4"
"ld1 {v13.8b}, [%23], #8 \n" // r5"
"ld1 {v14.8b}, [%24], #8 \n" // r6"
"ld1 {v15.8b}, [%25], #8 \n" // r7"
"dup v16.8b, v0.16b[0] \n" // k00
"dup v17.8b, v0.16b[1] \n" // k01
"dup v18.8b, v0.16b[2] \n" // k02
"dup v19.8b, v0.16b[3] \n" // k03
"dup v20.8b, v0.16b[4] \n" // k04
"dup v21.8b, v0.16b[5] \n" // k05
"dup v22.8b, v0.16b[6] \n" // k06
"dup v23.8b, v0.16b[7] \n" // k07
"smull v24.8h, v8.8b, v16.8b \n" // r0 * k0
"smlal v24.8h, v9.8b, v17.8b \n" // r0 * k1
"smlal v24.8h, v10.8b, v18.8b \n" // r0 * k2
"smlal v24.8h, v11.8b, v19.8b \n" // r0 * k3
"smlal v24.8h, v12.8b, v20.8b \n" // r0 * k4
"smlal v24.8h, v13.8b, v21.8b \n" // r0 * k5
"smlal v24.8h, v14.8b, v22.8b \n" // r0 * k6
"smlal v24.8h, v15.8b, v23.8b \n" // r0 * k7
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v26.4s}, [%1] \n" // sum0
"saddw v26.4s, v26.4s, v24.4h \n"
"st1 {v26.4s}, [%1], #16 \n"
//###########################################
"dup v16.8b, v1.16b[0] \n" // k00
"dup v17.8b, v1.16b[1] \n" // k01
"dup v18.8b, v1.16b[2] \n" // k02
"dup v19.8b, v1.16b[3] \n" // k03
"dup v20.8b, v1.16b[4] \n" // k04
"dup v21.8b, v1.16b[5] \n" // k05
"dup v22.8b, v1.16b[6] \n" // k06
"dup v23.8b, v1.16b[7] \n" // k07
"smull v24.8h, v8.8b, v16.8b \n" // r0 * k0
"smlal v24.8h, v9.8b, v17.8b \n" // r0 * k1
"smlal v24.8h, v10.8b, v18.8b \n" // r0 * k2
"smlal v24.8h, v11.8b, v19.8b \n" // r0 * k3
"smlal v24.8h, v12.8b, v20.8b \n" // r0 * k4
"smlal v24.8h, v13.8b, v21.8b \n" // r0 * k5
"smlal v24.8h, v14.8b, v22.8b \n" // r0 * k6
"smlal v24.8h, v15.8b, v23.8b \n" // r0 * k7
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v26.4s}, [%2] \n" // sum1
"saddw v26.4s, v26.4s, v24.4h \n"
"st1 {v26.4s}, [%2], #16 \n"
//###########################################
"dup v16.8b, v2.16b[0] \n" // k00
"dup v17.8b, v2.16b[1] \n" // k01
"dup v18.8b, v2.16b[2] \n" // k02
"dup v19.8b, v2.16b[3] \n" // k03
"dup v20.8b, v2.16b[4] \n" // k04
"dup v21.8b, v2.16b[5] \n" // k05
"dup v22.8b, v2.16b[6] \n" // k06
"dup v23.8b, v2.16b[7] \n" // k07
"smull v24.8h, v8.8b, v16.8b \n" // r0 * k0
"smlal v24.8h, v9.8b, v17.8b \n" // r0 * k1
"smlal v24.8h, v10.8b, v18.8b \n" // r0 * k2
"smlal v24.8h, v11.8b, v19.8b \n" // r0 * k3
"smlal v24.8h, v12.8b, v20.8b \n" // r0 * k4
"smlal v24.8h, v13.8b, v21.8b \n" // r0 * k5
"smlal v24.8h, v14.8b, v22.8b \n" // r0 * k6
"smlal v24.8h, v15.8b, v23.8b \n" // r0 * k7
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v26.4s}, [%3] \n" // sum2
"saddw v26.4s, v26.4s, v24.4h \n"
"st1 {v26.4s}, [%3], #16 \n"
//##########################################
"dup v16.8b, v3.16b[0] \n" // k00
"dup v17.8b, v3.16b[1] \n" // k01
"dup v18.8b, v3.16b[2] \n" // k02
"dup v19.8b, v3.16b[3] \n" // k03
"dup v20.8b, v3.16b[4] \n" // k04
"dup v21.8b, v3.16b[5] \n" // k05
"dup v22.8b, v3.16b[6] \n" // k06
"dup v23.8b, v3.16b[7] \n" // k07
"smull v24.8h, v8.8b, v16.8b \n" // r0 * k0
"smlal v24.8h, v9.8b, v17.8b \n" // r0 * k1
"smlal v24.8h, v10.8b, v18.8b \n" // r0 * k2
"smlal v24.8h, v11.8b, v19.8b \n" // r0 * k3
"smlal v24.8h, v12.8b, v20.8b \n" // r0 * k4
"smlal v24.8h, v13.8b, v21.8b \n" // r0 * k5
"smlal v24.8h, v14.8b, v22.8b \n" // r0 * k6
"smlal v24.8h, v15.8b, v23.8b \n" // r0 * k7
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v26.4s}, [%4] \n" // sum3
"saddw v26.4s, v26.4s, v24.4h \n"
"st1 {v26.4s}, [%4], #16 \n"
//##########################################
"dup v16.8b, v4.16b[0] \n" // k00
"dup v17.8b, v4.16b[1] \n" // k01
"dup v18.8b, v4.16b[2] \n" // k02
"dup v19.8b, v4.16b[3] \n" // k03
"dup v20.8b, v4.16b[4] \n" // k04
"dup v21.8b, v4.16b[5] \n" // k05
"dup v22.8b, v4.16b[6] \n" // k06
"dup v23.8b, v4.16b[7] \n" // k07
"smull v24.8h, v8.8b, v16.8b \n" // r0 * k0
"smlal v24.8h, v9.8b, v17.8b \n" // r0 * k1
"smlal v24.8h, v10.8b, v18.8b \n" // r0 * k2
"smlal v24.8h, v11.8b, v19.8b \n" // r0 * k3
"smlal v24.8h, v12.8b, v20.8b \n" // r0 * k4
"smlal v24.8h, v13.8b, v21.8b \n" // r0 * k5
"smlal v24.8h, v14.8b, v22.8b \n" // r0 * k6
"smlal v24.8h, v15.8b, v23.8b \n" // r0 * k7
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v26.4s}, [%5] \n" // sum4
"saddw v26.4s, v26.4s, v24.4h \n"
"st1 {v26.4s}, [%5], #16 \n"
//##########################################
"dup v16.8b, v5.16b[0] \n" // k00
"dup v17.8b, v5.16b[1] \n" // k01
"dup v18.8b, v5.16b[2] \n" // k02
"dup v19.8b, v5.16b[3] \n" // k03
"dup v20.8b, v5.16b[4] \n" // k04
"dup v21.8b, v5.16b[5] \n" // k05
"dup v22.8b, v5.16b[6] \n" // k06
"dup v23.8b, v5.16b[7] \n" // k07
"smull v24.8h, v8.8b, v16.8b \n" // r0 * k0
"smlal v24.8h, v9.8b, v17.8b \n" // r0 * k1
"smlal v24.8h, v10.8b, v18.8b \n" // r0 * k2
"smlal v24.8h, v11.8b, v19.8b \n" // r0 * k3
"smlal v24.8h, v12.8b, v20.8b \n" // r0 * k4
"smlal v24.8h, v13.8b, v21.8b \n" // r0 * k5
"smlal v24.8h, v14.8b, v22.8b \n" // r0 * k6
"smlal v24.8h, v15.8b, v23.8b \n" // r0 * k7
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v26.4s}, [%6] \n" // sum5
"saddw v26.4s, v26.4s, v24.4h \n"
"st1 {v26.4s}, [%6], #16 \n"
//##########################################
"dup v16.8b, v6.16b[0] \n" // k00
"dup v17.8b, v6.16b[1] \n" // k01
"dup v18.8b, v6.16b[2] \n" // k02
"dup v19.8b, v6.16b[3] \n" // k03
"dup v20.8b, v6.16b[4] \n" // k04
"dup v21.8b, v6.16b[5] \n" // k05
"dup v22.8b, v6.16b[6] \n" // k06
"dup v23.8b, v6.16b[7] \n" // k07
"smull v24.8h, v8.8b, v16.8b \n" // r0 * k0
"smlal v24.8h, v9.8b, v17.8b \n" // r0 * k1
"smlal v24.8h, v10.8b, v18.8b \n" // r0 * k2
"smlal v24.8h, v11.8b, v19.8b \n" // r0 * k3
"smlal v24.8h, v12.8b, v20.8b \n" // r0 * k4
"smlal v24.8h, v13.8b, v21.8b \n" // r0 * k5
"smlal v24.8h, v14.8b, v22.8b \n" // r0 * k6
"smlal v24.8h, v15.8b, v23.8b \n" // r0 * k7
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v26.4s}, [%7] \n" // sum6
"saddw v26.4s, v26.4s, v24.4h \n"
"st1 {v26.4s}, [%7], #16 \n"
//##########################################
"dup v16.8b, v7.16b[0] \n" // k00
"dup v17.8b, v7.16b[1] \n" // k01
"dup v18.8b, v7.16b[2] \n" // k02
"dup v19.8b, v7.16b[3] \n" // k03
"dup v20.8b, v7.16b[4] \n" // k04
"dup v21.8b, v7.16b[5] \n" // k05
"dup v22.8b, v7.16b[6] \n" // k06
"dup v23.8b, v7.16b[7] \n" // k07
"smull v24.8h, v8.8b, v16.8b \n" // r0 * k0
"smlal v24.8h, v9.8b, v17.8b \n" // r0 * k1
"smlal v24.8h, v10.8b, v18.8b \n" // r0 * k2
"smlal v24.8h, v11.8b, v19.8b \n" // r0 * k3
"smlal v24.8h, v12.8b, v20.8b \n" // r0 * k4
"smlal v24.8h, v13.8b, v21.8b \n" // r0 * k5
"smlal v24.8h, v14.8b, v22.8b \n" // r0 * k6
"smlal v24.8h, v15.8b, v23.8b \n" // r0 * k7
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v26.4s}, [%8] \n" // sum7
"saddw v26.4s, v26.4s, v24.4h \n"
"st1 {v26.4s}, [%8], #16 \n"
"sub %18, %18, #4 \n"
"sub %19, %19, #4 \n"
"sub %20, %20, #4 \n"
"sub %21, %21, #4 \n"
"sub %22, %22, #4 \n"
"sub %23, %23, #4 \n"
"sub %24, %24, #4 \n"
"sub %25, %25, #4 \n"
//##########################################
: "=r"(nn), // %0
"=r"(outptr0),// %1
"=r"(outptr1),// %2
"=r"(outptr2),// %3
"=r"(outptr3),// %4
"=r"(outptr4),// %5
"=r"(outptr5),// %6
"=r"(outptr6),// %7
"=r"(outptr7) // %8
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"r"(r0), // %18
"r"(r1), // %19
"r"(r2), // %20
"r"(r3), // %21
"r"(r4), // %22
"r"(r5), // %23
"r"(r6), // %24
"r"(r7) // %25
: "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29"
);
}
for (; remain>0; remain--)
{
// TODO neon optimize
int sum0 = (int)*r0 * kernel0[0] + *r1 * kernel0[1] + *r2 * kernel0[2] + *r3 * kernel0[3] + *r4 * kernel0[4] + *r5 * kernel0[5] + *r6 * kernel0[6] + *r7 * kernel0[7];
int sum1 = (int)*r0 * kernel1[0] + *r1 * kernel1[1] + *r2 * kernel1[2] + *r3 * kernel1[3] + *r4 * kernel1[4] + *r5 * kernel1[5] + *r6 * kernel1[6] + *r7 * kernel1[7];
int sum2 = (int)*r0 * kernel2[0] + *r1 * kernel2[1] + *r2 * kernel2[2] + *r3 * kernel2[3] + *r4 * kernel2[4] + *r5 * kernel2[5] + *r6 * kernel2[6] + *r7 * kernel2[7];
int sum3 = (int)*r0 * kernel3[0] + *r1 * kernel3[1] + *r2 * kernel3[2] + *r3 * kernel3[3] + *r4 * kernel3[4] + *r5 * kernel3[5] + *r6 * kernel3[6] + *r7 * kernel3[7];
int sum4 = (int)*r0 * kernel4[0] + *r1 * kernel4[1] + *r2 * kernel4[2] + *r3 * kernel4[3] + *r4 * kernel4[4] + *r5 * kernel4[5] + *r6 * kernel4[6] + *r7 * kernel4[7];
int sum5 = (int)*r0 * kernel5[0] + *r1 * kernel5[1] + *r2 * kernel5[2] + *r3 * kernel5[3] + *r4 * kernel5[4] + *r5 * kernel5[5] + *r6 * kernel5[6] + *r7 * kernel5[7];
int sum6 = (int)*r0 * kernel6[0] + *r1 * kernel6[1] + *r2 * kernel6[2] + *r3 * kernel6[3] + *r4 * kernel6[4] + *r5 * kernel6[5] + *r6 * kernel6[6] + *r7 * kernel6[7];
int sum7 = (int)*r0 * kernel7[0] + *r1 * kernel7[1] + *r2 * kernel7[2] + *r3 * kernel7[3] + *r4 * kernel7[4] + *r5 * kernel7[5] + *r6 * kernel7[6] + *r7 * kernel7[7];
*outptr0 += sum0;
*outptr1 += sum1;
*outptr2 += sum2;
*outptr3 += sum3;
*outptr4 += sum4;
*outptr5 += sum5;
*outptr6 += sum6;
*outptr7 += sum7;
r0++;
r1++;
r2++;
r3++;
r4++;
r5++;
r6++;
r7++;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
outptr4++;
outptr5++;
outptr6++;
outptr7++;
}
}
#endif
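// Left-over input channels (when inch is not a multiple of 8): each remaining
// channel q contributes  out[p][i] += (int)bottom[q][i] * kernel[p*inch + q]
// to all eight output channels, using the same widen-and-accumulate pattern.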
for (; q<inch; q++)
{
int* outptr0 = out0;
int* outptr1 = out1;
int* outptr2 = out2;
int* outptr3 = out3;
int* outptr4 = out4;
int* outptr5 = out5;
int* outptr6 = out6;
int* outptr7 = out7;
const signed char* img0 = bottom_blob.channel(q);
const signed char* kernel0 = (const signed char*)kernel + p*inch + q;
const signed char* kernel1 = (const signed char*)kernel + (p+1)*inch + q;
const signed char* kernel2 = (const signed char*)kernel + (p+2)*inch + q;
const signed char* kernel3 = (const signed char*)kernel + (p+3)*inch + q;
const signed char* kernel4 = (const signed char*)kernel + (p+4)*inch + q;
const signed char* kernel5 = (const signed char*)kernel + (p+5)*inch + q;
const signed char* kernel6 = (const signed char*)kernel + (p+6)*inch + q;
const signed char* kernel7 = (const signed char*)kernel + (p+7)*inch + q;
const signed char k0 = kernel0[0];
const signed char k1 = kernel1[0];
const signed char k2 = kernel2[0];
const signed char k3 = kernel3[0];
const signed char k4 = kernel4[0];
const signed char k5 = kernel5[0];
const signed char k6 = kernel6[0];
const signed char k7 = kernel7[0];
const signed char* r0 = img0;
int size = outw * outh;
int nn = size >> 3;
int remain = size & 7;
int8x8_t _k0 = vdup_n_s8(k0);
int8x8_t _k1 = vdup_n_s8(k1);
int8x8_t _k2 = vdup_n_s8(k2);
int8x8_t _k3 = vdup_n_s8(k3);
int8x8_t _k4 = vdup_n_s8(k4);
int8x8_t _k5 = vdup_n_s8(k5);
int8x8_t _k6 = vdup_n_s8(k6);
int8x8_t _k7 = vdup_n_s8(k7);
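// Main vector loop: each iteration loads 8 int8 inputs, multiplies them by the
// broadcast per-channel kernel value (vmull_s8 -> int16), and widens the result
// into the two int32x4 accumulators of each of the eight output rows.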
for (; nn>0; nn--)
{
int8x8_t _r0 = vld1_s8(r0);
int32x4_t _out0 = vld1q_s32(outptr0);
int32x4_t _out0n = vld1q_s32(outptr0+4);
int32x4_t _out1 = vld1q_s32(outptr1);
int32x4_t _out1n = vld1q_s32(outptr1+4);
int32x4_t _out2 = vld1q_s32(outptr2);
int32x4_t _out2n = vld1q_s32(outptr2+4);
int32x4_t _out3 = vld1q_s32(outptr3);
int32x4_t _out3n = vld1q_s32(outptr3+4);
int32x4_t _out4 = vld1q_s32(outptr4);
int32x4_t _out4n = vld1q_s32(outptr4+4);
int32x4_t _out5 = vld1q_s32(outptr5);
int32x4_t _out5n = vld1q_s32(outptr5+4);
int32x4_t _out6 = vld1q_s32(outptr6);
int32x4_t _out6n = vld1q_s32(outptr6+4);
int32x4_t _out7 = vld1q_s32(outptr7);
int32x4_t _out7n = vld1q_s32(outptr7+4);
int16x8_t _out0_s16 = vmull_s8(_r0, _k0);
int16x8_t _out1_s16 = vmull_s8(_r0, _k1);
int16x8_t _out2_s16 = vmull_s8(_r0, _k2);
int16x8_t _out3_s16 = vmull_s8(_r0, _k3);
int16x8_t _out4_s16 = vmull_s8(_r0, _k4);
int16x8_t _out5_s16 = vmull_s8(_r0, _k5);
int16x8_t _out6_s16 = vmull_s8(_r0, _k6);
int16x8_t _out7_s16 = vmull_s8(_r0, _k7);
_out0 = vaddw_s16(_out0, vget_low_s16(_out0_s16));
_out0n = vaddw_s16(_out0n, vget_high_s16(_out0_s16));
_out1 = vaddw_s16(_out1, vget_low_s16(_out1_s16));
_out1n = vaddw_s16(_out1n, vget_high_s16(_out1_s16));
_out2 = vaddw_s16(_out2, vget_low_s16(_out2_s16));
_out2n = vaddw_s16(_out2n, vget_high_s16(_out2_s16));
_out3 = vaddw_s16(_out3, vget_low_s16(_out3_s16));
_out3n = vaddw_s16(_out3n, vget_high_s16(_out3_s16));
_out4 = vaddw_s16(_out4, vget_low_s16(_out4_s16));
_out4n = vaddw_s16(_out4n, vget_high_s16(_out4_s16));
_out5 = vaddw_s16(_out5, vget_low_s16(_out5_s16));
_out5n = vaddw_s16(_out5n, vget_high_s16(_out5_s16));
_out6 = vaddw_s16(_out6, vget_low_s16(_out6_s16));
_out6n = vaddw_s16(_out6n, vget_high_s16(_out6_s16));
_out7 = vaddw_s16(_out7, vget_low_s16(_out7_s16));
_out7n = vaddw_s16(_out7n, vget_high_s16(_out7_s16));
vst1q_s32(outptr0, _out0);
vst1q_s32(outptr0+4, _out0n);
vst1q_s32(outptr1, _out1);
vst1q_s32(outptr1+4, _out1n);
vst1q_s32(outptr2, _out2);
vst1q_s32(outptr2+4, _out2n);
vst1q_s32(outptr3, _out3);
vst1q_s32(outptr3+4, _out3n);
vst1q_s32(outptr4, _out4);
vst1q_s32(outptr4+4, _out4n);
vst1q_s32(outptr5, _out5);
vst1q_s32(outptr5+4, _out5n);
vst1q_s32(outptr6, _out6);
vst1q_s32(outptr6+4, _out6n);
vst1q_s32(outptr7, _out7);
vst1q_s32(outptr7+4, _out7n);
r0 += 8;
outptr0 += 8;
outptr1 += 8;
outptr2 += 8;
outptr3 += 8;
outptr4 += 8;
outptr5 += 8;
outptr6 += 8;
outptr7 += 8;
}
for (; remain>0; remain--)
{
// TODO neon optimize
int sum0 = (int)*r0 * k0;
int sum1 = (int)*r0 * k1;
int sum2 = (int)*r0 * k2;
int sum3 = (int)*r0 * k3;
int sum4 = (int)*r0 * k4;
int sum5 = (int)*r0 * k5;
int sum6 = (int)*r0 * k6;
int sum7 = (int)*r0 * k7;
*outptr0 += sum0;
*outptr1 += sum1;
*outptr2 += sum2;
*outptr3 += sum3;
*outptr4 += sum4;
*outptr5 += sum5;
*outptr6 += sum6;
*outptr7 += sum7;
r0++;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
outptr4++;
outptr5++;
outptr6++;
outptr7++;
}
}
}
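// Remaining output channels (when outch is not a multiple of 8): each is
// computed on its own, still unrolling eight input channels at a time where
// possible and falling back to a single-channel pass for the rest.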
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out = top_blob.channel(p);
out.fill(0);
int q = 0;
for (; q+7<inch; q+=8)
{
int* outptr = out;
const signed char* img0 = bottom_blob.channel(q);
const signed char* img1 = bottom_blob.channel(q+1);
const signed char* img2 = bottom_blob.channel(q+2);
const signed char* img3 = bottom_blob.channel(q+3);
const signed char* img4 = bottom_blob.channel(q+4);
const signed char* img5 = bottom_blob.channel(q+5);
const signed char* img6 = bottom_blob.channel(q+6);
const signed char* img7 = bottom_blob.channel(q+7);
const signed char* kernel0 = (const signed char*)kernel + p*inch + q;
const signed char k0 = kernel0[0];
const signed char k1 = kernel0[1];
const signed char k2 = kernel0[2];
const signed char k3 = kernel0[3];
const signed char k4 = kernel0[4];
const signed char k5 = kernel0[5];
const signed char k6 = kernel0[6];
const signed char k7 = kernel0[7];
const signed char* r0 = img0;
const signed char* r1 = img1;
const signed char* r2 = img2;
const signed char* r3 = img3;
const signed char* r4 = img4;
const signed char* r5 = img5;
const signed char* r6 = img6;
const signed char* r7 = img7;
int size = outw * outh;
int nn = size >> 3;
int remain = size & 7;
int8x8_t _k0 = vdup_n_s8(k0);
int8x8_t _k1 = vdup_n_s8(k1);
int8x8_t _k2 = vdup_n_s8(k2);
int8x8_t _k3 = vdup_n_s8(k3);
int8x8_t _k4 = vdup_n_s8(k4);
int8x8_t _k5 = vdup_n_s8(k5);
int8x8_t _k6 = vdup_n_s8(k6);
int8x8_t _k7 = vdup_n_s8(k7);
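// Here a single output row accumulates all eight input channels: the int8
// products are chained with vmull_s8/vmlal_s8 into one int16x8 register and
// widened into the int32 accumulators only once per 8 positions.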
for (; nn>0; nn--)
{
int8x8_t _r0 = vld1_s8(r0);
int8x8_t _r1 = vld1_s8(r1);
int8x8_t _r2 = vld1_s8(r2);
int8x8_t _r3 = vld1_s8(r3);
int8x8_t _r4 = vld1_s8(r4);
int8x8_t _r5 = vld1_s8(r5);
int8x8_t _r6 = vld1_s8(r6);
int8x8_t _r7 = vld1_s8(r7);
int32x4_t _out0 = vld1q_s32(outptr);
int32x4_t _out0n = vld1q_s32(outptr+4);
int16x8_t _out0_s16 = vmull_s8(_r0, _k0);
_out0_s16 = vmlal_s8(_out0_s16, _r1, _k1);
_out0_s16 = vmlal_s8(_out0_s16, _r2, _k2);
_out0_s16 = vmlal_s8(_out0_s16, _r3, _k3);
_out0_s16 = vmlal_s8(_out0_s16, _r4, _k4);
_out0_s16 = vmlal_s8(_out0_s16, _r5, _k5);
_out0_s16 = vmlal_s8(_out0_s16, _r6, _k6);
_out0_s16 = vmlal_s8(_out0_s16, _r7, _k7);
_out0 = vaddw_s16(_out0, vget_low_s16(_out0_s16));
_out0n = vaddw_s16(_out0n, vget_high_s16(_out0_s16));
vst1q_s32(outptr, _out0);
vst1q_s32(outptr+4, _out0n);
r0 += 8;
r1 += 8;
r2 += 8;
r3 += 8;
r4 += 8;
r5 += 8;
r6 += 8;
r7 += 8;
outptr += 8;
}
for (; remain>0; remain--)
{
int sum = (int)*r0 * k0;
int sum1 = (int)*r1 * k1;
int sum2 = (int)*r2 * k2;
int sum3 = (int)*r3 * k3;
int sum4 = (int)*r4 * k4;
int sum5 = (int)*r5 * k5;
int sum6 = (int)*r6 * k6;
int sum7 = (int)*r7 * k7;
*outptr += sum + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7;
r0++;
r1++;
r2++;
r3++;
r4++;
r5++;
r6++;
r7++;
outptr++;
}
}
for (; q<inch; q++)
{
int* outptr = out;
const signed char* img0 = bottom_blob.channel(q);
const signed char* kernel0 = (const signed char*)kernel + p*inch + q;
const signed char k0 = kernel0[0];
const signed char* r0 = img0;
int size = outw * outh;
int nn = size >> 3;
int remain = size & 7;
int8x8_t _k0 = vdup_n_s8(k0);
for (; nn>0; nn--)
{
int8x8_t _r0 = vld1_s8(r0);
int32x4_t _out0 = vld1q_s32(outptr);
int32x4_t _out0n = vld1q_s32(outptr+4);
int16x8_t _out0_s16 = vmull_s8(_r0, _k0);
_out0 = vaddw_s16(_out0, vget_low_s16(_out0_s16));
_out0n = vaddw_s16(_out0n, vget_high_s16(_out0_s16));
vst1q_s32(outptr, _out0);
vst1q_s32(outptr+4, _out0n);
r0 += 8;
outptr += 8;
}
for (; remain>0; remain--)
{
int sum = (int)*r0 * k0;
*outptr += sum;
r0++;
outptr++;
}
}
}
}
#else // __aarch64__
/*
 * Convolution 1x1 quantized with int8, unroll 8 x 4
*/
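// For each output channel p and output pixel i this computes
//   out[p][i] = sum over input channels q of (int)bottom[q][i] * (int)kernel[p*inch + q]
// with int8 activations/weights accumulated into int32 outputs.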
static void conv1x1s1_neon_s8(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const signed char* kernel = _kernel;
int nn_outch = outch >> 2;
int remain_outch_start = nn_outch << 2;
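// output channels are processed in blocks of 4; any leftover channels
// (remain_outch_start..outch) are handled one at a time further below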
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 4;
Mat out0 = top_blob.channel(p);
Mat out1 = top_blob.channel(p+1);
Mat out2 = top_blob.channel(p+2);
Mat out3 = top_blob.channel(p+3);
out0.fill(0);
out1.fill(0);
out2.fill(0);
out3.fill(0);
int q = 0;
for (; q+7<inch; q+=8)
{
int* outptr0 = out0;
int* outptr1 = out1;
int* outptr2 = out2;
int* outptr3 = out3;
const signed char* r0 = bottom_blob.channel(q);
const signed char* r1 = bottom_blob.channel(q+1);
const signed char* r2 = bottom_blob.channel(q+2);
const signed char* r3 = bottom_blob.channel(q+3);
const signed char* r4 = bottom_blob.channel(q+4);
const signed char* r5 = bottom_blob.channel(q+5);
const signed char* r6 = bottom_blob.channel(q+6);
const signed char* r7 = bottom_blob.channel(q+7);
const signed char* kernel0 = (const signed char*)kernel + p*inch + q;
const signed char* kernel1 = (const signed char*)kernel + (p+1)*inch + q;
const signed char* kernel2 = (const signed char*)kernel + (p+2)*inch + q;
const signed char* kernel3 = (const signed char*)kernel + (p+3)*inch + q;
int size = outw * outh;
int nn = size >> 3;
int remain = size & 7;
if (nn > 0)
{
asm volatile(
"vld1.s8 d18, [%0] \n"
"vld1.s8 d19, [%1] \n"
"vld1.s8 d24, [%2] \n"
"vld1.s8 d25, [%3] \n"
: "=r"(kernel0), // %0
"=r"(kernel1), // %1
"=r"(kernel2), // %2
"=r"(kernel3) // %3
: "0"(kernel0),
"1"(kernel1),
"2"(kernel2),
"3"(kernel3)
:
);
asm volatile(
"0: \n"
//ld r0-r7
"pld [%5, #64] \n"
"vld1.s8 {d0}, [%5 :64]! \n" //r0
"pld [%6, #64] \n"
"vld1.s8 {d1}, [%6 :64]! \n" //r1
"pld [%7, #64] \n"
"vld1.s8 {d2}, [%7 :64]! \n" //r2
"pld [%8, #64] \n"
"vld1.s8 {d3}, [%8 :64]! \n" //r3
"pld [%9, #64] \n"
"vld1.s8 {d4}, [%9 :64]! \n" //r4
"pld [%10, #64] \n"
"vld1.s8 {d5}, [%10 :64]! \n" //r5
"pld [%11, #64] \n"
"vld1.s8 {d6}, [%11 :64]! \n" //r6
"pld [%12, #64] \n"
"vld1.s8 {d7}, [%12 :64]! \n" //r7
//###########################################
//load inch kernel_0 k0-k7
"vdup.s8 d8, d18[0] \n"
"vdup.s8 d9, d18[1] \n"
"vdup.s8 d10, d18[2] \n"
"vdup.s8 d11, d18[3] \n"
"vdup.s8 d12, d18[4] \n"
"vdup.s8 d13, d18[5] \n"
"vdup.s8 d14, d18[6] \n"
"vdup.s8 d15, d18[7] \n"
//mla
"vmull.s8 q8, d0, d8 \n"
"vmlal.s8 q8, d1, d9 \n"
"vmlal.s8 q8, d2, d10 \n"
"vmlal.s8 q8, d3, d11 \n"
"vmlal.s8 q8, d4, d12 \n"
"vmlal.s8 q8, d5, d13 \n"
"vmlal.s8 q8, d6, d14 \n"
"vmlal.s8 q8, d7, d15 \n"
//outptr0_s32
"pld [%1, #256] \n"
"vld1.32 {d20-d23}, [%1:128] \n" //outptr0_s32
"vaddw.s16 q10, q10, d16 \n"
"vaddw.s16 q11, q11, d17 \n"
"vst1.32 {d20-d23}, [%1:128]!\n"
//###########################################
//load inch kernel_1 k0-k7
"vdup.s8 d8, d19[0] \n"
"vdup.s8 d9, d19[1] \n"
"vdup.s8 d10, d19[2] \n"
"vdup.s8 d11, d19[3] \n"
"vdup.s8 d12, d19[4] \n"
"vdup.s8 d13, d19[5] \n"
"vdup.s8 d14, d19[6] \n"
"vdup.s8 d15, d19[7] \n"
//mla
"vmull.s8 q8, d0, d8 \n"
"vmlal.s8 q8, d1, d9 \n"
"vmlal.s8 q8, d2, d10 \n"
"vmlal.s8 q8, d3, d11 \n"
"vmlal.s8 q8, d4, d12 \n"
"vmlal.s8 q8, d5, d13 \n"
"vmlal.s8 q8, d6, d14 \n"
"vmlal.s8 q8, d7, d15 \n"
//outptr1_s32
"pld [%2, #256] \n"
"vld1.32 {d20-d23}, [%2:128] \n" //outptr1_s32
"vaddw.s16 q10, q10, d16 \n"
"vaddw.s16 q11, q11, d17 \n"
"vst1.32 {d20-d23}, [%2:128]!\n"
//############################################
//load inch kernel_2 k0-k7
"vdup.s8 d8, d24[0] \n"
"vdup.s8 d9, d24[1] \n"
"vdup.s8 d10, d24[2] \n"
"vdup.s8 d11, d24[3] \n"
"vdup.s8 d12, d24[4] \n"
"vdup.s8 d13, d24[5] \n"
"vdup.s8 d14, d24[6] \n"
"vdup.s8 d15, d24[7] \n"
//mla
"vmull.s8 q8, d0, d8 \n"
"vmlal.s8 q8, d1, d9 \n"
"vmlal.s8 q8, d2, d10 \n"
"vmlal.s8 q8, d3, d11 \n"
"vmlal.s8 q8, d4, d12 \n"
"vmlal.s8 q8, d5, d13 \n"
"vmlal.s8 q8, d6, d14 \n"
"vmlal.s8 q8, d7, d15 \n"
//outptr2_s32
"pld [%3, #256] \n"
"vld1.32 {d20-d23}, [%3:128] \n" //outptr2_s32
"vaddw.s16 q10, q10, d16 \n"
"vaddw.s16 q11, q11, d17 \n"
"vst1.32 {d20-d23}, [%3:128]!\n"
//#############################################
//load inch kernel_3 k0-k7
"vdup.s8 d8, d25[0] \n"
"vdup.s8 d9, d25[1] \n"
"vdup.s8 d10, d25[2] \n"
"vdup.s8 d11, d25[3] \n"
"vdup.s8 d12, d25[4] \n"
"vdup.s8 d13, d25[5] \n"
"vdup.s8 d14, d25[6] \n"
"vdup.s8 d15, d25[7] \n"
//mla
"vmull.s8 q8, d0, d8 \n"
"vmlal.s8 q8, d1, d9 \n"
"vmlal.s8 q8, d2, d10 \n"
"vmlal.s8 q8, d3, d11 \n"
"vmlal.s8 q8, d4, d12 \n"
"vmlal.s8 q8, d5, d13 \n"
"vmlal.s8 q8, d6, d14 \n"
"vmlal.s8 q8, d7, d15 \n"
//outptr3_s32
"pld [%4, #256] \n"
"vld1.32 {d20-d23}, [%4:128] \n" //outptr3_s32
"vaddw.s16 q10, q10, d16 \n"
"vaddw.s16 q11, q11, d17 \n"
"vst1.32 {d20-d23}, [%4:128]!\n"
//next
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(r0), // %5
"=r"(r1), // %6
"=r"(r2), // %7
"=r"(r3), // %8
"=r"(r4), // %9
"=r"(r5), // %10
"=r"(r6), // %11
"=r"(r7) // %12
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(r0),
"6"(r1),
"7"(r2),
"8"(r3),
"9"(r4),
"10"(r5),
"11"(r6),
"12"(r7)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q10", "q11", "q13", "q14", "q15"
);
}
for (; remain>0; remain--)
{
// TODO: NEON optimize
int sum0 = (int)*r0 * kernel0[0] + *r1 * kernel0[1] + *r2 * kernel0[2] + *r3 * kernel0[3]
         + *r4 * kernel0[4] + *r5 * kernel0[5] + *r6 * kernel0[6] + *r7 * kernel0[7];
int sum1 = (int)*r0 * kernel1[0] + *r1 * kernel1[1] + *r2 * kernel1[2] + *r3 * kernel1[3]
         + *r4 * kernel1[4] + *r5 * kernel1[5] + *r6 * kernel1[6] + *r7 * kernel1[7];
int sum2 = (int)*r0 * kernel2[0] + *r1 * kernel2[1] + *r2 * kernel2[2] + *r3 * kernel2[3]
         + *r4 * kernel2[4] + *r5 * kernel2[5] + *r6 * kernel2[6] + *r7 * kernel2[7];
int sum3 = (int)*r0 * kernel3[0] + *r1 * kernel3[1] + *r2 * kernel3[2] + *r3 * kernel3[3]
         + *r4 * kernel3[4] + *r5 * kernel3[5] + *r6 * kernel3[6] + *r7 * kernel3[7];
*outptr0 += sum0;
*outptr1 += sum1;
*outptr2 += sum2;
*outptr3 += sum3;
r0++;
r1++;
r2++;
r3++;
r4++;
r5++;
r6++;
r7++;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
}
}
for (; q<inch; q++)
{
int* outptr0 = out0;
int* outptr1 = out1;
int* outptr2 = out2;
int* outptr3 = out3;
const signed char* img0_s8 = bottom_blob.channel(q);
const signed char* kernel0 = (const signed char*)kernel + p*inch + q;
const signed char* kernel1 = (const signed char*)kernel + (p+1)*inch + q;
const signed char* kernel2 = (const signed char*)kernel + (p+2)*inch + q;
const signed char* kernel3 = (const signed char*)kernel + (p+3)*inch + q;
const signed char k0 = kernel0[0];
const signed char k1 = kernel1[0];
const signed char k2 = kernel2[0];
const signed char k3 = kernel3[0];
const signed char* r0 = img0_s8;
int size = outw * outh;
int nn = size >> 3;
int remain = size & 7;
int8x8_t _k0 = vdup_n_s8(k0);
int8x8_t _k1 = vdup_n_s8(k1);
int8x8_t _k2 = vdup_n_s8(k2);
int8x8_t _k3 = vdup_n_s8(k3);
if (nn > 0)
{
asm volatile(
"0: \n"
//load r0
"pld [%5, #64] \n"
"vld1.s8 {d8}, [%5 :64]! \n"
//mla
"vmull.s8 q5, d8, %12 \n"
//outptr0_s32
"pld [%1, #256] \n"
"vld1.32 {d12-d15}, [%1] \n"
"vmovl.s16 q8, d10 \n"
"vmovl.s16 q9, d11 \n"
"vadd.s32 q6, q8 \n"
"vadd.s32 q7, q9 \n"
"vst1.32 {d12-d15}, [%1]! \n"
//mla
"vmull.s8 q5, d8, %13 \n"
//outptr1_s32
"pld [%2, #256] \n"
"vld1.32 {d12-d15}, [%2] \n"
"vaddw.s16 q6, q6, d10 \n"
"vaddw.s16 q7, q7, d11 \n"
"vst1.32 {d12-d15}, [%2]! \n"
//mla
"vmull.s8 q5, d8, %14 \n"
//outptr2_s32
"pld [%3, #256] \n"
"vld1.32 {d12-d15}, [%3] \n"
"vaddw.s16 q6, q6, d10 \n"
"vaddw.s16 q7, q7, d11 \n"
"vst1.32 {d12-d15}, [%3]! \n"
//mla
"vmull.s8 q5, d8, %15 \n"
//outptr3_s32
"pld [%4, #256] \n"
"vld1.32 {d12-d15}, [%4] \n"
"vaddw.s16 q6, q6, d10 \n"
"vaddw.s16 q7, q7, d11 \n"
"vst1.32 {d12-d15}, [%4]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(r0) // %5
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(r0),
"w"(_k0), // %12
"w"(_k1), // %13
"w"(_k2), // %14
"w"(_k3) // %15
: "cc", "memory", "q4", "q5", "q6", "q7", "q8", "q9"
);
}
for (; remain>0; remain--)
{
// TODO neon optimize
int sum0 = (int)*r0 * k0;
int sum1 = (int)*r0 * k1;
int sum2 = (int)*r0 * k2;
int sum3 = (int)*r0 * k3;
*outptr0 += sum0;
*outptr1 += sum1;
*outptr2 += sum2;
*outptr3 += sum3;
r0++;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
}
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
out0.fill(0);
int q = 0;
for (; q+7<inch; q+=8)
{
int* outptr0 = out0;
const signed char* r0 = bottom_blob.channel(q);
const signed char* r1 = bottom_blob.channel(q+1);
const signed char* r2 = bottom_blob.channel(q+2);
const signed char* r3 = bottom_blob.channel(q+3);
const signed char* r4 = bottom_blob.channel(q+4);
const signed char* r5 = bottom_blob.channel(q+5);
const signed char* r6 = bottom_blob.channel(q+6);
const signed char* r7 = bottom_blob.channel(q+7);
const signed char* kernel0 = (const signed char*)kernel + p*inch + q;
int size = outw * outh;
int nn = size >> 3;
int remain = size & 7;
if (nn > 0)
{
//load inch kernel_0 k0-k7
asm volatile(
"vld1.s8 d18, [%0] \n"
: "=r"(kernel0) // %0
: "0" (kernel0)
:
);
asm volatile(
"0: \n"
//ld r0-r7
"pld [%2, #64] \n"
"vld1.s8 {d0}, [%2 :64]! \n" //r0
"pld [%3, #64] \n"
"vld1.s8 {d1}, [%3 :64]! \n" //r1
"pld [%4, #64] \n"
"vld1.s8 {d2}, [%4 :64]! \n" //r2
"pld [%5, #64] \n"
"vld1.s8 {d3}, [%5 :64]! \n" //r3
"pld [%6, #64] \n"
"vld1.s8 {d4}, [%6 :64]! \n" //r4
"pld [%7, #64] \n"
"vld1.s8 {d5}, [%7 :64]! \n" //r5
"pld [%8, #64] \n"
"vld1.s8 {d6}, [%8 :64]! \n" //r6
"pld [%9, #64] \n"
"vld1.s8 {d7}, [%9 :64]! \n" //r7
//load inch kernel_0 k0-k7
"vdup.s8 d8, d18[0] \n"
"vdup.s8 d9, d18[1] \n"
"vdup.s8 d10, d18[2] \n"
"vdup.s8 d11, d18[3] \n"
"vdup.s8 d12, d18[4] \n"
"vdup.s8 d13, d18[5] \n"
"vdup.s8 d14, d18[6] \n"
"vdup.s8 d15, d18[7] \n"
//mla
"vmull.s8 q14, d0, d8 \n"
"vmlal.s8 q14, d1, d9 \n"
"vmlal.s8 q14, d2, d10 \n"
"vmlal.s8 q14, d3, d11 \n"
"vmlal.s8 q14, d4, d12 \n"
"vmlal.s8 q14, d5, d13 \n"
"vmlal.s8 q14, d6, d14 \n"
"vmlal.s8 q14, d7, d15 \n"
//outptr0_s32
"pld [%1, #256] \n"
"vld1.32 {d20-d23}, [%1] \n" //outptr0_s32
"vaddw.s16 q10, q10, d28 \n"
"vaddw.s16 q11, q11, d29 \n"
"vst1.32 {d20-d23}, [%1]! \n"
//next
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(r5), // %7
"=r"(r6), // %8
"=r"(r7) // %9
: "0"(nn),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"8"(r6),
"9"(r7)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q10", "q11", "q12", "q13", "q14"
);
}
for (; remain>0; remain--)
{
// TODO: NEON optimize
int sum0 = (int)*r0 * kernel0[0] + *r1 * kernel0[1] + *r2 * kernel0[2] + *r3 * kernel0[3]
         + *r4 * kernel0[4] + *r5 * kernel0[5] + *r6 * kernel0[6] + *r7 * kernel0[7];
*outptr0 += sum0;
r0++;
r1++;
r2++;
r3++;
r4++;
r5++;
r6++;
r7++;
outptr0++;
}
}
for (; q<inch; q++)
{
int* outptr0 = out0;
const signed char* img0_s8 = bottom_blob.channel(q);
const signed char* r0 = img0_s8;
const signed char* kernel0 = (const signed char*)kernel + p*inch + q;
const signed char k0 = kernel0[0];
int size = outw * outh;
int nn = size >> 3;
int remain = size & 7;
int8x8_t _k0 = vdup_n_s8(k0);
if (nn > 0)
{
asm volatile(
"0: \n"
//load r0
"pld [%2, #64] \n"
"vld1.s8 {d8}, [%2 :64]! \n"
//mla
"vmull.s8 q10, d8, %6 \n"
//outptr0_s32
"pld [%1, #256] \n"
"vld1.32 {d12-d15}, [%1] \n"
"vaddw.s16 q6, q6, d20 \n"
"vaddw.s16 q7, q7, d21 \n"
"vst1.32 {d12-d15}, [%1]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(r0) // %2
: "0"(nn),
"1"(outptr0),
"2"(r0),
"w"(_k0) // %6
: "cc", "memory", "q4", "q10", "q7", "q8", "q9"
);
}
for (; remain>0; remain--)
{
int sum0 = (int)*r0 * k0;
*outptr0 += sum0;
r0++;
outptr0++;
}
}
}
}
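/*
 * Convolution 1x1 quantized with int8, unroll 8 x 4.
 * Variant selected when (outw*outh) % 8 == 4: in the unrolled
 * 8-input-channel / 4-output-channel path the 4-pixel tail of each map
 * is handled by an extra half-width NEON pass instead of a scalar loop.
 */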
static void conv1x1s1_neon_s8_left4(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const signed char* kernel = _kernel;
int nn_outch = outch >> 2;
int remain_outch_start = nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 4;
Mat out0 = top_blob.channel(p);
Mat out1 = top_blob.channel(p+1);
Mat out2 = top_blob.channel(p+2);
Mat out3 = top_blob.channel(p+3);
out0.fill(0);
out1.fill(0);
out2.fill(0);
out3.fill(0);
int q = 0;
for (; q+7<inch; q+=8)
{
int* outptr0 = out0;
int* outptr1 = out1;
int* outptr2 = out2;
int* outptr3 = out3;
const signed char* r0 = bottom_blob.channel(q);
const signed char* r1 = bottom_blob.channel(q+1);
const signed char* r2 = bottom_blob.channel(q+2);
const signed char* r3 = bottom_blob.channel(q+3);
const signed char* r4 = bottom_blob.channel(q+4);
const signed char* r5 = bottom_blob.channel(q+5);
const signed char* r6 = bottom_blob.channel(q+6);
const signed char* r7 = bottom_blob.channel(q+7);
const signed char* kernel0 = (const signed char*)kernel + p*inch + q;
const signed char* kernel1 = (const signed char*)kernel + (p+1)*inch + q;
const signed char* kernel2 = (const signed char*)kernel + (p+2)*inch + q;
const signed char* kernel3 = (const signed char*)kernel + (p+3)*inch + q;
int size = outw * outh;
int nn = size >> 3;
asm volatile(
"vld1.s8 d18, [%0] \n"
"vld1.s8 d19, [%1] \n"
"vld1.s8 d24, [%2] \n"
"vld1.s8 d25, [%3] \n"
: "=r"(kernel0), // %0
"=r"(kernel1), // %1
"=r"(kernel2), // %2
"=r"(kernel3) // %3
: "0"(kernel0),
"1"(kernel1),
"2"(kernel2),
"3"(kernel3)
:
);
if (nn > 0)
{
asm volatile(
"0: \n"
//ld r0-r7
"pld [%5, #64] \n"
"vld1.s8 {d0}, [%5 :64]! \n" //r0
"pld [%6, #64] \n"
"vld1.s8 {d1}, [%6 :64]! \n" //r1
"pld [%7, #64] \n"
"vld1.s8 {d2}, [%7 :64]! \n" //r2
"pld [%8, #64] \n"
"vld1.s8 {d3}, [%8 :64]! \n" //r3
"pld [%9, #64] \n"
"vld1.s8 {d4}, [%9 :64]! \n" //r4
"pld [%10, #64] \n"
"vld1.s8 {d5}, [%10 :64]! \n" //r5
"pld [%11, #64] \n"
"vld1.s8 {d6}, [%11 :64]! \n" //r6
"pld [%12, #64] \n"
"vld1.s8 {d7}, [%12 :64]! \n" //r7
//###########################################
//load inch kernel_0 k0-k7
"vdup.s8 d8, d18[0] \n"
"vdup.s8 d9, d18[1] \n"
"vdup.s8 d10, d18[2] \n"
"vdup.s8 d11, d18[3] \n"
"vdup.s8 d12, d18[4] \n"
"vdup.s8 d13, d18[5] \n"
"vdup.s8 d14, d18[6] \n"
"vdup.s8 d15, d18[7] \n"
//mla
"vmull.s8 q8, d0, d8 \n"
"vmlal.s8 q8, d1, d9 \n"
"vmlal.s8 q8, d2, d10 \n"
"vmlal.s8 q8, d3, d11 \n"
"vmlal.s8 q8, d4, d12 \n"
"vmlal.s8 q8, d5, d13 \n"
"vmlal.s8 q8, d6, d14 \n"
"vmlal.s8 q8, d7, d15 \n"
//outptr0_s32
"pld [%1, #256] \n"
"vld1.32 {d20-d23}, [%1:128] \n" //outptr0_s32
"vaddw.s16 q10, q10, d16 \n"
"vaddw.s16 q11, q11, d17 \n"
"vst1.32 {d20-d23}, [%1:128]!\n"
//###########################################
//load inch kernel_1 k0-k7
"vdup.s8 d8, d19[0] \n"
"vdup.s8 d9, d19[1] \n"
"vdup.s8 d10, d19[2] \n"
"vdup.s8 d11, d19[3] \n"
"vdup.s8 d12, d19[4] \n"
"vdup.s8 d13, d19[5] \n"
"vdup.s8 d14, d19[6] \n"
"vdup.s8 d15, d19[7] \n"
//mla
"vmull.s8 q8, d0, d8 \n"
"vmlal.s8 q8, d1, d9 \n"
"vmlal.s8 q8, d2, d10 \n"
"vmlal.s8 q8, d3, d11 \n"
"vmlal.s8 q8, d4, d12 \n"
"vmlal.s8 q8, d5, d13 \n"
"vmlal.s8 q8, d6, d14 \n"
"vmlal.s8 q8, d7, d15 \n"
//outptr1_s32
"pld [%2, #256] \n"
"vld1.32 {d20-d23}, [%2:128] \n" //outptr1_s32
"vaddw.s16 q10, q10, d16 \n"
"vaddw.s16 q11, q11, d17 \n"
"vst1.32 {d20-d23}, [%2:128]!\n"
//############################################
//load inch kernel_2 k0-k7
"vdup.s8 d8, d24[0] \n"
"vdup.s8 d9, d24[1] \n"
"vdup.s8 d10, d24[2] \n"
"vdup.s8 d11, d24[3] \n"
"vdup.s8 d12, d24[4] \n"
"vdup.s8 d13, d24[5] \n"
"vdup.s8 d14, d24[6] \n"
"vdup.s8 d15, d24[7] \n"
//mla
"vmull.s8 q8, d0, d8 \n"
"vmlal.s8 q8, d1, d9 \n"
"vmlal.s8 q8, d2, d10 \n"
"vmlal.s8 q8, d3, d11 \n"
"vmlal.s8 q8, d4, d12 \n"
"vmlal.s8 q8, d5, d13 \n"
"vmlal.s8 q8, d6, d14 \n"
"vmlal.s8 q8, d7, d15 \n"
//outptr2_s32
"pld [%3, #256] \n"
"vld1.32 {d20-d23}, [%3:128] \n" //outptr2_s32
"vaddw.s16 q10, q10, d16 \n"
"vaddw.s16 q11, q11, d17 \n"
"vst1.32 {d20-d23}, [%3:128]!\n"
//#############################################
//load inch kernel_3 k0-k7
"vdup.s8 d8, d25[0] \n"
"vdup.s8 d9, d25[1] \n"
"vdup.s8 d10, d25[2] \n"
"vdup.s8 d11, d25[3] \n"
"vdup.s8 d12, d25[4] \n"
"vdup.s8 d13, d25[5] \n"
"vdup.s8 d14, d25[6] \n"
"vdup.s8 d15, d25[7] \n"
//mla
"vmull.s8 q8, d0, d8 \n"
"vmlal.s8 q8, d1, d9 \n"
"vmlal.s8 q8, d2, d10 \n"
"vmlal.s8 q8, d3, d11 \n"
"vmlal.s8 q8, d4, d12 \n"
"vmlal.s8 q8, d5, d13 \n"
"vmlal.s8 q8, d6, d14 \n"
"vmlal.s8 q8, d7, d15 \n"
//outptr3_s32
"pld [%4, #256] \n"
"vld1.32 {d20-d23}, [%4:128] \n" //outptr3_s32
"vaddw.s16 q10, q10, d16 \n"
"vaddw.s16 q11, q11, d17 \n"
"vst1.32 {d20-d23}, [%4:128]!\n"
//next
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(r0), // %5
"=r"(r1), // %6
"=r"(r2), // %7
"=r"(r3), // %8
"=r"(r4), // %9
"=r"(r5), // %10
"=r"(r6), // %11
"=r"(r7) // %12
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(r0),
"6"(r1),
"7"(r2),
"8"(r3),
"9"(r4),
"10"(r5),
"11"(r6),
"12"(r7)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q10", "q11"
);
}
asm volatile(
"0: \n"
//ld r0-r7
"pld [%5, #64] \n"
"vld1.s8 {d0}, [%5 :64] \n" //r0
"pld [%6, #64] \n"
"vld1.s8 {d1}, [%6 :64] \n" //r1
"pld [%7, #64] \n"
"vld1.s8 {d2}, [%7 :64] \n" //r2
"pld [%8, #64] \n"
"vld1.s8 {d3}, [%8 :64] \n" //r3
"pld [%9, #64] \n"
"vld1.s8 {d4}, [%9 :64] \n" //r4
"pld [%10, #64] \n"
"vld1.s8 {d5}, [%10 :64] \n" //r5
"pld [%11, #64] \n"
"vld1.s8 {d6}, [%11 :64] \n" //r6
"pld [%12, #64] \n"
"vld1.s8 {d7}, [%12 :64] \n" //r7
"add %5, #4 \n"
"add %6, #4 \n"
"add %7, #4 \n"
"add %8, #4 \n"
"add %9, #4 \n"
"add %10, #4 \n"
"add %11, #4 \n"
"add %12, #4 \n"
//###########################################
//load inch kernel_0 k0-k7
"vdup.s8 d8, d18[0] \n"
"vdup.s8 d9, d18[1] \n"
"vdup.s8 d10, d18[2] \n"
"vdup.s8 d11, d18[3] \n"
"vdup.s8 d12, d18[4] \n"
"vdup.s8 d13, d18[5] \n"
"vdup.s8 d14, d18[6] \n"
"vdup.s8 d15, d18[7] \n"
//mla
"vmull.s8 q8, d0, d8 \n"
"vmlal.s8 q8, d1, d9 \n"
"vmlal.s8 q8, d2, d10 \n"
"vmlal.s8 q8, d3, d11 \n"
"vmlal.s8 q8, d4, d12 \n"
"vmlal.s8 q8, d5, d13 \n"
"vmlal.s8 q8, d6, d14 \n"
"vmlal.s8 q8, d7, d15 \n"
//outptr0_s32
"pld [%1, #128] \n"
"vld1.32 {d20-d21}, [%1:128] \n" //outptr0_s32
"vaddw.s16 q10, q10, d16 \n"
"vst1.32 {d20-d21}, [%1:128]!\n"
//###########################################
//load inch kernel_1 k0-k7
"vdup.s8 d8, d19[0] \n"
"vdup.s8 d9, d19[1] \n"
"vdup.s8 d10, d19[2] \n"
"vdup.s8 d11, d19[3] \n"
"vdup.s8 d12, d19[4] \n"
"vdup.s8 d13, d19[5] \n"
"vdup.s8 d14, d19[6] \n"
"vdup.s8 d15, d19[7] \n"
//mla
"vmull.s8 q8, d0, d8 \n"
"vmlal.s8 q8, d1, d9 \n"
"vmlal.s8 q8, d2, d10 \n"
"vmlal.s8 q8, d3, d11 \n"
"vmlal.s8 q8, d4, d12 \n"
"vmlal.s8 q8, d5, d13 \n"
"vmlal.s8 q8, d6, d14 \n"
"vmlal.s8 q8, d7, d15 \n"
//outptr1_s32
"pld [%2, #128] \n"
"vld1.32 {d20-d21}, [%2:128] \n" //outptr1_s32
"vaddw.s16 q10, q10, d16 \n"
"vst1.32 {d20-d21}, [%2:128]!\n"
//############################################
//load inch kernel_2 k0-k7
"vdup.s8 d8, d24[0] \n"
"vdup.s8 d9, d24[1] \n"
"vdup.s8 d10, d24[2] \n"
"vdup.s8 d11, d24[3] \n"
"vdup.s8 d12, d24[4] \n"
"vdup.s8 d13, d24[5] \n"
"vdup.s8 d14, d24[6] \n"
"vdup.s8 d15, d24[7] \n"
//mla
"vmull.s8 q8, d0, d8 \n"
"vmlal.s8 q8, d1, d9 \n"
"vmlal.s8 q8, d2, d10 \n"
"vmlal.s8 q8, d3, d11 \n"
"vmlal.s8 q8, d4, d12 \n"
"vmlal.s8 q8, d5, d13 \n"
"vmlal.s8 q8, d6, d14 \n"
"vmlal.s8 q8, d7, d15 \n"
//outptr2_s32
"pld [%3, #256] \n"
"vld1.32 {d20-d21}, [%3:128] \n" //outptr2_s32
"vaddw.s16 q10, q10, d16 \n"
"vst1.32 {d20-d21}, [%3:128]!\n"
//#############################################
//load inch kernel_3 k0-k7
"vdup.s8 d8, d25[0] \n"
"vdup.s8 d9, d25[1] \n"
"vdup.s8 d10, d25[2] \n"
"vdup.s8 d11, d25[3] \n"
"vdup.s8 d12, d25[4] \n"
"vdup.s8 d13, d25[5] \n"
"vdup.s8 d14, d25[6] \n"
"vdup.s8 d15, d25[7] \n"
//mla
"vmull.s8 q8, d0, d8 \n"
"vmlal.s8 q8, d1, d9 \n"
"vmlal.s8 q8, d2, d10 \n"
"vmlal.s8 q8, d3, d11 \n"
"vmlal.s8 q8, d4, d12 \n"
"vmlal.s8 q8, d5, d13 \n"
"vmlal.s8 q8, d6, d14 \n"
"vmlal.s8 q8, d7, d15 \n"
//outptr3_s32
"pld [%4, #256] \n"
"vld1.32 {d20-d21}, [%4:128] \n" //outptr3_s32
"vaddw.s16 q10, q10, d16 \n"
"vst1.32 {d20-d21}, [%4:128]!\n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(r0), // %5
"=r"(r1), // %6
"=r"(r2), // %7
"=r"(r3), // %8
"=r"(r4), // %9
"=r"(r5), // %10
"=r"(r6), // %11
"=r"(r7) // %12
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(r0),
"6"(r1),
"7"(r2),
"8"(r3),
"9"(r4),
"10"(r5),
"11"(r6),
"12"(r7)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q10", "q11"
);
}
for (; q<inch; q++)
{
int* outptr0 = out0;
int* outptr1 = out1;
int* outptr2 = out2;
int* outptr3 = out3;
const signed char* img0_s8 = bottom_blob.channel(q);
const signed char* kernel0 = (const signed char*)kernel + p*inch + q;
const signed char* kernel1 = (const signed char*)kernel + (p+1)*inch + q;
const signed char* kernel2 = (const signed char*)kernel + (p+2)*inch + q;
const signed char* kernel3 = (const signed char*)kernel + (p+3)*inch + q;
const signed char k0 = kernel0[0];
const signed char k1 = kernel1[0];
const signed char k2 = kernel2[0];
const signed char k3 = kernel3[0];
const signed char* r0 = img0_s8;
int size = outw * outh;
int nn = size >> 3;
int remain = size & 7;
int8x8_t _k0 = vdup_n_s8(k0);
int8x8_t _k1 = vdup_n_s8(k1);
int8x8_t _k2 = vdup_n_s8(k2);
int8x8_t _k3 = vdup_n_s8(k3);
if (nn > 0)
{
asm volatile(
"0: \n"
//load r0
"pld [%5, #64] \n"
"vld1.s8 {d8}, [%5 :64]! \n"
//mla
"vmull.s8 q5, d8, %12 \n"
//outptr0_s32
"pld [%1, #256] \n"
"vld1.32 {d12-d15}, [%1] \n"
"vaddw.s16 q6, q6, d10 \n"
"vaddw.s16 q7, q7, d11 \n"
"vst1.32 {d12-d15}, [%1]! \n"
//mla
"vmull.s8 q5, d8, %13 \n"
//outptr1_s32
"pld [%2, #256] \n"
"vld1.32 {d12-d15}, [%2] \n"
"vaddw.s16 q6, q6, d10 \n"
"vaddw.s16 q7, q7, d11 \n"
"vst1.32 {d12-d15}, [%2]! \n"
//mla
"vmull.s8 q5, d8, %14 \n"
//outptr2_s32
"pld [%3, #256] \n"
"vld1.32 {d12-d15}, [%3] \n"
"vaddw.s16 q6, q6, d10 \n"
"vaddw.s16 q7, q7, d11 \n"
"vst1.32 {d12-d15}, [%3]! \n"
//mla
"vmull.s8 q5, d8, %15 \n"
//outptr3_s32
"pld [%4, #256] \n"
"vld1.32 {d12-d15}, [%4] \n"
"vaddw.s16 q6, q6, d10 \n"
"vaddw.s16 q7, q7, d11 \n"
"vst1.32 {d12-d15}, [%4]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(r0) // %5
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(r0),
"w"(_k0), // %12
"w"(_k1), // %13
"w"(_k2), // %14
"w"(_k3) // %15
: "cc", "memory", "q4", "q5", "q6", "q7", "q8", "q9"
);
}
for (; remain>0; remain--)
{
// TODO neon optimize
int sum0 = (int)*r0 * k0;
int sum1 = (int)*r0 * k1;
int sum2 = (int)*r0 * k2;
int sum3 = (int)*r0 * k3;
*outptr0 += sum0;
*outptr1 += sum1;
*outptr2 += sum2;
*outptr3 += sum3;
r0++;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
}
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
out0.fill(0);
int q = 0;
for (; q+7<inch; q+=8)
{
int* outptr0 = out0;
const signed char* r0 = bottom_blob.channel(q);
const signed char* r1 = bottom_blob.channel(q+1);
const signed char* r2 = bottom_blob.channel(q+2);
const signed char* r3 = bottom_blob.channel(q+3);
const signed char* r4 = bottom_blob.channel(q+4);
const signed char* r5 = bottom_blob.channel(q+5);
const signed char* r6 = bottom_blob.channel(q+6);
const signed char* r7 = bottom_blob.channel(q+7);
const signed char* kernel0 = (const signed char*)kernel + p*inch + q;
int size = outw * outh;
int nn = size >> 3;
int remain = size & 7;
if (nn > 0)
{
//load inch kernel_0 k0-k7
asm volatile(
"vld1.s8 d18, [%0] \n"
: "=r"(kernel0) // %0
: "0" (kernel0)
:
);
asm volatile(
"0: \n"
//ld r0-r7
"pld [%2, #64] \n"
"vld1.s8 {d0}, [%2 :64]! \n" //r0
"pld [%3, #64] \n"
"vld1.s8 {d1}, [%3 :64]! \n" //r1
"pld [%4, #64] \n"
"vld1.s8 {d2}, [%4 :64]! \n" //r2
"pld [%5, #64] \n"
"vld1.s8 {d3}, [%5 :64]! \n" //r3
"pld [%6, #64] \n"
"vld1.s8 {d4}, [%6 :64]! \n" //r4
"pld [%7, #64] \n"
"vld1.s8 {d5}, [%7 :64]! \n" //r5
"pld [%8, #64] \n"
"vld1.s8 {d6}, [%8 :64]! \n" //r6
"pld [%9, #64] \n"
"vld1.s8 {d7}, [%9 :64]! \n" //r7
//load inch kernel_0 k0-k7
"vdup.s8 d8, d18[0] \n"
"vdup.s8 d9, d18[1] \n"
"vdup.s8 d10, d18[2] \n"
"vdup.s8 d11, d18[3] \n"
"vdup.s8 d12, d18[4] \n"
"vdup.s8 d13, d18[5] \n"
"vdup.s8 d14, d18[6] \n"
"vdup.s8 d15, d18[7] \n"
//mla
"vmull.s8 q8, d0, d8 \n"
"vmlal.s8 q8, d1, d9 \n"
"vmlal.s8 q8, d2, d10 \n"
"vmlal.s8 q8, d3, d11 \n"
"vmlal.s8 q8, d4, d12 \n"
"vmlal.s8 q8, d5, d13 \n"
"vmlal.s8 q8, d6, d14 \n"
"vmlal.s8 q8, d7, d15 \n"
//outptr0_s32
"pld [%1, #256] \n"
"vld1.32 {d20-d23}, [%1] \n" //outptr0_s32
"vaddw.s16 q10, q10, d16 \n"
"vaddw.s16 q11, q11, d17 \n"
"vst1.32 {d20-d23}, [%1]! \n"
//next
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(r5), // %7
"=r"(r6), // %8
"=r"(r7) // %9
: "0"(nn),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"8"(r6),
"9"(r7)
: "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q10", "q11", "q12", "q13"
);
}
for (; remain>0; remain--)
{
// TODO: NEON optimize
int sum0 = (int)*r0 * kernel0[0] + *r1 * kernel0[1] + *r2 * kernel0[2] + *r3 * kernel0[3]
         + *r4 * kernel0[4] + *r5 * kernel0[5] + *r6 * kernel0[6] + *r7 * kernel0[7];
*outptr0 += sum0;
r0++;
r1++;
r2++;
r3++;
r4++;
r5++;
r6++;
r7++;
outptr0++;
}
}
for (; q<inch; q++)
{
int* outptr0 = out0;
const signed char* img0_s8 = bottom_blob.channel(q);
const signed char* r0 = img0_s8;
const signed char* kernel0 = (const signed char*)kernel + p*inch + q;
const signed char k0 = kernel0[0];
int size = outw * outh;
int nn = size >> 3;
int remain = size & 7;
int8x8_t _k0 = vdup_n_s8(k0);
if (nn > 0)
{
asm volatile(
"0: \n"
//load r0
"pld [%2, #64] \n"
"vld1.s8 {d8}, [%2 :64]! \n"
//mla
"vmull.s8 q5, d8, %6 \n"
//outptr0_s32
"pld [%1, #256] \n"
"vld1.32 {d12-d15}, [%1] \n"
"vaddw.s16 q6, q6, d10 \n"
"vaddw.s16 q7, q7, d11 \n"
"vst1.32 {d12-d15}, [%1]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(r0) // %2
: "0"(nn),
"1"(outptr0),
"2"(r0),
"w"(_k0) // %6
: "cc", "memory", "q4", "q5", "q7", "q8", "q9"
);
}
for (; remain>0; remain--)
{
int sum0 = (int)*r0 * k0;
*outptr0 += sum0;
r0++;
outptr0++;
}
}
}
}
static void conv1x1s1_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
int size = top_blob.h * top_blob.w;
int remain = size & 7;
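// dispatch on (outw*outh) % 8: only a remainder of 4 has a dedicated kernel
// (conv1x1s1_neon_s8_left4); all other remainders use the generic
// conv1x1s1_neon_s8, which finishes with a scalar tail loop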
typedef void (*conv_func_int8)(const Mat&, Mat&, const Mat&, const Option&);
conv_func_int8 conv_func_table[8] =
{
conv1x1s1_neon_s8, //0
conv1x1s1_neon_s8, //1
conv1x1s1_neon_s8, //2
conv1x1s1_neon_s8, //3
conv1x1s1_neon_s8_left4, //4
conv1x1s1_neon_s8, //5
conv1x1s1_neon_s8, //6
conv1x1s1_neon_s8, //7
};
conv_func_int8 conv = conv_func_table[remain];
conv(bottom_blob, top_blob, _kernel, opt);
return;
}
static void conv1x1s1_sgemm_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
const int size = w * h;
// interleave
Mat tmp(8*4, inch/4+inch%4, size/8 + (size%8)/4 + size%4, 1u);
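// tmp packs input pixels into tiles of 8, then 4, then single pixels, so the
// sgemm micro-kernels below read each tile contiguously; pixel i lives in
// tmp.channel(i/8 + (i%8)/4 + i%4)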
{
int nn_size = size >> 3;
int remain_size_start = nn_size << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = ii * 8;
const signed char* img0 = bottom_blob.channel(0);
img0 += i;
signed char* tmpptr = tmp.channel(i/8);
for (int q=0; q<inch; q++)
{
#if __ARM_NEON
asm volatile(
"pld [%0, #64] \n"
"vld1.s8 {d0}, [%0] \n"
"vst1.s8 {d0}, [%1]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "d0"
);
img0 += bottom_blob.cstep;
#else
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr[4] = img0[4];
tmpptr[5] = img0[5];
tmpptr[6] = img0[6];
tmpptr[7] = img0[7];
tmpptr += 8;
img0 += bottom_blob.cstep;
#endif // __ARM_NEON
}
}
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const signed char* img0 = bottom_blob.channel(0);
img0 += i;
signed char* tmpptr = tmp.channel(i/8 + (i%8)/4);
for (int q=0; q<inch; q++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr += 4;
img0 += bottom_blob.cstep;
}
}
remain_size_start += nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i=remain_size_start; i<size; i++)
{
const signed char* img0 = bottom_blob.channel(0);
img0 += i;
signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
for (int q=0; q<inch; q++)
{
tmpptr[0] = img0[0];
tmpptr++;
img0 += bottom_blob.cstep;
}
}
}
// sgemm process
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = (outch - remain_outch_start) >> 2;
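// sgemm micro-kernels: 4 output channels x (8|4|1) pixels per iteration,
// int8 operands widened to int16 and accumulated in int32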
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
int* outptr0 = top_blob.channel(p);
int* outptr1 = top_blob.channel(p+1);
int* outptr2 = top_blob.channel(p+2);
int* outptr3 = top_blob.channel(p+3);
const int zeros[4] = {0, 0, 0, 0};
const int* biasptr = zeros;
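// this path carries no bias, so the accumulators start from zero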
int i = 0;
for (; i+7<size; i+=8)
{
const signed char* tmpptr = tmp.channel(i/8);
const signed char* kptr = kernel.channel(p/4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"vmov.s32 q8, #0 \n"
"vmov.s32 q9, #0 \n"
"vmov.s32 q10, #0 \n"
"vmov.s32 q11, #0 \n"
"vmov.s32 q12, #0 \n"
"vmov.s32 q13, #0 \n"
"lsr r4, %12, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%4, #128] \n"
"vld1.s8 {d4-d7}, [%4]! \n"// tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data)
"vmovl.s8 q5, d7 \n"// a30-a37
"vmovl.s8 q4, d6 \n"// a20-a27
"vmovl.s8 q3, d5 \n"// a10-a17
"vmovl.s8 q2, d4 \n"// a00-a07
"vld1.s8 {d0-d1}, [%5]! \n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch)
"vmovl.s8 q1, d1 \n"// k02-k32,k03-k33
"vmovl.s8 q0, d0 \n"// k00-k30,k01-k31
"vmlal.s16 q6, d4, d0[0] \n"// sum0 = (a00-a07) * k00
"vmlal.s16 q7, d5, d0[0] \n"
"vmlal.s16 q8, d4, d0[1] \n"// sum1 = (a00-a07) * k10
"vmlal.s16 q9, d5, d0[1] \n"
"vmlal.s16 q10, d4, d0[2] \n"// sum2 = (a00-a07) * k20
"vmlal.s16 q11, d5, d0[2] \n"
"vmlal.s16 q12, d4, d0[3] \n"// sum3 = (a00-a07) * k30
"vmlal.s16 q13, d5, d0[3] \n"
"vmlal.s16 q6, d6, d1[0] \n"// sum0 += (a10-a17) * k01
"vmlal.s16 q7, d7, d1[0] \n"
"vmlal.s16 q8, d6, d1[1] \n"// sum1 += (a10-a17) * k11
"vmlal.s16 q9, d7, d1[1] \n"
"vmlal.s16 q10, d6, d1[2] \n"// sum2 += (a10-a17) * k21
"vmlal.s16 q11, d7, d1[2] \n"
"vmlal.s16 q12, d6, d1[3] \n"// sum3 += (a10-a17) * k31
"vmlal.s16 q13, d7, d1[3] \n"
"vmlal.s16 q6, d8, d2[0] \n"// sum0 += (a20-a27) * k02
"vmlal.s16 q7, d9, d2[0] \n"
"vmlal.s16 q8, d8, d2[1] \n"// sum1 += (a20-a27) * k12
"vmlal.s16 q9, d9, d2[1] \n"
"vmlal.s16 q10, d8, d2[2] \n"// sum2 += (a20-a27) * k22
"vmlal.s16 q11, d9, d2[2] \n"
"vmlal.s16 q12, d8, d2[3] \n"// sum3 += (a20-a27) * k32
"vmlal.s16 q13, d9, d2[3] \n"
"vmlal.s16 q6, d10, d3[0] \n"// sum0 += (a30-a37) * k03
"vmlal.s16 q7, d11, d3[0] \n"
"vmlal.s16 q8, d10, d3[1] \n"// sum1 += (a30-a37) * k13
"vmlal.s16 q9, d11, d3[1] \n"
"vmlal.s16 q10, d10, d3[2] \n"// sum2 += (a30-a37) * k23
"vmlal.s16 q11, d11, d3[2] \n"
"vmlal.s16 q12, d10, d3[3] \n"// sum3 += (a30-a37) * k33
"vmlal.s16 q13, d11, d3[3] \n"
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %12, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%4]! \n"// tmpr a00-a07 a(inch)(data)
"vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %5, #4 \n"
"vmlal.s16 q6, d2, d0[0] \n"// sum0 += (a00-a07) * k00
"vmlal.s16 q7, d3, d0[0] \n"
"vmlal.s16 q8, d2, d0[1] \n"// sum1 += (a00-a07) * k10
"vmlal.s16 q9, d3, d0[1] \n"
"vmlal.s16 q10, d2, d0[2] \n"// sum2 += (a00-a07) * k20
"vmlal.s16 q11, d3, d0[2] \n"
"vmlal.s16 q12, d2, d0[3] \n"// sum3 += (a00-a07) * k30
"vmlal.s16 q13, d3, d0[3] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vst1.s32 {d12-d15}, [%0]! \n"
"vst1.s32 {d16-d19}, [%1]! \n"
"vst1.s32 {d20-d23}, [%2]! \n"
"vst1.s32 {d24-d27}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#else
int sum0_0 = biasptr[0];
int sum0_1 = biasptr[0];
int sum0_2 = biasptr[0];
int sum0_3 = biasptr[0];
int sum0_4 = biasptr[0];
int sum0_5 = biasptr[0];
int sum0_6 = biasptr[0];
int sum0_7 = biasptr[0];
int sum1_0 = biasptr[1];
int sum1_1 = biasptr[1];
int sum1_2 = biasptr[1];
int sum1_3 = biasptr[1];
int sum1_4 = biasptr[1];
int sum1_5 = biasptr[1];
int sum1_6 = biasptr[1];
int sum1_7 = biasptr[1];
int sum2_0 = biasptr[2];
int sum2_1 = biasptr[2];
int sum2_2 = biasptr[2];
int sum2_3 = biasptr[2];
int sum2_4 = biasptr[2];
int sum2_5 = biasptr[2];
int sum2_6 = biasptr[2];
int sum2_7 = biasptr[2];
int sum3_0 = biasptr[3];
int sum3_1 = biasptr[3];
int sum3_2 = biasptr[3];
int sum3_3 = biasptr[3];
int sum3_4 = biasptr[3];
int sum3_5 = biasptr[3];
int sum3_6 = biasptr[3];
int sum3_7 = biasptr[3];
for (int q=0; q<inch; q++)
{
sum0_0 += tmpptr[0] * kptr[0];
sum0_1 += tmpptr[1] * kptr[0];
sum0_2 += tmpptr[2] * kptr[0];
sum0_3 += tmpptr[3] * kptr[0];
sum0_4 += tmpptr[4] * kptr[0];
sum0_5 += tmpptr[5] * kptr[0];
sum0_6 += tmpptr[6] * kptr[0];
sum0_7 += tmpptr[7] * kptr[0];
sum1_0 += tmpptr[0] * kptr[1];
sum1_1 += tmpptr[1] * kptr[1];
sum1_2 += tmpptr[2] * kptr[1];
sum1_3 += tmpptr[3] * kptr[1];
sum1_4 += tmpptr[4] * kptr[1];
sum1_5 += tmpptr[5] * kptr[1];
sum1_6 += tmpptr[6] * kptr[1];
sum1_7 += tmpptr[7] * kptr[1];
sum2_0 += tmpptr[0] * kptr[2];
sum2_1 += tmpptr[1] * kptr[2];
sum2_2 += tmpptr[2] * kptr[2];
sum2_3 += tmpptr[3] * kptr[2];
sum2_4 += tmpptr[4] * kptr[2];
sum2_5 += tmpptr[5] * kptr[2];
sum2_6 += tmpptr[6] * kptr[2];
sum2_7 += tmpptr[7] * kptr[2];
sum3_0 += tmpptr[0] * kptr[3];
sum3_1 += tmpptr[1] * kptr[3];
sum3_2 += tmpptr[2] * kptr[3];
sum3_3 += tmpptr[3] * kptr[3];
sum3_4 += tmpptr[4] * kptr[3];
sum3_5 += tmpptr[5] * kptr[3];
sum3_6 += tmpptr[6] * kptr[3];
sum3_7 += tmpptr[7] * kptr[3];
tmpptr += 8;
kptr += 4;
}
outptr0[0] = sum0_0;
outptr0[1] = sum0_1;
outptr0[2] = sum0_2;
outptr0[3] = sum0_3;
outptr0[4] = sum0_4;
outptr0[5] = sum0_5;
outptr0[6] = sum0_6;
outptr0[7] = sum0_7;
outptr1[0] = sum1_0;
outptr1[1] = sum1_1;
outptr1[2] = sum1_2;
outptr1[3] = sum1_3;
outptr1[4] = sum1_4;
outptr1[5] = sum1_5;
outptr1[6] = sum1_6;
outptr1[7] = sum1_7;
outptr2[0] = sum2_0;
outptr2[1] = sum2_1;
outptr2[2] = sum2_2;
outptr2[3] = sum2_3;
outptr2[4] = sum2_4;
outptr2[5] = sum2_5;
outptr2[6] = sum2_6;
outptr2[7] = sum2_7;
outptr3[0] = sum3_0;
outptr3[1] = sum3_1;
outptr3[2] = sum3_2;
outptr3[3] = sum3_3;
outptr3[4] = sum3_4;
outptr3[5] = sum3_5;
outptr3[6] = sum3_6;
outptr3[7] = sum3_7;
outptr0 += 8;
outptr1 += 8;
outptr2 += 8;
outptr3 += 8;
#endif // __ARM_NEON
}
for (; i+3<size; i+=4)
{
const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4);
const signed char* kptr = kernel.channel(p/4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"vmov.s32 q8, #0 \n"
"vmov.s32 q9, #0 \n"
"lsr r4, %12, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%4, #128] \n"
"vld1.s8 {d4-d5}, [%4]! \n"// tmpr a00-a03,a10-a13,a20-a23,a30-a33 a(inch)(data)
"vmovl.s8 q3, d5 \n"// a20-a23,a30-a33
"vmovl.s8 q2, d4 \n"// a00-a04,a10-a14
"vld1.s8 {d0-d1}, [%5]! \n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch)
"vmovl.s8 q1, d1 \n"// k02-k32,k03-k33
"vmovl.s8 q0, d0 \n"// k00-k30,k01-k31
"vmlal.s16 q6, d4, d0[0] \n"// sum0 = (a00-a03) * k00
"vmlal.s16 q7, d4, d0[1] \n"// sum1 = (a00-a03) * k10
"vmlal.s16 q8, d4, d0[2] \n"// sum2 = (a00-a03) * k20
"vmlal.s16 q9, d4, d0[3] \n"// sum3 = (a00-a03) * k30
"vmlal.s16 q6, d5, d1[0] \n"// sum0 += (a10-a13) * k01
"vmlal.s16 q7, d5, d1[1] \n"// sum1 += (a10-a13) * k11
"vmlal.s16 q8, d5, d1[2] \n"// sum2 += (a10-a13) * k21
"vmlal.s16 q9, d5, d1[3] \n"// sum3 += (a10-a13) * k31
"vmlal.s16 q6, d6, d2[0] \n"// sum0 += (a20-a23) * k02
"vmlal.s16 q7, d6, d2[1] \n"// sum1 += (a20-a23) * k12
"vmlal.s16 q8, d6, d2[2] \n"// sum2 += (a20-a23) * k22
"vmlal.s16 q9, d6, d2[3] \n"// sum3 += (a20-a23) * k32
"vmlal.s16 q6, d7, d3[0] \n"// sum0 += (a30-a33) * k03
"vmlal.s16 q7, d7, d3[1] \n"// sum1 += (a30-a33) * k13
"vmlal.s16 q8, d7, d3[2] \n"// sum2 += (a30-a33) * k23
"vmlal.s16 q9, d7, d3[3] \n"// sum3 += (a30-a33) * k33
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %12, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%4] \n"// tmpr a00-a03 a(inch)(data)
"vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %4, #4 \n"
"add %5, #4 \n"
"vmlal.s16 q6, d2, d0[0] \n"// sum0 += (a00-a03) * k00
"vmlal.s16 q7, d2, d0[1] \n"// sum1 += (a00-a03) * k10
"vmlal.s16 q8, d2, d0[2] \n"// sum2 += (a00-a03) * k20
"vmlal.s16 q9, d2, d0[3] \n"// sum3 += (a00-a03) * k30
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vst1.s32 {d12-d13}, [%0]! \n"
"vst1.s32 {d14-d15}, [%1]! \n"
"vst1.s32 {d16-d17}, [%2]! \n"
"vst1.s32 {d18-d19}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#else
int sum0_0 = biasptr[0];
int sum0_1 = biasptr[0];
int sum0_2 = biasptr[0];
int sum0_3 = biasptr[0];
int sum1_0 = biasptr[1];
int sum1_1 = biasptr[1];
int sum1_2 = biasptr[1];
int sum1_3 = biasptr[1];
int sum2_0 = biasptr[2];
int sum2_1 = biasptr[2];
int sum2_2 = biasptr[2];
int sum2_3 = biasptr[2];
int sum3_0 = biasptr[3];
int sum3_1 = biasptr[3];
int sum3_2 = biasptr[3];
int sum3_3 = biasptr[3];
for (int q=0; q<inch; q++)
{
sum0_0 += tmpptr[0] * kptr[0];
sum0_1 += tmpptr[1] * kptr[0];
sum0_2 += tmpptr[2] * kptr[0];
sum0_3 += tmpptr[3] * kptr[0];
sum1_0 += tmpptr[0] * kptr[1];
sum1_1 += tmpptr[1] * kptr[1];
sum1_2 += tmpptr[2] * kptr[1];
sum1_3 += tmpptr[3] * kptr[1];
sum2_0 += tmpptr[0] * kptr[2];
sum2_1 += tmpptr[1] * kptr[2];
sum2_2 += tmpptr[2] * kptr[2];
sum2_3 += tmpptr[3] * kptr[2];
sum3_0 += tmpptr[0] * kptr[3];
sum3_1 += tmpptr[1] * kptr[3];
sum3_2 += tmpptr[2] * kptr[3];
sum3_3 += tmpptr[3] * kptr[3];
tmpptr += 4;
kptr += 4;
}
outptr0[0] = sum0_0;
outptr0[1] = sum0_1;
outptr0[2] = sum0_2;
outptr0[3] = sum0_3;
outptr1[0] = sum1_0;
outptr1[1] = sum1_1;
outptr1[2] = sum1_2;
outptr1[3] = sum1_3;
outptr2[0] = sum2_0;
outptr2[1] = sum2_1;
outptr2[2] = sum2_2;
outptr2[3] = sum2_3;
outptr3[0] = sum3_0;
outptr3[1] = sum3_1;
outptr3[2] = sum3_2;
outptr3[3] = sum3_3;
outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
outptr3 += 4;
#endif // __ARM_NEON
}
for (; i<size; i++)
{
const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
const signed char* kptr = kernel.channel(p/4);
#if __ARM_NEON
asm volatile(
// inch loop
"veor q6, q6, q6 \n"
"veor q7, q7, q7 \n"
"veor q8, q8, q8 \n"
"veor q9, q9, q9 \n"
"vmov.s32 q10, #0 \n"
"lsr r4, %12, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%4, #128] \n"
"vld1.s8 {d4}, [%4] \n"// tmpr a00,a10,a20,a30 a(inch)(data)
"add %4, #4 \n"
"vmovl.s8 q2, d4 \n"// a00,a10,a20,a30
"vld1.s8 {d0-d1}, [%5]! \n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch)
"vmovl.s8 q1, d1 \n"// k02-k32,k03-k33
"vmovl.s8 q0, d0 \n"// k00-k30,k01-k31
"vmlal.s16 q6, d0, d4[0] \n"// (k00-k30) * a00
"vmlal.s16 q7, d1, d4[1] \n"// (k01-k31) * a10
"vmlal.s16 q8, d2, d4[2] \n"// (k02-k32) * a20
"vmlal.s16 q9, d3, d4[3] \n"// (k03-k33) * a30
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"vadd.s32 q6, q6, q7 \n"
"vadd.s32 q9, q9, q8 \n"
"vadd.s32 q10, q6, q9 \n"
"1: \n"
// remain loop
"and r4, %12, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%4] \n"// tmpr a00 a(inch)(data)
"vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %4, #1 \n"
"add %5, #4 \n"
"vmlal.s16 q10, d0, d2[0] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vst1.s32 {d20[0]}, [%0]! \n"
"vst1.s32 {d20[1]}, [%1]! \n"
"vst1.s32 {d21[0]}, [%2]! \n"
"vst1.s32 {d21[1]}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#else
int sum0 = biasptr[0];
int sum1 = biasptr[1];
int sum2 = biasptr[2];
int sum3 = biasptr[3];
for (int q=0; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[0] * kptr[1];
sum2 += tmpptr[0] * kptr[2];
sum3 += tmpptr[0] * kptr[3];
tmpptr++;
kptr += 4;
}
outptr0[0] = sum0;
outptr1[0] = sum1;
outptr2[0] = sum2;
outptr3[0] = sum3;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
#endif // __ARM_NEON
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
const int bias0 = 0;
int* outptr0 = out0;
int i = 0;
for (; i+7<size; i+=8)
{
const signed char* tmpptr = tmp.channel(i/8);
const signed char* kptr = kernel.channel(p/4 + p%4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"lsr r4, %6, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%2, #128] \n"
"vld1.s8 {d4-d7}, [%1]! \n"// tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data)
"vmovl.s8 q5, d7 \n"// a30-a37
"vmovl.s8 q4, d6 \n"// a20-a27
"vmovl.s8 q3, d5 \n"// a10-a17
"vmovl.s8 q2, d4 \n"// a00-a07
"vld1.s8 {d0}, [%2] \n"// kptr k00,k01,k02,k03 k(outch)(inch)
"vmovl.s8 q0, d0 \n"// k00,k01,k02,k03
"add %2, #4 \n"
"vmlal.s16 q6, d4, d0[0] \n"// (a00-a07) * k00
"vmlal.s16 q7, d5, d0[0] \n"
"vmlal.s16 q6, d6, d0[1] \n"// (a10-a17) * k01
"vmlal.s16 q7, d7, d0[1] \n"
"vmlal.s16 q6, d8, d0[2] \n"// (a20-a27) * k02
"vmlal.s16 q7, d9, d0[2] \n"
"vmlal.s16 q6, d10, d0[3] \n"// (a30-a37) * k03
"vmlal.s16 q7, d11, d0[3] \n"
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %6, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%1]! \n"// tmpr a00-a07 a(inch)(data)
"vld1.s8 {d0}, [%2] \n"// kptr k00 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %2, #1 \n"
"vmlal.s16 q6, d2, d0[0] \n"// (a00-a07) * k00
"vmlal.s16 q7, d3, d0[0] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vst1.s32 {d12-d15}, [%0]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(inch) // %6
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7"
);
#else
int sum0 = bias0;
int sum1 = bias0;
int sum2 = bias0;
int sum3 = bias0;
int sum4 = bias0;
int sum5 = bias0;
int sum6 = bias0;
int sum7 = bias0;
for (int q=0; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[1] * kptr[0];
sum2 += tmpptr[2] * kptr[0];
sum3 += tmpptr[3] * kptr[0];
sum4 += tmpptr[4] * kptr[0];
sum5 += tmpptr[5] * kptr[0];
sum6 += tmpptr[6] * kptr[0];
sum7 += tmpptr[7] * kptr[0];
tmpptr += 8;
kptr++;
}
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0[2] = sum2;
outptr0[3] = sum3;
outptr0[4] = sum4;
outptr0[5] = sum5;
outptr0[6] = sum6;
outptr0[7] = sum7;
outptr0 += 8;
#endif // __ARM_NEON
}
for (; i+3<size; i+=4)
{
const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4);
const signed char* kptr = kernel.channel(p/4 + p%4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"lsr r4, %6, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%2, #128] \n"
"vld1.s8 {d4-d5}, [%1]! \n"// tmpr a00-a03,a10-a13,a20-a23,a30-a33 a(inch)(data)
"vmovl.s8 q3, d5 \n"// a20-a23,a30-a33
"vmovl.s8 q2, d4 \n"// a00-a03,a10-a13
"vld1.s8 {d0}, [%2] \n"// kptr k00,k01,k02,k03 k(outch)(inch)
"vmovl.s8 q0, d0 \n"// k00,k01,k02,k03
"add %2, #4 \n"
"vmlal.s16 q6, d4, d0[0] \n"// (a00-a03) * k00
"vmlal.s16 q6, d5, d0[1] \n"// (a10-a13) * k01
"vmlal.s16 q6, d6, d0[2] \n"// (a20-a23) * k02
"vmlal.s16 q6, d7, d0[3] \n"// (a30-a33) * k03
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %6, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%1] \n"// tmpr a00-a03 a(inch)(data)
"vld1.s8 {d0}, [%2] \n"// kptr k00 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %1, #4 \n"
"add %2, #1 \n"
"vmlal.s16 q6, d2, d0[0] \n"// (a00-a03) * k00
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vst1.s32 {d12-d13}, [%0]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(inch) // %6
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6"
);
#else
int sum0 = bias0;
int sum1 = bias0;
int sum2 = bias0;
int sum3 = bias0;
for (int q=0; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[1] * kptr[0];
sum2 += tmpptr[2] * kptr[0];
sum3 += tmpptr[3] * kptr[0];
tmpptr += 4;
kptr++;
}
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0[2] = sum2;
outptr0[3] = sum3;
outptr0 += 4;
#endif // __ARM_NEON
}
for (; i<size; i++)
{
const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
const signed char* kptr = kernel.channel(p/4 + p%4);
int q = 0;
int sum0 = bias0;
for (; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
tmpptr++;
kptr++;
}
outptr0[0] = sum0;
outptr0++;
}
}
// // NOTE sgemm int8
// for (; p<outch; p++)
// {
// Mat out0 = top_blob.channel(p);
//
// int* outptr0 = out0;
//
// for (int i=0; i<size; i++)
// {
// int sum = 0;
//
// const signed char* kptr = _kernel.channel(p/8 + p%8);
//
// for (int q=0; q<inch; q++)
// {
// const signed char* img0 = bottom_blob.channel(q);
//
// sum += img0[i] * kptr[0];
// kptr ++;
// }
//
// outptr0[i] = sum;
// }
// }
}
#endif // __aarch64__
static void conv1x1s2_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = w - 2*outw + w;
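// stride is 2: a row of outw outputs consumes 2*outw inputs, so skip the
// remainder of that row plus one whole skipped row: (w - 2*outw) + w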
const signed char *kernel = _kernel;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
out0.fill(0);
int q = 0;
for (; q+7<inch; q+=8)
{
int* outptr0 = out0;
const signed char *kernel0 = (const signed char *)kernel + p * inch + q;
const signed char *r0 = bottom_blob.channel(q);
const signed char *r1 = bottom_blob.channel(q + 1);
const signed char *r2 = bottom_blob.channel(q + 2);
const signed char *r3 = bottom_blob.channel(q + 3);
const signed char *r4 = bottom_blob.channel(q + 4);
const signed char *r5 = bottom_blob.channel(q + 5);
const signed char *r6 = bottom_blob.channel(q + 6);
const signed char *r7 = bottom_blob.channel(q + 7);
for(int i = 0; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
// TODO: NEON optimize
int sum0 = (int)*r0 * (int)kernel0[0] + (int)*r1 * (int)kernel0[1] +
(int)*r2 * (int)kernel0[2] + (int)*r3 * (int)kernel0[3] +
(int)*r4 * (int)kernel0[4] + (int)*r5 * (int)kernel0[5] +
(int)*r6 * (int)kernel0[6] + (int)*r7 * (int)kernel0[7];
*outptr0 += sum0;
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
r4 += 2;
r5 += 2;
r6 += 2;
r7 += 2;
outptr0++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
r5 += tailstep;
r6 += tailstep;
r7 += tailstep;
}
}
for (; q<inch; q++)
{
int* outptr0 = out0;
const signed char *r0 = bottom_blob.channel(q);
const signed char *kernel0 = (const signed char *)kernel + p * inch + q;
for(int i = 0; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
// TODO: NEON optimize
int sum0 = (int)*r0 * (int)kernel0[0];
*outptr0 += sum0;
r0 += 2;
outptr0++;
}
r0 += tailstep;
}
}
}
}
|
runtime_error.c | // RUN: %libomp-compile-and-run 2>&1 | sort | FileCheck %s
// REQUIRES: ompt
#include <string.h>
#include <stdio.h>
#include "callback.h"
// TODO: use error directive when compiler supports it
typedef void ident_t;
extern void __kmpc_error(ident_t *, int, const char *);
int main() {
#pragma omp parallel num_threads(2)
{
if (omp_get_thread_num() == 0) {
const char *msg = "User message goes here";
printf("0: Message length=%" PRIu64 "\n", (uint64_t)strlen(msg));
__kmpc_error(NULL, ompt_warning, msg);
}
}
return 0;
}
// CHECK: {{^}}0: Message length=[[LENGTH:[0-9]+]]
// CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[PRIMARY_ID:[0-9]+]]: ompt_event_implicit_task_begin
// CHECK: {{^}}[[PRIMARY_ID]]: ompt_event_runtime_error
// CHECK-SAME: severity=1
// CHECK-SAME: message=User message goes here
// CHECK-SAME: length=[[LENGTH]]
// CHECK-SAME: codeptr_ra={{0x[0-f]+}}
// Message from runtime
// CHECK: {{^}}OMP: Warning{{.*}}User message goes here
|
GrB_Scalar_nvals.c | //------------------------------------------------------------------------------
// GrB_Scalar_nvals: number of entries in a sparse GrB_Scalar
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB.h"
GrB_Info GrB_Scalar_nvals // get the number of entries in a GrB_Scalar
(
GrB_Index *nvals, // number of entries (1 or 0)
const GrB_Scalar s // GrB_Scalar to query
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GB_WHERE1 ("GrB_Scalar_nvals (&nvals, s)") ;
GB_RETURN_IF_NULL_OR_FAULTY (s) ;
ASSERT (GB_SCALAR_OK (s)) ;
//--------------------------------------------------------------------------
// get the number of entries
//--------------------------------------------------------------------------
GrB_Info info = GB_nvals (nvals, (GrB_Matrix) s, Context) ;
#pragma omp flush
return (info) ;
}
//------------------------------------------------------------------------------
// GxB_Scalar_nvals: number of entries in a sparse GrB_Scalar (historical)
//------------------------------------------------------------------------------
GrB_Info GxB_Scalar_nvals // get the number of entries in a GrB_Scalar
(
GrB_Index *nvals, // number of entries (1 or 0)
const GrB_Scalar s // GrB_Scalar to query
)
{
return (GrB_Scalar_nvals (nvals, s)) ;
}
|
idasFoodWeb_bnd_omp.c | /*
* -----------------------------------------------------------------
* Programmer(s): Daniel R. Reynolds and Ting Yan @ SMU
* Based on idaFoodWeb_bnd.c and parallelized with OpenMP
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2019, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* Example program for IDAS: Food web problem.
*
* This example program (OpenMP version) uses the SUNBAND linear
* solver, and IDACalcIC for initial condition calculation.
*
* The mathematical problem solved in this example is a DAE system
* that arises from a system of partial differential equations after
* spatial discretization. The PDE system is a food web population
* model, with predator-prey interaction and diffusion on the unit
* square in two dimensions. The dependent variable vector is:
*
* 1 2 ns
* c = (c , c , ..., c ) , ns = 2 * np
*
* and the PDE's are as follows:
*
* i i i
* dc /dt = d(i)*(c + c ) + R (x,y,c) (i = 1,...,np)
* xx yy i
*
* i i
* 0 = d(i)*(c + c ) + R (x,y,c) (i = np+1,...,ns)
* xx yy i
*
* where the reaction terms R are:
*
* i ns j
* R (x,y,c) = c * (b(i) + sum a(i,j)*c )
* i j=1
*
* The number of species is ns = 2 * np, with the first np being
* prey and the last np being predators. The coefficients a(i,j),
* b(i), d(i) are:
*
* a(i,i) = -AA (all i)
* a(i,j) = -GG (i <= np , j > np)
* a(i,j) = EE (i > np, j <= np)
* all other a(i,j) = 0
* b(i) = BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i <= np)
* b(i) =-BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i > np)
* d(i) = DPREY (i <= np)
* d(i) = DPRED (i > np)
*
* The various scalar parameters required are set using '#define'
* statements or directly in routine InitUserData. In this program,
* np = 1, ns = 2. The boundary conditions are homogeneous Neumann:
* normal derivative = 0.
*
* A polynomial in x and y is used to set the initial values of the
* first np variables (the prey variables) at each x,y location,
* while initial values for the remaining (predator) variables are
* set to a flat value, which is corrected by IDACalcIC.
*
* The PDEs are discretized by central differencing on a MX by MY
* mesh.
*
* The DAE system is solved by IDAS using the SUNBAND linear solver.
* Output is printed at t = 0, .001, .01, .1, .4, .7, 1.
*
* Optionally, we can set the number of threads from environment
* variable or command line. To check the current value for number
* of threads from environment:
* % echo $OMP_NUM_THREADS
*
* Execution:
*
* To use the default value for the number of threads from
* the OMP_NUM_THREADS environment value:
* % ./idasFoodWeb_bnd_omp
* To specify the number of threads at the command line, use
* % ./idasFoodWeb_bnd_omp num_threads
* where num_threads is the desired number of threads.
*
* -----------------------------------------------------------------
* References:
* [1] Peter N. Brown and Alan C. Hindmarsh,
* Reduced Storage Matrix Methods in Stiff ODE systems, Journal
* of Applied Mathematics and Computation, Vol. 31 (May 1989),
* pp. 40-91.
*
* [2] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold,
* Using Krylov Methods in the Solution of Large-Scale
* Differential-Algebraic Systems, SIAM J. Sci. Comput., 15
* (1994), pp. 1467-1488.
*
* [3] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold,
* Consistent Initial Condition Calculation for Differential-
* Algebraic Systems, SIAM J. Sci. Comput., 19 (1998),
* pp. 1495-1512.
* -----------------------------------------------------------------
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <idas/idas.h>
#include <sunmatrix/sunmatrix_band.h>
#include <sunlinsol/sunlinsol_band.h>
#include <nvector/nvector_openmp.h>
#include <sundials/sundials_direct.h>
#include <sundials/sundials_types.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/* Problem Constants. */
#define NPREY 1 /* No. of prey (= no. of predators). */
#define NUM_SPECIES 2*NPREY
#define PI RCONST(3.1415926535898)
#define FOURPI (RCONST(4.0)*PI)
#define MX 20 /* MX = number of x mesh points */
#define MY 20 /* MY = number of y mesh points */
#define NSMX (NUM_SPECIES * MX)
#define NEQ (NUM_SPECIES*MX*MY)
#define AA RCONST(1.0) /* Coefficient in above eqns. for a */
#define EE RCONST(10000.) /* Coefficient in above eqns. for a */
#define GG RCONST(0.5e-6) /* Coefficient in above eqns. for a */
#define BB RCONST(1.0) /* Coefficient in above eqns. for b */
#define DPREY RCONST(1.0) /* Coefficient in above eqns. for d */
#define DPRED RCONST(0.05) /* Coefficient in above eqns. for d */
#define ALPHA RCONST(50.) /* Coefficient alpha in above eqns. */
#define BETA RCONST(1000.) /* Coefficient beta in above eqns. */
#define AX RCONST(1.0) /* Total range of x variable */
#define AY RCONST(1.0) /* Total range of y variable */
#define RTOL RCONST(1.e-5) /* Relative tolerance */
#define ATOL RCONST(1.e-5) /* Absolute tolerance */
#define NOUT 6 /* Number of output times */
#define TMULT RCONST(10.0) /* Multiplier for tout values */
#define TADD RCONST(0.3) /* Increment for tout values */
#define ZERO RCONST(0.)
#define ONE RCONST(1.0)
/*
* User-defined vector and accessor macro: IJ_Vptr.
* IJ_Vptr is defined in order to express the underlying 3-D structure of
* the dependent variable vector from its underlying 1-D storage (an N_Vector).
* IJ_Vptr(vv,i,j) returns a pointer to the location in vv corresponding to
* species index is = 0, x-index ix = i, and y-index jy = j.
*/
#define IJ_Vptr(vv,i,j) (&NV_Ith_OMP(vv, (i)*NUM_SPECIES + (j)*NSMX))
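/*
 * Illustrative use of IJ_Vptr (this note is a reading aid only; the grid
 * point chosen below is arbitrary): with NUM_SPECIES = 2, the call
 *
 *     realtype *cxy = IJ_Vptr(cc, 3, 5);
 *
 * returns a pointer such that cxy[0] and cxy[1] are the prey and predator
 * concentrations at mesh indices ix = 3, jy = 5.
 */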
/* Type: UserData. Contains problem constants, etc. */
typedef struct {
sunindextype Neq, ns, np, mx, my;
realtype dx, dy, **acoef;
realtype cox[NUM_SPECIES], coy[NUM_SPECIES], bcoef[NUM_SPECIES];
N_Vector rates;
int nthreads;
} *UserData;
/* Prototypes for functions called by the IDA Solver. */
static int resweb(realtype time, N_Vector cc, N_Vector cp, N_Vector resval,
void *user_data);
/* Prototypes for private Helper Functions. */
static void InitUserData(UserData webdata);
static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id,
UserData webdata);
static void PrintHeader(sunindextype mu, sunindextype ml, realtype rtol, realtype atol);
static void PrintOutput(void *ida_mem, N_Vector c, realtype t);
static void PrintFinalStats(void *ida_mem);
static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate, UserData webdata);
static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy,
UserData webdata);
static realtype dotprod(sunindextype size, realtype *x1, realtype *x2);
static int check_retval(void *returnvalue, char *funcname, int opt);
/*
*--------------------------------------------------------------------
* MAIN PROGRAM
*--------------------------------------------------------------------
*/
int main(int argc, char *argv[])
{
void *ida_mem;
SUNMatrix A;
SUNLinearSolver LS;
UserData webdata;
N_Vector cc, cp, id;
int iout, retval;
sunindextype mu, ml;
realtype rtol, atol, t0, tout, tret;
int num_threads;
ida_mem = NULL;
A = NULL;
LS = NULL;
webdata = NULL;
cc = cp = id = NULL;
/* Set the number of threads to use */
num_threads = 1; /* default value */
#ifdef _OPENMP
  num_threads = omp_get_max_threads();  /* overwrite with OMP_NUM_THREADS environment variable */
#endif
if (argc > 1) /* overwrite with command line value, if supplied */
num_threads = strtol(argv[1], NULL, 0);
/* Allocate and initialize user data block webdata. */
webdata = (UserData) malloc(sizeof *webdata);
webdata->rates = N_VNew_OpenMP(NEQ, num_threads);
webdata->acoef = newDenseMat(NUM_SPECIES, NUM_SPECIES);
webdata->nthreads = num_threads;
InitUserData(webdata);
/* Allocate N-vectors and initialize cc, cp, and id. */
cc = N_VNew_OpenMP(NEQ, num_threads);
if(check_retval((void *)cc, "N_VNew_OpenMP", 0)) return(1);
cp = N_VNew_OpenMP(NEQ, num_threads);
if(check_retval((void *)cp, "N_VNew_OpenMP", 0)) return(1);
id = N_VNew_OpenMP(NEQ, num_threads);
if(check_retval((void *)id, "N_VNew_OpenMP", 0)) return(1);
SetInitialProfiles(cc, cp, id, webdata);
/* Set remaining inputs to IDAMalloc. */
t0 = ZERO;
rtol = RTOL;
atol = ATOL;
/* Call IDACreate and IDAMalloc to initialize IDA. */
ida_mem = IDACreate();
if(check_retval((void *)ida_mem, "IDACreate", 0)) return(1);
retval = IDASetUserData(ida_mem, webdata);
if(check_retval(&retval, "IDASetUserData", 1)) return(1);
retval = IDASetId(ida_mem, id);
if(check_retval(&retval, "IDASetId", 1)) return(1);
retval = IDAInit(ida_mem, resweb, t0, cc, cp);
if(check_retval(&retval, "IDAInit", 1)) return(1);
retval = IDASStolerances(ida_mem, rtol, atol);
if(check_retval(&retval, "IDASStolerances", 1)) return(1);
/* Setup band matrix and linear solver, and attach to IDA. */
mu = ml = NSMX;
A = SUNBandMatrix(NEQ, mu, ml);
if(check_retval((void *)A, "SUNBandMatrix", 0)) return(1);
LS = SUNLinSol_Band(cc, A);
if(check_retval((void *)LS, "SUNLinSol_Band", 0)) return(1);
retval = IDASetLinearSolver(ida_mem, LS, A);
if(check_retval(&retval, "IDASetLinearSolver", 1)) return(1);
/* Call IDACalcIC (with default options) to correct the initial values. */
tout = RCONST(0.001);
retval = IDACalcIC(ida_mem, IDA_YA_YDP_INIT, tout);
if(check_retval(&retval, "IDACalcIC", 1)) return(1);
/* Print heading, basic parameters, and initial values. */
PrintHeader(mu, ml, rtol, atol);
PrintOutput(ida_mem, cc, ZERO);
/* Loop over iout, call IDASolve (normal mode), print selected output. */
for (iout = 1; iout <= NOUT; iout++) {
retval = IDASolve(ida_mem, tout, &tret, cc, cp, IDA_NORMAL);
if(check_retval(&retval, "IDASolve", 1)) return(retval);
PrintOutput(ida_mem, cc, tret);
if (iout < 3) tout *= TMULT; else tout += TADD;
}
/* Print final statistics and free memory. */
PrintFinalStats(ida_mem);
printf("num_threads = %i\n\n", num_threads);
/* Free memory */
IDAFree(&ida_mem);
SUNLinSolFree(LS);
SUNMatDestroy(A);
N_VDestroy_OpenMP(cc);
N_VDestroy_OpenMP(cp);
N_VDestroy_OpenMP(id);
destroyMat(webdata->acoef);
N_VDestroy_OpenMP(webdata->rates);
free(webdata);
return(0);
}
/* Define lines for readability in later routines */
#define acoef (webdata->acoef)
#define bcoef (webdata->bcoef)
#define cox (webdata->cox)
#define coy (webdata->coy)
/*
*--------------------------------------------------------------------
* FUNCTIONS CALLED BY IDA
*--------------------------------------------------------------------
*/
/*
* resweb: System residual function for predator-prey system.
* This routine calls Fweb to get all the right-hand sides of the
* equations, then loads the residual vector accordingly,
* using cp in the case of prey species.
*/
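/*
 * Reading aid (derived from the loop below, not part of the original
 * comment): for a differential (prey) component the residual is
 * F = c' - f(c), while for an algebraic (predator) component it is
 * F = -f(c), where f(c) is the right-hand side that Fweb loads into res.
 */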
static int resweb(realtype tt, N_Vector cc, N_Vector cp,
N_Vector res, void *user_data)
{
sunindextype jx, jy, is, yloc, loc, np;
realtype *resv, *cpv;
UserData webdata;
webdata = (UserData)user_data;
cpv = NV_DATA_OMP(cp);
resv = NV_DATA_OMP(res);
np = webdata->np;
/* Call Fweb to set res to vector of right-hand sides. */
Fweb(tt, cc, res, webdata);
/* Loop over all grid points, setting residual values appropriately
for differential or algebraic components. */
#pragma omp parallel for default(shared) private(jy, yloc, jx, loc, is) schedule(static) num_threads(webdata->nthreads)
for (jy = 0; jy < MY; jy++) {
yloc = NSMX * jy;
for (jx = 0; jx < MX; jx++) {
loc = yloc + NUM_SPECIES * jx;
for (is = 0; is < NUM_SPECIES; is++) {
if (is < np)
resv[loc+is] = cpv[loc+is] - resv[loc+is];
else
resv[loc+is] = -resv[loc+is];
}
}
}
return(0);
}
/*
*--------------------------------------------------------------------
* PRIVATE FUNCTIONS
*--------------------------------------------------------------------
*/
/*
* InitUserData: Load problem constants in webdata (of type UserData).
*/
static void InitUserData(UserData webdata)
{
int i, j, np;
realtype *a1,*a2, *a3, *a4, dx2, dy2;
webdata->mx = MX;
webdata->my = MY;
webdata->ns = NUM_SPECIES;
webdata->np = NPREY;
webdata->dx = AX/(MX-1);
webdata->dy = AY/(MY-1);
webdata->Neq= NEQ;
/* Set up the coefficients a and b, and others found in the equations. */
np = webdata->np;
dx2 = (webdata->dx)*(webdata->dx); dy2 = (webdata->dy)*(webdata->dy);
for (i = 0; i < np; i++) {
a1 = &(acoef[i][np]);
a2 = &(acoef[i+np][0]);
a3 = &(acoef[i][0]);
a4 = &(acoef[i+np][np]);
/* Fill in the portion of acoef in the four quadrants, row by row. */
for (j = 0; j < np; j++) {
*a1++ = -GG;
*a2++ = EE;
*a3++ = ZERO;
*a4++ = ZERO;
}
/* Reset the diagonal elements of acoef to -AA. */
acoef[i][i] = -AA; acoef[i+np][i+np] = -AA;
/* Set coefficients for b and diffusion terms. */
bcoef[i] = BB; bcoef[i+np] = -BB;
cox[i] = DPREY/dx2; cox[i+np] = DPRED/dx2;
coy[i] = DPREY/dy2; coy[i+np] = DPRED/dy2;
}
}
/*
* SetInitialProfiles: Set initial conditions in cc, cp, and id.
* A polynomial profile is used for the prey cc values, and a constant
* (1.0e5) is loaded as the initial guess for the predator cc values.
* The id values are set to 1 for the prey and 0 for the predators.
* The prey cp values are set according to the given system, and
* the predator cp values are set to zero.
*/
static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id,
UserData webdata)
{
sunindextype loc, yloc, is, jx, jy, np;
realtype xx, yy, xyfactor;
realtype *ccv, *cpv, *idv;
ccv = NV_DATA_OMP(cc);
cpv = NV_DATA_OMP(cp);
idv = NV_DATA_OMP(id);
np = webdata->np;
/* Loop over grid, load cc values and id values. */
for (jy = 0; jy < MY; jy++) {
yy = jy * webdata->dy;
yloc = NSMX * jy;
for (jx = 0; jx < MX; jx++) {
xx = jx * webdata->dx;
xyfactor = RCONST(16.0)*xx*(ONE-xx)*yy*(ONE-yy);
xyfactor *= xyfactor;
loc = yloc + NUM_SPECIES*jx;
for (is = 0; is < NUM_SPECIES; is++) {
if (is < np) {
ccv[loc+is] = RCONST(10.0) + (realtype)(is+1) * xyfactor;
idv[loc+is] = ONE;
}
else {
ccv[loc+is] = RCONST(1.0e5);
idv[loc+is] = ZERO;
}
}
}
}
/* Set c' for the prey by calling the function Fweb. */
Fweb(ZERO, cc, cp, webdata);
/* Set c' for predators to 0. */
for (jy = 0; jy < MY; jy++) {
yloc = NSMX * jy;
for (jx = 0; jx < MX; jx++) {
loc = yloc + NUM_SPECIES * jx;
for (is = np; is < NUM_SPECIES; is++) {
cpv[loc+is] = ZERO;
}
}
}
}
/*
* Print first lines of output (problem description)
*/
static void PrintHeader(sunindextype mu, sunindextype ml, realtype rtol, realtype atol)
{
printf("\nidasFoodWeb_bnd_omp: Predator-prey DAE OpenMP example problem for IDAS \n\n");
printf("Number of species ns: %d", NUM_SPECIES);
printf(" Mesh dimensions: %d x %d", MX, MY);
printf(" System size: %d\n", NEQ);
#if defined(SUNDIALS_EXTENDED_PRECISION)
printf("Tolerance parameters: rtol = %Lg atol = %Lg\n", rtol, atol);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol);
#else
printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol);
#endif
printf("Linear solver: SUNBAND, Band parameters mu = %ld, ml = %ld\n",
(long int) mu, (long int) ml);
printf("CalcIC called to correct initial predator concentrations.\n\n");
printf("-----------------------------------------------------------\n");
printf(" t bottom-left top-right");
printf(" | nst k h\n");
printf("-----------------------------------------------------------\n\n");
}
/*
* PrintOutput: Print output values at output time t = tt.
* Selected run statistics are printed. Then values of the concentrations
* are printed for the bottom left and top right grid points only.
*/
static void PrintOutput(void *ida_mem, N_Vector c, realtype t)
{
int i, kused, retval;
long int nst;
realtype *c_bl, *c_tr, hused;
retval = IDAGetLastOrder(ida_mem, &kused);
check_retval(&retval, "IDAGetLastOrder", 1);
retval = IDAGetNumSteps(ida_mem, &nst);
check_retval(&retval, "IDAGetNumSteps", 1);
retval = IDAGetLastStep(ida_mem, &hused);
check_retval(&retval, "IDAGetLastStep", 1);
c_bl = IJ_Vptr(c,0,0);
c_tr = IJ_Vptr(c,MX-1,MY-1);
#if defined(SUNDIALS_EXTENDED_PRECISION)
printf("%8.2Le %12.4Le %12.4Le | %3ld %1d %12.4Le\n",
t, c_bl[0], c_tr[0], nst, kused, hused);
for (i=1;i<NUM_SPECIES;i++)
printf(" %12.4Le %12.4Le |\n",c_bl[i],c_tr[i]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n",
t, c_bl[0], c_tr[0], nst, kused, hused);
for (i=1;i<NUM_SPECIES;i++)
printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]);
#else
printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n",
t, c_bl[0], c_tr[0], nst, kused, hused);
for (i=1;i<NUM_SPECIES;i++)
printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]);
#endif
printf("\n");
}
/*
* PrintFinalStats: Print final run data contained in iopt.
*/
static void PrintFinalStats(void *ida_mem)
{
long int nst, nre, nreLS, nni, nje, netf, ncfn;
int retval;
retval = IDAGetNumSteps(ida_mem, &nst);
check_retval(&retval, "IDAGetNumSteps", 1);
retval = IDAGetNumNonlinSolvIters(ida_mem, &nni);
check_retval(&retval, "IDAGetNumNonlinSolvIters", 1);
retval = IDAGetNumResEvals(ida_mem, &nre);
check_retval(&retval, "IDAGetNumResEvals", 1);
retval = IDAGetNumErrTestFails(ida_mem, &netf);
check_retval(&retval, "IDAGetNumErrTestFails", 1);
retval = IDAGetNumNonlinSolvConvFails(ida_mem, &ncfn);
check_retval(&retval, "IDAGetNumNonlinSolvConvFails", 1);
retval = IDAGetNumJacEvals(ida_mem, &nje);
check_retval(&retval, "IDAGetNumJacEvals", 1);
retval = IDAGetNumLinResEvals(ida_mem, &nreLS);
check_retval(&retval, "IDAGetNumLinResEvals", 1);
printf("-----------------------------------------------------------\n");
printf("Final run statistics: \n\n");
printf("Number of steps = %ld\n", nst);
printf("Number of residual evaluations = %ld\n", nre+nreLS);
printf("Number of Jacobian evaluations = %ld\n", nje);
printf("Number of nonlinear iterations = %ld\n", nni);
printf("Number of error test failures = %ld\n", netf);
printf("Number of nonlinear conv. failures = %ld\n", ncfn);
}
/*
* Fweb: Rate function for the food-web problem.
* This routine computes the right-hand sides of the system equations,
* consisting of the diffusion term and interaction term.
* The interaction term is computed by the function WebRates.
*/
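/*
 * Reading aid (a sketch of what the loop below computes): at an interior
 * mesh point the x-diffusion contribution assembled below is
 *
 *     cox[is] * (dcxui - dcxli) = cox[is] * (c(jx+1) - 2*c(jx) + c(jx-1)),
 *
 * and analogously in y. At a boundary point the offsets idxu/idxl (and
 * idyu/idyl) flip sign, so the missing neighbor is replaced by the interior
 * one; this reflection realizes the homogeneous Neumann (zero normal
 * derivative) boundary condition.
 */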
static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate,
UserData webdata)
{
sunindextype jx, jy, is, idyu, idyl, idxu, idxl;
realtype xx, yy, *cxy, *ratesxy, *cratexy, dcyli, dcyui, dcxli, dcxui;
/* Loop over grid points, evaluate interaction vector (length ns),
form diffusion difference terms, and load crate. */
for (jy = 0; jy < MY; jy++) {
yy = (webdata->dy) * jy ;
idyu = (jy!=MY-1) ? NSMX : -NSMX;
idyl = (jy!= 0 ) ? NSMX : -NSMX;
for (jx = 0; jx < MX; jx++) {
xx = (webdata->dx) * jx;
idxu = (jx!= MX-1) ? NUM_SPECIES : -NUM_SPECIES;
idxl = (jx!= 0 ) ? NUM_SPECIES : -NUM_SPECIES;
cxy = IJ_Vptr(cc,jx,jy);
ratesxy = IJ_Vptr(webdata->rates,jx,jy);
cratexy = IJ_Vptr(crate,jx,jy);
/* Get interaction vector at this grid point. */
WebRates(xx, yy, cxy, ratesxy, webdata);
/* Loop over species, do differencing, load crate segment. */
#pragma omp parallel for default(shared) private(is, dcyli, dcyui, dcxli, dcxui) schedule(static) num_threads(webdata->nthreads)
for (is = 0; is < NUM_SPECIES; is++) {
/* Differencing in y. */
dcyli = *(cxy+is) - *(cxy - idyl + is) ;
dcyui = *(cxy + idyu + is) - *(cxy+is);
/* Differencing in x. */
dcxli = *(cxy+is) - *(cxy - idxl + is);
dcxui = *(cxy + idxu +is) - *(cxy+is);
/* Compute the crate values at (xx,yy). */
cratexy[is] = coy[is] * (dcyui - dcyli) +
cox[is] * (dcxui - dcxli) + ratesxy[is];
} /* End is loop */
} /* End of jx loop */
} /* End of jy loop */
}
/*
* WebRates: Evaluate reaction rates at a given spatial point.
* At a given (x,y), evaluate the array of ns reaction terms R.
*/
static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy,
UserData webdata)
{
int is;
realtype fac;
for (is = 0; is < NUM_SPECIES; is++)
ratesxy[is] = dotprod(NUM_SPECIES, cxy, acoef[is]);
fac = ONE + ALPHA*xx*yy + BETA*sin(FOURPI*xx)*sin(FOURPI*yy);
for (is = 0; is < NUM_SPECIES; is++)
ratesxy[is] = cxy[is]*( bcoef[is]*fac + ratesxy[is] );
}
/*
* dotprod: dot product routine for realtype arrays, for use by WebRates.
*/
static realtype dotprod(sunindextype size, realtype *x1, realtype *x2)
{
sunindextype i;
realtype *xx1, *xx2, temp = ZERO;
xx1 = x1; xx2 = x2;
for (i = 0; i < size; i++) temp += (*xx1++) * (*xx2++);
return(temp);
}
/*
* Check function return value...
* opt == 0 means SUNDIALS function allocates memory so check if
* returned NULL pointer
* opt == 1 means SUNDIALS function returns an integer value so check if
* retval < 0
* opt == 2 means function allocates memory so check if returned
* NULL pointer
*/
static int check_retval(void *returnvalue, char *funcname, int opt)
{
int *retval;
if (opt == 0 && returnvalue == NULL) {
/* Check if SUNDIALS function returned NULL pointer - no memory allocated */
fprintf(stderr,
"\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return(1);
} else if (opt == 1) {
/* Check if retval < 0 */
retval = (int *) returnvalue;
if (*retval < 0) {
fprintf(stderr,
"\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n",
funcname, *retval);
return(1);
}
} else if (opt == 2 && returnvalue == NULL) {
/* Check if function returned NULL pointer - no memory allocated */
fprintf(stderr,
"\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return(1);
}
return(0);
}
|
RangeDrivenOctree.h | /// \ingroup base
/// \class ttk::RangeDrivenOctree
/// \author Julien Tierny <julien.tierny@lip6.fr>
/// \date March 2015.
///
/// \brief TTK optional package for octree based range queries in bivariate
/// volumetric data.
///
/// This class accelerates range-driven queries in bivariate volumetric data.
/// This class is typically used to accelerate fiber surface computation.
///
/// \b Related \b publication \n
/// "Fast and Exact Fiber Surface Extraction for Tetrahedral Meshes" \n
/// Pavol Klacansky, Julien Tierny, Hamish Carr, Zhao Geng \n
/// IEEE Transactions on Visualization and Computer Graphics, 2016.
///
/// \sa FiberSurface.h %for a usage example.
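///
/// Minimal usage sketch (illustrative only; it is not taken from
/// FiberSurface.h, and the pointers `triangulation`, `u` and `v` holding the
/// mesh and the two scalar fields are assumptions):
/// \code
///   ttk::RangeDrivenOctree octree;
///   octree.setTriangulation(triangulation);
///   octree.setRange(u, v);
///   octree.build<float, float>();
///   std::vector<ttk::SimplexId> cells;
///   octree.rangeSegmentQuery({0.0, 0.0}, {0.5, 0.5}, cells);
/// \endcode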
#ifndef _RANGE_DRIVEN_OCTREE_H
#define _RANGE_DRIVEN_OCTREE_H
// base code includes
#include <Triangulation.h>
#include <Wrapper.h>
namespace ttk{
class RangeDrivenOctree : public Debug{
public:
RangeDrivenOctree();
~RangeDrivenOctree();
template <class dataTypeU, class dataTypeV>
inline int build();
inline bool empty() const {
return nodeList_.empty();
}
int flush();
int getTet2NodeMap(std::vector<SimplexId> &map,
const bool &forSegmentation = false) const;
int rangeSegmentQuery(
const std::pair<double, double> &p0,
const std::pair<double, double> &p1,
std::vector<SimplexId> &cellList) const;
inline void setCellList(const SimplexId *cellList){
cellList_ = cellList;
}
inline void setCellNumber(const SimplexId &cellNumber){
cellNumber_ = cellNumber;
}
inline void setLeafMinimumCellNumber(const SimplexId &number){
leafMinimumCellNumber_ = number;
}
      inline void setLeafMinimumDomainVolumeRatio(const float &ratio){
        leafMinimumDomainVolumeRatio_ = ratio;
      }
inline void setLeafMinimumRangeAreaRatio(const float &ratio){
leafMinimumRangeAreaRatio_ = ratio;
}
inline void setPointList(const float *pointList){
pointList_ = pointList;
}
inline void setRange(const void *u, const void *v){
u_ = u;
v_ = v;
}
inline void setTriangulation(const Triangulation *triangulation){
triangulation_ = triangulation;
}
inline void setVertexNumber(const SimplexId &vertexNumber){
vertexNumber_ = vertexNumber;
}
int stats(std::ostream &stream);
int statNode(const SimplexId &nodeId, std::ostream &stream);
protected:
class OctreeNode{
public:
std::pair<std::pair<double, double>, std::pair<double, double> >
rangeBox_;
std::vector<SimplexId> cellList_;
std::vector<SimplexId> childList_;
std::vector<std::pair<float, float> >
domainBox_;
};
template <class dataTypeU, class dataTypeV>
int buildNode(const std::vector<SimplexId> &cellList,
const std::vector<std::pair<float, float> > &domainBox,
const std::pair<std::pair<double, double>, std::pair<double, double> >
&rangeBox, SimplexId &nodeId);
int rangeSegmentQuery(
const std::pair<double, double> &p0,
const std::pair<double, double> &p1,
const SimplexId &nodeId,
std::vector<SimplexId> &cellList) const;
bool segmentIntersection(
const std::pair<double, double> &p0,
const std::pair<double, double> &p1,
const std::pair<double, double> &q0,
const std::pair<double, double> &q1) const;
const void *u_;
const void *v_;
const float *pointList_;
const SimplexId *cellList_;
float domainVolume_,
leafMinimumDomainVolumeRatio_,
leafMinimumRangeAreaRatio_,
rangeArea_;
SimplexId cellNumber_, vertexNumber_,
leafMinimumCellNumber_, rootId_;
mutable SimplexId queryResultNumber_;
std::vector<OctreeNode> nodeList_;
std::vector<std::vector<std::pair<float, float> > >
cellDomainBox_;
std::vector<std::pair<std::pair<double, double>, std::pair<double, double>
> >
cellRangeBox_;
const Triangulation *triangulation_;
};
}
// if the package is not a template, comment the following line
// #include <RangeDrivenOctree.cpp>
template <class dataTypeU, class dataTypeV>
int ttk::RangeDrivenOctree::build(){
Timer t;
Memory m;
dataTypeU *u = (dataTypeU *) u_;
dataTypeV *v = (dataTypeV *) v_;
if(triangulation_){
cellNumber_ = triangulation_->getNumberOfCells();
}
if(triangulation_){
vertexNumber_ = triangulation_->getNumberOfVertices();
}
cellDomainBox_.resize(cellNumber_, std::vector<std::pair<float, float> >(3));
cellRangeBox_.resize(cellNumber_);
// WARNING: assuming tets only here
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for num_threads(threadNumber_)
#endif
for(SimplexId i = 0; i < cellNumber_; i++){
for(int j = 0; j < 3; j++){
cellDomainBox_[i][j].first = FLT_MAX;
cellDomainBox_[i][j].second = -FLT_MAX;
}
const SimplexId *cell = NULL;
if(!triangulation_){
cell = &(cellList_[5*i + 1]);
}
for(int j = 0; j < 4; j++){
// update the domain bounding box for that tet
SimplexId vertexId = 0;
if(triangulation_){
triangulation_->getCellVertex(i, j, vertexId);
}
else{
vertexId = cell[j];
}
float p[3];
if(triangulation_){
triangulation_->getVertexPoint(vertexId, p[0], p[1], p[2]);
}
else{
p[0] = pointList_[3*vertexId];
p[1] = pointList_[3*vertexId + 1];
p[2] = pointList_[3*vertexId + 2];
}
// update x-range
if(p[0] < cellDomainBox_[i][0].first)
cellDomainBox_[i][0].first = p[0];
if(p[0] > cellDomainBox_[i][0].second)
cellDomainBox_[i][0].second = p[0];
// update y-range
if(p[1] < cellDomainBox_[i][1].first)
cellDomainBox_[i][1].first = p[1];
if(p[1] > cellDomainBox_[i][1].second)
cellDomainBox_[i][1].second = p[1];
// update z-range
if(p[2] < cellDomainBox_[i][2].first)
cellDomainBox_[i][2].first = p[2];
if(p[2] > cellDomainBox_[i][2].second)
cellDomainBox_[i][2].second = p[2];
// update the range bounding box
if(!j){
cellRangeBox_[i].first.first = u[vertexId];
cellRangeBox_[i].first.second = u[vertexId];
cellRangeBox_[i].second.first = v[vertexId];
cellRangeBox_[i].second.second = v[vertexId];
}
else{
// update u
if(u[vertexId] < cellRangeBox_[i].first.first)
cellRangeBox_[i].first.first = u[vertexId];
if(u[vertexId] > cellRangeBox_[i].first.second)
cellRangeBox_[i].first.second = u[vertexId];
// update v
if(v[vertexId] < cellRangeBox_[i].second.first)
cellRangeBox_[i].second.first = v[vertexId];
if(v[vertexId] > cellRangeBox_[i].second.second)
cellRangeBox_[i].second.second = v[vertexId];
}
}
}
std::vector<SimplexId> domain(cellNumber_);
for(SimplexId i = 0; i < cellNumber_; i++)
domain[i] = i;
// get global bBoxes
std::vector<std::pair<float, float> > domainBox(3);
std::pair<std::pair<double, double>, std::pair<double, double> > rangeBox;
for(SimplexId i = 0; i < vertexNumber_; i++){
// domain one
float p[3];
if(triangulation_){
triangulation_->getVertexPoint(i, p[0], p[1], p[2]);
}
else{
p[0] = pointList_[3*i];
p[1] = pointList_[3*i + 1];
p[2] = pointList_[3*i + 2];
}
for(int j = 0; j < 3; j++){
if(!i){
domainBox[j].first = domainBox[j].second = p[j];
}
else{
if(p[j] < domainBox[j].first)
domainBox[j].first = p[j];
if(p[j] > domainBox[j].second)
domainBox[j].second = p[j];
}
}
if(!i){
rangeBox.first.first = rangeBox.first.second = u[i];
rangeBox.second.first = rangeBox.second.second = v[i];
}
else{
if(u[i] < rangeBox.first.first)
rangeBox.first.first = u[i];
if(u[i] > rangeBox.first.second)
rangeBox.first.second = u[i];
if(v[i] < rangeBox.second.first)
rangeBox.second.first = v[i];
if(v[i] > rangeBox.second.second)
rangeBox.second.second = v[i];
}
}
rangeArea_ = (rangeBox.first.second - rangeBox.first.first)
*(rangeBox.second.second - rangeBox.second.first);
domainVolume_ = (domainBox[0].second - domainBox[0].first)
*(domainBox[1].second - domainBox[1].first)
*(domainBox[2].second - domainBox[2].first);
// special case for tets obtained from regular grid subdivision (assuming 6)
if(leafMinimumCellNumber_ < 6)
leafMinimumCellNumber_ = 6;
leafMinimumDomainVolumeRatio_ = (1.0/((float)cellNumber_))/2.0;
{
std::stringstream msg;
msg << "[RangeDrivenOctree] Range area ratio: "
<< leafMinimumRangeAreaRatio_
<< std::endl;
dMsg(std::cout, msg.str(), 4);
}
buildNode<dataTypeU, dataTypeV>(domain, domainBox, rangeBox, rootId_);
{
std::stringstream msg;
msg << "[RangeDrivenOctree] Octree built in "
<< t.getElapsedTime() << " s." << std::endl;
dMsg(std::cout, msg.str(), 2);
}
{
std::stringstream msg;
msg << "[RangeDrivenOctree] Memory: "
<< m.getElapsedUsage() << " MB." << std::endl;
dMsg(std::cout, msg.str(), memoryMsg);
}
// debug
// stats(std::cout);
// end of debug
return 0;
}
template <class dataTypeU, class dataTypeV>
int ttk::RangeDrivenOctree::buildNode(
const std::vector<SimplexId> &cellList,
const std::vector<std::pair<float, float> > &domainBox,
const std::pair<std::pair<double, double>, std::pair<double, double> >
&rangeBox,
SimplexId &nodeId){
nodeId = nodeList_.size();
nodeList_.resize(nodeList_.size() + 1);
nodeList_.back().rangeBox_ = rangeBox;
nodeList_.back().domainBox_ = domainBox;
float rangeArea = (rangeBox.first.second - rangeBox.first.first)
*(rangeBox.second.second - rangeBox.second.first);
float domainVolume = (domainBox[0].second - domainBox[0].first)
*(domainBox[1].second - domainBox[1].first)
*(domainBox[2].second - domainBox[2].first);
if(((SimplexId) cellList.size() > leafMinimumCellNumber_)
&&(rangeArea > leafMinimumRangeAreaRatio_*rangeArea_)
&&(domainVolume > leafMinimumDomainVolumeRatio_*domainVolume_)){
nodeList_.back().childList_.resize(8);
std::vector<std::vector<std::pair<float, float> > > childDomainBox(8);
for(SimplexId i = 0; i < (SimplexId) childDomainBox.size(); i++){
childDomainBox[i].resize(3);
}
std::vector<std::pair<std::pair<dataTypeU, dataTypeU>, std::pair<dataTypeV,
dataTypeV> > >
childRangeBox(8);
std::vector<std::vector<SimplexId> > childCellList(8);
float midX = domainBox[0].first
+ (domainBox[0].second - domainBox[0].first)/2.0;
float midY = domainBox[1].first
+ (domainBox[1].second - domainBox[1].first)/2.0;
float midZ = domainBox[2].first
+ (domainBox[2].second - domainBox[2].first)/2.0;
// 0 - - -
childDomainBox[0][0].first = domainBox[0].first;
childDomainBox[0][0].second = midX;
childDomainBox[0][1].first = domainBox[1].first;
childDomainBox[0][1].second = midY;
childDomainBox[0][2].first = domainBox[2].first;
childDomainBox[0][2].second = midZ;
// 1 - - +
childDomainBox[1][0].first = domainBox[0].first;
childDomainBox[1][0].second = midX;
childDomainBox[1][1].first = domainBox[1].first;
childDomainBox[1][1].second = midY;
childDomainBox[1][2].first = midZ;
childDomainBox[1][2].second = domainBox[2].second;
// 2 - + -
childDomainBox[2][0].first = domainBox[0].first;
childDomainBox[2][0].second = midX;
childDomainBox[2][1].first = midY;
childDomainBox[2][1].second = domainBox[1].second;
childDomainBox[2][2].first = domainBox[2].first;
childDomainBox[2][2].second = midZ;
// 3 - + +
childDomainBox[3][0].first = domainBox[0].first;
childDomainBox[3][0].second = midX;
childDomainBox[3][1].first = midY;
childDomainBox[3][1].second = domainBox[1].second;
childDomainBox[3][2].first = midZ;
childDomainBox[3][2].second = domainBox[2].second;
// 4 + - -
childDomainBox[4][0].first = midX;
childDomainBox[4][0].second = domainBox[0].second;
childDomainBox[4][1].first = domainBox[1].first;
childDomainBox[4][1].second = midY;
childDomainBox[4][2].first = domainBox[2].first;
childDomainBox[4][2].second = midZ;
// 5 + - +
childDomainBox[5][0].first = midX;
childDomainBox[5][0].second = domainBox[0].second;
childDomainBox[5][1].first = domainBox[1].first;
childDomainBox[5][1].second = midY;
childDomainBox[5][2].first = midZ;
childDomainBox[5][2].second = domainBox[2].second;
// 6 + + -
childDomainBox[6][0].first = midX;
childDomainBox[6][0].second = domainBox[0].second;
childDomainBox[6][1].first = midY;
childDomainBox[6][1].second = domainBox[1].second;
childDomainBox[6][2].first = domainBox[2].first;
childDomainBox[6][2].second = midZ;
// 7 + + +
childDomainBox[7][0].first = midX;
childDomainBox[7][0].second = domainBox[0].second;
childDomainBox[7][1].first = midY;
childDomainBox[7][1].second = domainBox[1].second;
childDomainBox[7][2].first = midZ;
childDomainBox[7][2].second = domainBox[2].second;
for(SimplexId i = 0; i < (SimplexId) cellList.size(); i++){
SimplexId childId = 0;
for(int j = 0; j < 8; j++){
if((cellDomainBox_[cellList[i]][0].first
>= childDomainBox[j][0].first)
&&(cellDomainBox_[cellList[i]][0].first
< childDomainBox[j][0].second)
&&
(cellDomainBox_[cellList[i]][1].first
>= childDomainBox[j][1].first)
&&(cellDomainBox_[cellList[i]][1].first
< childDomainBox[j][1].second)
&&
(cellDomainBox_[cellList[i]][2].first
>= childDomainBox[j][2].first)
&&(cellDomainBox_[cellList[i]][2].first
< childDomainBox[j][2].second)){
childId = j;
break;
}
}
// update child's range box
if(childCellList[childId].empty()){
childRangeBox[childId].first.first
= cellRangeBox_[cellList[i]].first.first;
childRangeBox[childId].first.second
= cellRangeBox_[cellList[i]].first.second;
childRangeBox[childId].second.first
= cellRangeBox_[cellList[i]].second.first;
childRangeBox[childId].second.second
= cellRangeBox_[cellList[i]].second.second;
}
else{
if(cellRangeBox_[cellList[i]].first.first
< childRangeBox[childId].first.first){
childRangeBox[childId].first.first
= cellRangeBox_[cellList[i]].first.first;
}
if(cellRangeBox_[cellList[i]].first.second
> childRangeBox[childId].first.second){
childRangeBox[childId].first.second
= cellRangeBox_[cellList[i]].first.second;
}
if(cellRangeBox_[cellList[i]].second.first
< childRangeBox[childId].second.first){
childRangeBox[childId].second.first
= cellRangeBox_[cellList[i]].second.first;
}
if(cellRangeBox_[cellList[i]].second.second
> childRangeBox[childId].second.second){
childRangeBox[childId].second.second
= cellRangeBox_[cellList[i]].second.second;
}
}
childCellList[childId].push_back(cellList[i]);
}
for(int i = 0; i < 8; i++){
buildNode<dataTypeU, dataTypeV>(childCellList[i],
childDomainBox[i], childRangeBox[i], nodeList_[nodeId].childList_[i]);
}
}
else{
// leaf
nodeList_[nodeId].cellList_ = cellList;
}
return 0;
}
#endif // _RANGE_DRIVEN_OCTREE_H
|
trace.c | #include "trace.h"
#include <omp.h>
Event events[MAXTHREADS][MAXEVENTS];
int nevents[MAXTHREADS];
void trace_init(){
int i;
#pragma omp master
{
/* strcpy(colors[0], "#d38d5f"); */
/* strcpy(colors[1], "#ffdd55"); */
/* strcpy(colors[2], "#8dd35f"); */
/* strcpy(colors[3], "#80b3ff"); */
/* strcpy(colors[4], "#e580ff"); */
for(i=0; i<MAXTHREADS; i++)
nevents[i]=0;
t_zero = usecs();
}
return;
}
void trace_event_start(int type){
int iam;
iam = omp_get_thread_num();
(events[iam][nevents[iam]]).t_start = usecs();
(events[iam][nevents[iam]]).type = type;
return;
}
void trace_event_stop(int type){
int iam;
iam = omp_get_thread_num();
(events[iam][nevents[iam]]).t_stop = usecs();
nevents[iam] +=1;
return;
}
void trace_dump(char *fname){
int t, e, i, nth;
long offs, t_stop;
Event ev;
FILE * pFile;
char *colors[] = { "#d38d5f", "#ffdd55", "#8dd35f", "#80b3ff", "#e580ff"};
double scale_x, scale_y, x;
#pragma omp master
{
t_stop = usecs();
nth = 0;
for(i=0; i<MAXTHREADS; i++)
if(nevents[i] > 0) nth=i+1;
/* scale_x = 1000.0; */
/* scale_y = ((double)(nth+1)) *30000 / ((double)(t_stop-t_zero)); */
scale_x = ((double)(t_stop-t_zero))/1000.0;
scale_y = 0.1;
pFile = fopen (fname,"w");
fprintf(pFile,"<svg x=\"-11\" y=\"-8\" width=\"%f\" height=\"%f\">\n",
((double)t_stop-t_zero)*1.06/scale_x, ((double)nth+1)*1.05/scale_y+8);
for(t=0; t<MAXTHREADS; t++){
for(e=0; e<nevents[t]; e++){
ev = events[t][e];
fprintf(pFile," "
"<rect x=\"%f\" y=\"%f\" width=\"%f\" height=\"%f\" "
"fill=\"%7s\" stroke-width=\"0\"/>\n",
(double)(ev.t_start-t_zero)/scale_x,
((double)t)/scale_y,
((double)(ev.t_stop-ev.t_start))/scale_x,
((double)1.0)/scale_y,
colors[ev.type]);
}
}
fprintf(pFile, "<line x1=\"0\" y1=\"%f\" x2=\"%f\" y2=\"%f\" \
style=\"stroke:rgb(0,0,0);stroke-width:1\"/>\n",
((double)nth+1)*1.01/scale_y,
((double)t_stop-t_zero)*1.02/scale_x,
((double)nth+1)*1.01/scale_y);
for(t=0; t<nth; t++){
fprintf(pFile,"<text x=\"%f\" y=\"%f\" style=\"font-size:6pt\">Thread %d</text>\n",
((double)t_stop-t_zero)*1.01/scale_x, ((double)t+0.8)/scale_y, t);
}
fprintf(pFile,"<text x=\"0\" y=\"%f\" style=\"font-size:6pt\">Time (usec.)</text>\n",((double)nth+1)*1.05/scale_y+7);
for(x=((double)t_stop-t_zero)/scale_x/10.0; x<=((double)t_stop-t_zero)/scale_x; x+=((double)t_stop-t_zero)/scale_x/10.0){
fprintf(pFile, "<line x1=\"%f\" y1=\"%f\" x2=\"%f\" y2=\"%f\" \
style=\"stroke:rgb(0,0,0);stroke-width:0.5\"/>\n",
x,((double)nth+1)*1.01/scale_y-1, x, ((double)nth+1)*1.01/scale_y+1);
fprintf(pFile,"<text x=\"%f\" y=\"%f\" style=\"font-size:6pt\"> %.0f </text>\n",
x,((double)nth+1)*1.01/scale_y+7,x*scale_x);
}
fprintf(pFile,"</svg>\n");
}
}
|
convolution_sgemm_pack4to8_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void im2col_sgemm_pack4to8_fp16sa_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
// Mat bottom_im2col(size, maxk, inch, 8u, 4, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
const __fp16* bias = _bias;
// permute
Mat tmp;
if (size >= 8)
tmp.create(8 * maxk, inch, size / 8 + size % 8, 8u, 4, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 8u, 4, opt.workspace_allocator);
{
int nn_size = size >> 3;
int remain_size_start = 0;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
__fp16* tmpptr = tmp.channel(i / 8);
for (int q = 0; q < inch; q++)
{
const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * 4;
for (int k = 0; k < maxk; k++)
{
// transpose 4x8
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
"st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3");
img0 += size * 4;
}
}
}
remain_size_start += nn_size << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
__fp16* tmpptr = tmp.channel(i / 8 + i % 8);
for (int q = 0; q < inch; q++)
{
const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * 4;
for (int k = 0; k < maxk; k++)
{
asm volatile(
"prfm pldl1keep, [%0, #64] \n"
"ld1 {v0.4h}, [%0] \n"
"st1 {v0.4h}, [%1], #8 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0");
img0 += size * 4;
}
}
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
__fp16* outptr0 = top_blob.channel(p);
const __fp16 zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const __fp16* biasptr = bias ? bias + p * 8 : zeros;
int i = 0;
for (; i + 7 < size; i += 8)
{
__fp16* tmpptr = tmp.channel(i / 8);
const __fp16* kptr = kernel.channel(p);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v16.8h}, [%8] \n"
"mov v17.16b, v16.16b \n"
"mov v18.16b, v16.16b \n"
"mov v19.16b, v16.16b \n"
"mov v20.16b, v16.16b \n"
"mov v21.16b, v16.16b \n"
"mov v22.16b, v16.16b \n"
"mov v23.16b, v16.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
"fmla v16.8h, v8.8h, v0.h[0] \n"
"fmla v17.8h, v8.8h, v0.h[1] \n"
"fmla v18.8h, v8.8h, v0.h[2] \n"
"fmla v19.8h, v8.8h, v0.h[3] \n"
"fmla v20.8h, v8.8h, v0.h[4] \n"
"fmla v21.8h, v8.8h, v0.h[5] \n"
"fmla v22.8h, v8.8h, v0.h[6] \n"
"fmla v23.8h, v8.8h, v0.h[7] \n"
"fmla v16.8h, v9.8h, v1.h[0] \n"
"fmla v17.8h, v9.8h, v1.h[1] \n"
"fmla v18.8h, v9.8h, v1.h[2] \n"
"fmla v19.8h, v9.8h, v1.h[3] \n"
"fmla v20.8h, v9.8h, v1.h[4] \n"
"fmla v21.8h, v9.8h, v1.h[5] \n"
"fmla v22.8h, v9.8h, v1.h[6] \n"
"fmla v23.8h, v9.8h, v1.h[7] \n"
"fmla v16.8h, v10.8h, v2.h[0] \n"
"fmla v17.8h, v10.8h, v2.h[1] \n"
"fmla v18.8h, v10.8h, v2.h[2] \n"
"fmla v19.8h, v10.8h, v2.h[3] \n"
"fmla v20.8h, v10.8h, v2.h[4] \n"
"fmla v21.8h, v10.8h, v2.h[5] \n"
"fmla v22.8h, v10.8h, v2.h[6] \n"
"fmla v23.8h, v10.8h, v2.h[7] \n"
"fmla v16.8h, v11.8h, v3.h[0] \n"
"fmla v17.8h, v11.8h, v3.h[1] \n"
"fmla v18.8h, v11.8h, v3.h[2] \n"
"fmla v19.8h, v11.8h, v3.h[3] \n"
"fmla v20.8h, v11.8h, v3.h[4] \n"
"fmla v21.8h, v11.8h, v3.h[5] \n"
"fmla v22.8h, v11.8h, v3.h[6] \n"
"fmla v23.8h, v11.8h, v3.h[7] \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
"st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
"st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
for (; i < size; i++)
{
__fp16* tmpptr = tmp.channel(i / 8 + i % 8);
const __fp16* kptr = kernel.channel(p);
int nn = inch * maxk; // inch always > 0
float16x8_t _sum0 = vld1q_f16(biasptr);
int q = 0;
for (; q < nn; q++)
{
float16x4_t _r0 = vld1_f16(tmpptr);
float16x8_t _k0 = vld1q_f16(kptr);
float16x8_t _k1 = vld1q_f16(kptr + 8);
float16x8_t _k2 = vld1q_f16(kptr + 16);
float16x8_t _k3 = vld1q_f16(kptr + 24);
_sum0 = vfmaq_lane_f16(_sum0, _k0, _r0, 0);
_sum0 = vfmaq_lane_f16(_sum0, _k1, _r0, 1);
_sum0 = vfmaq_lane_f16(_sum0, _k2, _r0, 2);
_sum0 = vfmaq_lane_f16(_sum0, _k3, _r0, 3);
kptr += 32;
tmpptr += 4;
}
vst1q_f16(outptr0, _sum0);
outptr0 += 8;
}
}
}
static void convolution_im2col_sgemm_transform_kernel_pack4to8_fp16sa_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
const int maxk = kernel_w * kernel_h;
// interleave
// src = maxk-inch-outch
// dst = 8b-4a-maxk-inch/4a-outch/8b
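    // Example of the destination layout (reading aid; the sizes are arbitrary):
    // with inch = 4, outch = 8 and maxk = 9, kernel_tm has outch/8 = 1 channel
    // holding inch/4 = 1 row of 32*maxk __fp16 values, written in the order
    // [k][i = 0..3][j = 0..7] by the loops below.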
Mat kernel = _kernel.reshape(maxk, inch, outch);
kernel_tm.create(32 * maxk, inch / 4, outch / 8, (size_t)2u);
for (int q = 0; q + 7 < outch; q += 8)
{
Mat g0 = kernel_tm.channel(q / 8);
for (int p = 0; p + 3 < inch; p += 4)
{
__fp16* g00 = g0.row<__fp16>(p / 4);
for (int k = 0; k < maxk; k++)
{
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 8; j++)
{
const float* k00 = kernel.channel(q + j).row(p + i);
g00[0] = (__fp16)k00[k];
g00++;
}
}
}
}
}
}
static void convolution_im2col_sgemm_pack4to8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
const int size = outw * outh;
const int maxk = kernel_w * kernel_h;
// im2col
Mat bottom_im2col(size, maxk, inch, 8u, 4, opt.workspace_allocator);
{
const int gap = (w * stride_h - outw * stride_w) * 4;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < inch; p++)
{
const Mat img = bottom_blob.channel(p);
__fp16* ptr = bottom_im2col.channel(p);
for (int u = 0; u < kernel_h; u++)
{
for (int v = 0; v < kernel_w; v++)
{
const __fp16* sptr = img.row<const __fp16>(dilation_h * u) + dilation_w * v * 4;
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
float16x4_t _val0 = vld1_f16(sptr);
float16x4_t _val1 = vld1_f16(sptr + stride_w * 4);
float16x4_t _val2 = vld1_f16(sptr + stride_w * 8);
float16x4_t _val3 = vld1_f16(sptr + stride_w * 12);
vst1_f16(ptr, _val0);
vst1_f16(ptr + 4, _val1);
vst1_f16(ptr + 8, _val2);
vst1_f16(ptr + 12, _val3);
sptr += stride_w * 16;
ptr += 16;
}
for (; j + 1 < outw; j += 2)
{
float16x4_t _val0 = vld1_f16(sptr);
float16x4_t _val1 = vld1_f16(sptr + stride_w * 4);
vst1_f16(ptr, _val0);
vst1_f16(ptr + 4, _val1);
sptr += stride_w * 8;
ptr += 8;
}
for (; j < outw; j++)
{
float16x4_t _val = vld1_f16(sptr);
vst1_f16(ptr, _val);
sptr += stride_w * 4;
ptr += 4;
}
sptr += gap;
}
}
}
}
}
im2col_sgemm_pack4to8_fp16sa_neon(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
tp_pointcloud_obstacle_barnes_hut_0.h | #pragma once
#include "rsurface_types.h"
#include "surface_energy.h"
#include "helpers.h"
#include "bct_constructors.h"
#include "optimized_bct_types.h"
#include "optimized_cluster_tree.h"
#include "derivative_assembler.h"
namespace rsurfaces
{
// 0-th order multipole approximation of tangent-point energy
class TPPointCloudObstacleBarnesHut0 : public SurfaceEnergy
{
public:
TPPointCloudObstacleBarnesHut0(MeshPtr mesh_, GeomPtr geom_, SurfaceEnergy *bvhSharedFrom_,
Eigen::VectorXd & pt_weights, Eigen::MatrixXd & pt_positions,
mreal alpha_, mreal beta_, mreal theta_, mreal weight_ = 1.)
{
mesh = mesh_;
geom = geom_;
bvh = 0;
bvhSharedFrom = bvhSharedFrom_;
if( pt_positions.rows() != pt_weights.rows() )
{
            eprint("TPPointCloudObstacleBarnesHut0: Number of position vectors and number of weights do not coincide. Aborting.");
return;
}
mint primitive_count = pt_positions.rows();
mint dim = 3;
mint primitive_length = 1;
mreal * P_far = nullptr;
mint far_dim = bvhSharedFrom->GetBVH()->far_dim;
safe_alloc( P_far, primitive_count * far_dim, 0.);
#pragma omp parallel for
for( mint i = 0; i < pt_positions.rows(); ++i )
{
P_far[ far_dim * i + 0] = pt_weights(i);
P_far[ far_dim * i + 1] = pt_positions(i,0);
P_far[ far_dim * i + 2] = pt_positions(i,1);
P_far[ far_dim * i + 3] = pt_positions(i,2);
}
mreal * P_near = nullptr;
mreal * P_coords = nullptr;
mint near_dim = bvhSharedFrom->GetBVH()->near_dim;
safe_alloc( P_near, primitive_count * near_dim, 0.);
safe_alloc( P_coords, primitive_count * dim, 0.);
#pragma omp parallel for
for( mint i = 0; i < pt_positions.rows(); ++i )
{
P_near[ near_dim * i + 0] = pt_weights(i);
P_near[ near_dim * i + 1] = P_coords[ dim * i + 0] = pt_positions(i,0);
P_near[ near_dim * i + 2] = P_coords[ dim * i + 1] = pt_positions(i,1);
P_near[ near_dim * i + 3] = P_coords[ dim * i + 2] = pt_positions(i,2);
}
mint * idx = nullptr;
mint * idxdim = nullptr;
mreal * ones = nullptr;
mreal * zeroes = nullptr;
safe_iota( idx, dim * primitive_count + 1);
safe_alloc( idxdim, dim * primitive_count);
safe_alloc( ones, primitive_count + 1, 1.);
safe_alloc( zeroes, dim * primitive_count, 0.);
#pragma omp parallel for
for( mint i = 0; i < primitive_count; ++i)
{
for( mint k = 0; k < dim; ++k )
{
idxdim[ dim * i + k] = i;
}
}
MKLSparseMatrix AvOp = MKLSparseMatrix( primitive_count, primitive_count, idx, idx, ones ); // identity matrix
MKLSparseMatrix DiffOp = MKLSparseMatrix( dim * primitive_count, primitive_count, idx, idxdim, zeroes ); // zero matrix
// create a cluster tree
o_bvh = new OptimizedClusterTree(
&P_coords[0], // coordinates used for clustering
primitive_count, // number of primitives
dim, // dimension of ambient space
&P_coords[0], // coordinates of the convex hull of each mesh element
primitive_length, // number of points in the convex hull of each mesh element (3 for triangle meshes, 2 for polylines)
&P_near[0], // area, barycenter, and normal of mesh element
near_dim, // number of dofs of P_near per mesh element; it is 7 for polylines and triangle meshes in 3D.
&P_far[0], // area, barycenter, and projector of mesh element
far_dim, // number of dofs of P_far per mesh element; it is 10 for polylines and triangle meshes in 3D.
idx, // some ordering of triangles
DiffOp, // the first-order differential operator belonging to the hi order term of the metric
AvOp // the zeroth-order differential operator belonging to the lo order term of the metric
);
safe_free( P_far );
safe_free( P_near );
safe_free( P_coords );
safe_free(idx);
safe_free(idxdim);
safe_free(ones);
safe_free(zeroes);
alpha = alpha_;
beta = beta_;
theta = theta_;
weight = weight_;
mreal intpart;
use_int = (std::modf(alpha, &intpart) == 0.0) && (std::modf(beta / 2, &intpart) == 0.0);
Update();
}
~TPPointCloudObstacleBarnesHut0()
{
if (o_bvh)
{
delete o_bvh;
}
}
// Returns the current value of the energy.
virtual double Value();
// Returns the current differential of the energy, stored in the given
// V x 3 matrix, where each row holds the differential (a 3-vector) with
// respect to the corresponding vertex.
virtual void Differential(Eigen::MatrixXd &output);
// Update the energy to reflect the current state of the mesh. This could
// involve building a new BVH for Barnes-Hut energies, for instance.
virtual void Update();
// Get the mesh associated with this energy.
virtual MeshPtr GetMesh();
        // Get the geometry associated with this energy.
virtual GeomPtr GetGeom();
// Get the exponents of this energy; only applies to tangent-point energies.
virtual Vector2 GetExponents();
// Get a pointer to the current BVH for this energy.
// Return 0 if the energy doesn't use a BVH.
virtual OptimizedClusterTree *GetBVH();
// Return the separation parameter for this energy.
// Return 0 if this energy doesn't do hierarchical approximation.
virtual double GetTheta();
bool use_int = false;
private:
MeshPtr mesh = nullptr;
GeomPtr geom = nullptr;
mreal alpha = 6.;
mreal beta = 12.;
mreal theta = 0.5;
SurfaceEnergy * bvhSharedFrom;
OptimizedClusterTree * bvh = nullptr;
OptimizedClusterTree * o_bvh = nullptr;
template <typename T1, typename T2>
mreal Energy(T1 alpha, T2 betahalf);
template <typename T1, typename T2>
mreal DEnergy(T1 alpha, T2 betahalf);
    }; // TPPointCloudObstacleBarnesHut0
} // namespace rsurfaces
|
zgeqrf.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_geqrf
*
* Computes a tile QR factorization of a real or complex m-by-n matrix A.
* The factorization has the form
* \f[ A = Q \times R \f],
 * where Q is a matrix with orthonormal columns and R is an upper
 * triangular matrix with positive diagonal entries.
*
*******************************************************************************
*
* @param[in] m
* The number of rows of the matrix A.
* m >= 0.
*
* @param[in] n
* The number of columns of the matrix A.
* n >= 0.
*
* @param[in,out] pA
* On entry, pointer to the m-by-n matrix A.
* On exit, the elements on and above the diagonal of the array contain
* the min(m,n)-by-n upper trapezoidal matrix R (R is upper triangular
* if m >= n); the elements below the diagonal represent the unitary
* matrix Q as a product of elementary reflectors stored by tiles.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,m).
*
* @param[out] T
* On exit, auxiliary factorization data, required by plasma_zgeqrs to
* solve the system of equations.
* Matrix in T is allocated inside this function and needs to be
* destroyed by plasma_desc_destroy.
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
*******************************************************************************
*
* @sa plasma_omp_zgeqrf
* @sa plasma_cgeqrf
* @sa plasma_dgeqrf
* @sa plasma_sgeqrf
* @sa plasma_zgeqrs
* @sa plasma_zgels
*
******************************************************************************/
int plasma_zgeqrf(int m, int n,
plasma_complex64_t *pA, int lda,
plasma_desc_t *T)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_fatal_error("PLASMA not initialized");
return PlasmaErrorNotInitialized;
}
// Check input arguments.
if (m < 0) {
plasma_error("illegal value of m");
return -1;
}
if (n < 0) {
plasma_error("illegal value of n");
return -2;
}
if (lda < imax(1, m)) {
plasma_error("illegal value of lda");
return -4;
}
// quick return
if (imin(m, n) == 0)
return PlasmaSuccess;
// Tune parameters.
if (plasma->tuning)
plasma_tune_geqrf(plasma, PlasmaComplexDouble, m, n);
// Set tiling parameters.
int ib = plasma->ib;
int nb = plasma->nb;
plasma_enum_t householder_mode = plasma->householder_mode;
// Create tile matrix.
plasma_desc_t A;
int retval;
retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
m, n, 0, 0, m, n, &A);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
return retval;
}
// Prepare descriptor T.
retval = plasma_descT_create(A, ib, householder_mode, T);
if (retval != PlasmaSuccess) {
plasma_error("plasma_descT_create() failed");
return retval;
}
// Allocate workspace.
plasma_workspace_t work;
size_t lwork = nb + ib*nb; // geqrt: tau + work
retval = plasma_workspace_create(&work, lwork, PlasmaComplexDouble);
if (retval != PlasmaSuccess) {
plasma_error("plasma_workspace_create() failed");
return retval;
}
// Initialize sequence.
plasma_sequence_t sequence;
retval = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval = plasma_request_init(&request);
// asynchronous block
#pragma omp parallel
#pragma omp master
{
// Translate to tile layout.
plasma_omp_zge2desc(pA, lda, A, &sequence, &request);
// Call the tile async function.
plasma_omp_zgeqrf(A, *T, work, &sequence, &request);
// Translate back to LAPACK layout.
plasma_omp_zdesc2ge(A, pA, lda, &sequence, &request);
}
// implicit synchronization
plasma_workspace_destroy(&work);
// Free matrix A in tile layout.
plasma_desc_destroy(&A);
// Return status.
int status = sequence.status;
return status;
}
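/*
 * Minimal calling sketch (illustrative only; setting up the matrix entries
 * and checking return codes are omitted, and the plasma_init()/
 * plasma_finalize() bracketing is assumed to follow the usual PLASMA
 * conventions):
 *
 *     plasma_init();
 *     plasma_desc_t T;
 *     int info = plasma_zgeqrf(m, n, pA, lda, &T);   (pA is m-by-n, lda >= m)
 *     ... use pA and T, e.g. with plasma_zgeqrs, then ...
 *     plasma_desc_destroy(&T);
 *     plasma_finalize();
 */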
/***************************************************************************//**
*
* @ingroup plasma_geqrf
*
* Computes a tile QR factorization of a matrix.
* Non-blocking tile version of plasma_zgeqrf().
* May return before the computation is finished.
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in,out] A
* Descriptor of matrix A.
* A is stored in the tile layout.
*
* @param[out] T
* Descriptor of matrix T.
* On exit, auxiliary factorization data, required by plasma_zgeqrs to
* solve the system of equations.
*
* @param[in] work
* Workspace for the auxiliary arrays needed by some coreblas kernels.
* For QR factorization, contains preallocated space for tau and work
* arrays. Allocated by the plasma_workspace_create function.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_zgeqrf
* @sa plasma_omp_cgeqrf
* @sa plasma_omp_dgeqrf
* @sa plasma_omp_sgeqrf
* @sa plasma_omp_zgeqrs
* @sa plasma_omp_zgeqrs
* @sa plasma_omp_zgels
*
******************************************************************************/
void plasma_omp_zgeqrf(plasma_desc_t A, plasma_desc_t T,
plasma_workspace_t work,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// Check input arguments.
if (plasma_desc_check(A) != PlasmaSuccess) {
plasma_error("invalid A");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(T) != PlasmaSuccess) {
plasma_error("invalid T");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (sequence == NULL) {
plasma_fatal_error("NULL sequence");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (request == NULL) {
plasma_fatal_error("NULL request");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// quick return
if (imin(A.m, A.n) == 0)
return;
// Call the parallel function.
if (plasma->householder_mode == PlasmaTreeHouseholder) {
plasma_pzgeqrf_tree(A, T, work, sequence, request);
}
else {
plasma_pzgeqrf(A, T, work, sequence, request);
}
}
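/*
 * Calling sketch for the asynchronous interface (illustrative only): as in
 * the body of plasma_zgeqrf() above, the call is expected to be nested in an
 * OpenMP parallel/master region so the tasks it spawns can execute:
 *
 *     #pragma omp parallel
 *     #pragma omp master
 *     {
 *         plasma_omp_zgeqrf(A, T, work, &sequence, &request);
 *     }
 */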
|
881.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
static
void init_array (int ni, int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
// printf("Initializing Array\n");
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++)
{
A[i][j] = ((DATA_TYPE) (i + j) / nj);
}
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj,
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++) {
fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
}
fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_conv2d(int ni,
int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j;
#pragma scop
#pragma omp parallel for simd schedule(static, 28)
for (i = 1; i < _PB_NI - 1; ++i)
{
for (j = 1; j < _PB_NJ - 1; ++j)
{
B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
+ -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
+ 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
}
}
#pragma endscop
  // printf("Kernel computation complete !!\n");
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
/* Initialize array(s). */
init_array (ni, nj, POLYBENCH_ARRAY(A));
/* Start timer. */
//polybench_start_instruments;
polybench_timer_start();
/* Run kernel. */
kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
/* Stop and print timer. */
polybench_timer_stop();
polybench_timer_print();
//polybench_stop_instruments;
//polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
return 0;
}
|
sdp.c | /*
Solve a semidefinite programming problem using the algorithm of Helmberg,
Rendl, Vanderbei, and Wolkowicz.
Note: This version of the code uses predictor-corrector steps.
Usage:
Return codes:
0 Success.
1 Success. The problem is primal infeasible, and we
have a certificate.
2 Success. The problem is dual infeasible, and we have
a certificate.
3 Partial Success. Didn't reach full accuracy.
4 Failure: Maximum iterations reached.
5 Failure: Stuck at edge of primal feasibility.
6 Failure: Stuck at edge of dual feasibility.
7 Failure: Lack of progress
8 Failure: X, Z, or O was singular.
9 Failure: Detected NaN or Inf values.
Notes on data storage: All "2-d" arrays are stored in Fortran style,
column major order. Macros in index.h are used to handle indexing into
C 1-d vectors. All "1-d" arrays are stored in C vectors, with the first
element of the vector (index 0) unused. We always start indexing in
both one and two dimensional arrays with 1, ala Fortran. This makes
the C code somewhat clearer, but requires us to pass v+1 into a Fortran
subroutine instead of just passing v.
*/
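/*
 * Illustration of the storage convention above (the actual indexing macro
 * lives in index.h; the formula below is just the standard column-major
 * mapping and is given here only as an example): element (i,j) of an
 * lda-by-n "2-d" array A, with i and j starting at 1, is found at
 *
 *     A[(j-1)*lda + (i-1)]
 *
 * in the underlying C vector, while a "1-d" vector v of length k occupies
 * v[1] through v[k], leaving v[0] unused.
 */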
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "declarations.h"
#ifdef USEGETTIME
/*
* Stuff for keeping track of time.
*/
#include <stddef.h> /* definition of NULL */
#include <sys/time.h> /* definition of timeval struct and protyping of gettime
ofday */
double opotime=0.0;
double factortime=0.0;
double totaltime=0.0;
double othertime=0.0;
double othertime1=0.0;
double othertime2=0.0;
double othertime3=0.0;
struct timeval tp;
double t1=0.0;
double t2=0.0;
double starttime=0.0;
double endtime=0.0;
#endif
int sdp(n,k,C,a,constant_offset,constraints,byblocks,fill,X,y,Z,cholxinv,
cholzinv,pobj,dobj,work1,work2,work3,workvec1,workvec2,workvec3,
workvec4,workvec5,workvec6,workvec7,workvec8,diagO,bestx,besty,bestz,
Zi,O,rhs,dZ,dX,dy,dy1,Fp,printlevel,parameters)
int n;
int k;
struct blockmatrix C;
double *a;
double constant_offset;
struct constraintmatrix *constraints;
struct sparseblock **byblocks;
struct constraintmatrix fill;
struct blockmatrix X;
double *y;
struct blockmatrix Z;
struct blockmatrix cholxinv;
struct blockmatrix cholzinv;
double *pobj;
double *dobj;
struct blockmatrix work1;
struct blockmatrix work2;
struct blockmatrix work3;
double *workvec1;
double *workvec2;
double *workvec3;
double *workvec4;
double *workvec5;
double *workvec6;
double *workvec7;
double *workvec8;
double *diagO;
struct blockmatrix bestx;
double *besty;
struct blockmatrix bestz;
struct blockmatrix Zi;
double *O;
double *rhs;
struct blockmatrix dZ;
struct blockmatrix dX;
double *dy;
double *dy1;
double *Fp;
int printlevel;
struct paramstruc parameters;
{
double gap;
double relgap;
double mu=1.0e30;
double muplus;
double oldmu;
double muk;
double gamma;
double alphap,alphap1;
double alphad,alphad1;
double scale1;
double scale2;
double nrmx;
double relpinfeas=1.0e10;
double reldinfeas=1.0e10;
double newrelpinfeas;
double newreldinfeas;
double limrelpinfeas;
double bestrelpinfeas;
double limreldinfeas;
double bestreldinfeas;
double maxrelinfeas=1.0e100;
double oldmaxrelinfeas=1.0e100;
double newpobj;
double newdobj;
int iter=0;
int m;
int ret;
int i;
int j;
int info;
int ldam;
int retries=0;
int retcode=0;
double bestmeas;
int lastimprove=0;
double diagnrm;
double diagfact=0.0;
double diagadd;
double ent;
/*
* Stuff for instrumentation of iterative refinement.
*/
double relerr1;
double relerr2=0.0;
int refinements;
double besterr;
double relerr;
/*
* Stuff for iterative refinements.
*/
int lastimproverefinement;
double mindiag;
/*
*
*/
int ispfeasprob;
int isdfeasprob;
double normC;
double norma;
/*
* Amount by which to perturb
*/
double perturbfac;
/*
* Sticky flags for primal and dual feasibility.
*/
int probpfeas=0;
int probdfeas=0;
/*
*
*
*/
double pinfeasmeas;
double dinfeasmeas;
/*
* Stuff for adjusting the step fraction.
*/
double mystepfrac;
double minalpha;
/*
*
*/
double newalphap;
/*
* Stuff for keeping track of best solutions.
*/
#define BASIZE 100
double bestarray[BASIZE+1];
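/*
 * bestarray is used as a circular buffer (indexed by iter % BASIZE) of recent
 * bestmeas values; the lack-of-progress test below compares the current
 * bestmeas against the value recorded 20 iterations earlier.
 */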
/*
* Used in checking whether the primal-dual affine step gives us a new
* best solution.
*/
double affgap,affpobj,affdobj,affrelgap,affrelpinfeas,affreldinfeas;
/*
* Precompute norms of a and C, so that we don't keep doing this
* again and again.
*
*/
norma=norm2(k,a+1);
normC=Fnorm(C);
if (parameters.perturbobj>0)
/* perturbfac=parameters.perturbobj*1.0e-6*normC/sqrt(1.0*n); */
perturbfac=1.0e-6*normC/sqrt(1.0*n);
else
perturbfac=0.0;
/*
* Determine whether or not this is a feasibility problem.
*
*/
if (normC==0.0)
{
if (printlevel >= 1)
printf("This is a pure primal feasibility problem.\n");
ispfeasprob=1;
}
else
ispfeasprob=0;
if (norma==0.0)
{
if (printlevel >= 1)
printf("This is a pure dual feasibility problem.\n");
isdfeasprob=1;
}
else
isdfeasprob=0;
/*
* For historical reasons, we use m for the total number of constraints.
* In this version of the code, k is also the number of constraints.
*/
m=k;
/*
* Work out the leading dimension for the array. Note that we may not
* want to use k itself, for cache issues.
*/
if ((k % 2) == 0)
ldam=k+1;
else
ldam=k;
/*
* Compute Cholesky factors of X and Z.
*/
copy_mat(X,work1);
ret=chol(work1);
/*
* If the matrix was singular, then just return with an error.
*/
if (ret != 0)
{
if (printlevel >= 1)
printf("X was singular!\n");
retcode=8;
goto RETURNBEST;
};
chol_inv(work1,work2);
store_packed(work2,cholxinv);
copy_mat(Z,work1);
ret=chol(work1);
/*
* If the matrix was singular, then just return with an error.
*/
if (ret != 0)
{
if (printlevel >= 1)
printf("Z was singular!\n");
retcode=8;
goto RETURNBEST;
};
chol_inv(work1,work2);
store_packed(work2,cholzinv);
/*
Compute Zi.
*/
copy_mat(work2,work1);
trans(work1);
scale1=1.0;
scale2=0.0;
mat_mult(scale1,scale2,work2,work1,Zi);
if (printlevel >= 4)
printf("Fnorm of Zi is %e \n",Fnorm(Zi));
/*
Compute primal and dual objective values.
*/
*pobj=calc_pobj(C,X,constant_offset);
*dobj=calc_dobj(k,a,y,constant_offset);
if (parameters.usexzgap==0)
{
gap=*dobj-*pobj;
if (gap < 0.0)
gap=0.0;
relgap=gap/(1.0+fabs(*dobj)+fabs(*pobj));
}
else
{
gap=trace_prod(X,Z);
relgap=gap/(1.0+fabs(*dobj)+fabs(*pobj));
};
if (printlevel >= 3)
{
printf("constant offset is %e \n",constant_offset);
printf("Fnorm of X is %e \n",Fnorm(X));
printf("Fnorm of C is %e \n",normC);
};
if (printlevel >= 4)
{
printf("pobj is %e \n",*pobj);
printf("dobj is %e \n",*dobj);
printf("gap is %e \n",gap);
};
relpinfeas=pinfeas(k,constraints,X,a,workvec1);
bestrelpinfeas=relpinfeas;
reldinfeas=dinfeas(k,C,constraints,y,Z,work2);
bestreldinfeas=reldinfeas;
if (relpinfeas < parameters.axtol )
probpfeas=1;
if (reldinfeas < parameters.atytol)
probdfeas=1;
oldmaxrelinfeas=maxrelinfeas;
if (relpinfeas > reldinfeas)
{
maxrelinfeas=relpinfeas;
}
else
{
maxrelinfeas=reldinfeas;
};
/*
* Record this solution as the best solution.
*/
bestmeas=1.0e100;
store_packed(X,bestx);
store_packed(Z,bestz);
for (i=1; i<=k; i++)
besty[i]=y[i];
/*
Initialize the big loop.
*/
alphap=0.0;
alphad=0.0;
/*
Print out some status information.
*/
if (printlevel >= 1)
{
printf("Iter: %2d Ap: %.2e Pobj: % .7e Ad: %.2e Dobj: % .7e \n",iter,alphap,*pobj,alphad,*dobj);
fflush(stdout);
};
while ((relgap > parameters.objtol) || (relpinfeas > parameters.axtol) || (reldinfeas > parameters.atytol))
{
/*
* Call the user exit routine, and let the user stop the process
* if he wants to.
*/
if (user_exit(n,k,C,a,*dobj,*pobj,constant_offset,constraints,
X,y,Z,parameters) == 1)
{
return(0);
}
bestarray[iter % BASIZE]=bestmeas;
/*
* Compute the stepfrac to be used for this iteration.
*/
if (iter > 1)
{
if (alphap > alphad)
minalpha=alphad;
else
minalpha=alphap;
}
else
{
minalpha=1.0;
};
mystepfrac=parameters.minstepfrac+minalpha*(parameters.maxstepfrac-parameters.minstepfrac);
if (printlevel >= 3)
printf("mystepfrac is %e \n",mystepfrac);
/*
* Print out information on primal/dual feasibility.
*/
if (printlevel >= 2)
{
printf("Relative primal infeasibility is %.7e \n",relpinfeas);
printf("Relative dual infeasibility: %.7e \n",reldinfeas);
printf("Relative duality gap is %.7e \n",relgap);
printf("XZ relative duality gap is %.7e \n",trace_prod(X,Z)/(1+fabs(*dobj)));
};
/*
* If this is a feasibility problem, and we've got feasibility,
* then we're done!
*/
if ((ispfeasprob==1) && (relpinfeas < parameters.axtol))
{
if (printlevel >= 3)
printf("Got primal feasibility, so stopping.\n");
for (i=1; i<=k; i++)
y[i]=0.0;
alphad=parameters.atytol/(sqrt(n*1.0)*200);
make_i(work1);
if (alphad*trace_prod(X,work1) > parameters.objtol)
alphad=0.005*parameters.objtol/trace_prod(X,work1);
zero_mat(work2);
addscaledmat(work2,alphad,work1,Z);
relpinfeas=pinfeas(k,constraints,X,a,workvec1);
if (relpinfeas < bestrelpinfeas)
bestrelpinfeas=relpinfeas;
reldinfeas=dinfeas(k,C,constraints,y,Z,work2);
if (reldinfeas < bestreldinfeas)
bestreldinfeas=reldinfeas;
*pobj=calc_pobj(C,X,constant_offset);
*dobj=calc_dobj(k,a,y,constant_offset);
if (parameters.usexzgap==0)
{
gap=*dobj-*pobj;
if (gap < 0.0)
gap=0.0;
relgap=gap/(1.0+fabs(*dobj)+fabs(*pobj));
}
else
{
gap=trace_prod(X,Z);
if (gap < 0.0)
gap=0.0;
relgap=gap/(1.0+fabs(*dobj)+fabs(*pobj));
};
bestmeas=relpinfeas/parameters.axtol;
store_packed(X,bestx);
store_packed(Z,bestz);
for (i=1; i<=k; i++)
besty[i]=y[i];
retcode=0;
goto RETURNBEST;
};
if ((isdfeasprob==1) && (reldinfeas < parameters.atytol))
{
if (printlevel >= 3)
printf("Got dual feasibility, so stopping.\n");
make_i(work1);
zero_mat(work2);
op_a(k,constraints,work1,workvec1);
alphap=parameters.atytol/(200.0*norm2(k,workvec1+1));
if (alphap*trace_prod(work1,Z) > parameters.objtol)
alphap=0.005*parameters.objtol/trace_prod(work1,Z);
addscaledmat(work2,alphap,work1,X);
relpinfeas=pinfeas(k,constraints,X,a,workvec1);
if (relpinfeas < bestrelpinfeas)
bestrelpinfeas=relpinfeas;
reldinfeas=dinfeas(k,C,constraints,y,Z,work2);
if (reldinfeas < bestreldinfeas)
bestreldinfeas=reldinfeas;
*pobj=calc_pobj(C,X,constant_offset);
*dobj=calc_dobj(k,a,y,constant_offset);
if (parameters.usexzgap==0)
{
gap=*dobj-*pobj;
if (gap < 0.0)
gap=0.0;
relgap=gap/(1.0+fabs(*dobj)+fabs(*pobj));
}
else
{
gap=trace_prod(X,Z);
if (gap < 0.0)
gap=0.0;
relgap=gap/(1.0+fabs(*dobj)+fabs(*pobj));
};
bestmeas=reldinfeas/parameters.atytol;
store_packed(X,bestx);
store_packed(Z,bestz);
for (i=1; i<=k; i++)
besty[i]=y[i];
if (printlevel >= 3)
printf("New best solution, %e \n",bestmeas);
retcode=0;
goto RETURNBEST;
};
/*
* Check for primal or dual infeasibility.
*/
op_at(k,y,constraints,work1);
addscaledmat(work1,-1.0,Z,work1);
pinfeasmeas=-(*dobj)/Fnorm(work1);
if (printlevel >= 2)
printf("-a'*y/||A'(y)-Z|| is %e \n",pinfeasmeas);
if ((probpfeas==0) &&
(pinfeasmeas > parameters.pinftol))
{
if (printlevel >= 1)
printf("Declaring primal infeasibility.\n");
for (i=1; i<=k; i++)
y[i]=-y[i]/(*dobj);
zero_mat(work1);
addscaledmat(work1,-1.0/(*dobj),Z,work1);
copy_mat(work1,Z);
retcode=1;
goto RETURNCERT;
};
op_a(k,constraints,X,workvec1);
dinfeasmeas=trace_prod(C,X)/norm2(k,workvec1+1);
if (printlevel >= 2)
printf("<C,X>/||A(X)||=%e\n",dinfeasmeas);
if ((probdfeas==0) &&
(dinfeasmeas>parameters.dinftol))
{
if (printlevel >= 1)
printf("Declaring dual infeasibility.\n");
zero_mat(work1);
addscaledmat(work1,1.0/trace_prod(C,X),X,work1);
copy_mat(work1,X);
retcode=2;
goto RETURNCERT;
};
/*
* Print out the norm(X) for debugging purposes.
*/
if (printlevel >= 3)
{
nrmx=Fnorm(X);
printf("Fnorm of X is %e \n",nrmx);
};
/*
Now, compute the system matrix.
*/
#ifdef USEGETTIME
gettimeofday(&tp,NULL);
t1=(double)tp.tv_sec+(1.0e-6)*tp.tv_usec;
#endif
op_o(k,constraints,byblocks,Zi,X,O,work1,work2);
#ifdef USEGETTIME
gettimeofday(&tp,NULL);
t2=(double)tp.tv_sec+(1.0e-6)*tp.tv_usec;
opotime=opotime+t2-t1;
#endif
/*
* Print out the actual density of Z and X.
*/
if ((iter==5) && (printlevel >= 3))
{
printf("Actual density of O %e\n",actnnz(k,ldam,O)/(1.0*k*k));
for (j=1; j<=X.nblocks; j++)
{
if (X.blocks[j].blockcategory==MATRIX)
{
printf("density X block %d, %e \n",j,actnnz(X.blocks[j].blocksize,
X.blocks[j].blocksize,
X.blocks[j].data.mat)/
(1.0*X.blocks[j].blocksize*X.blocks[j].blocksize));
printf("density Z block %d, %e \n",j,actnnz(Z.blocks[j].blocksize,
Z.blocks[j].blocksize,
Z.blocks[j].data.mat)/
(1.0*Z.blocks[j].blocksize*Z.blocks[j].blocksize));
printf("bandwidth Z block %d, %d/%d \n",j,bandwidth(Z.blocks[j].blocksize,Z.blocks[j].blocksize,Z.blocks[j].data.mat),Z.blocks[j].blocksize);
};
};
};
/*
Save a copy of O in the lower diagonal for later use.
*/
for (i=1; i<=k-1; i++)
for (j=i; j<=k; j++)
O[ijtok(j,i,ldam)]=O[ijtok(i,j,ldam)];
for (i=1; i<=k; i++)
diagO[i]=O[ijtok(i,i,ldam)];
mindiag=1.0e30;
for (i=1; i<=k; i++)
{
if (diagO[i] < mindiag)
{
mindiag=diagO[i];
};
};
/*
This is where we come if factorization failed or the system
was so badly conditioned that we didn't get a usable solution.
*/
diagnrm=0.0;
for (i=1; i<=k; i++)
{
ent=diagO[i];
diagnrm = diagnrm + ent*ent;
};
diagnrm=sqrt(diagnrm);
RETRYFACTOR:
/*
Now, let's make sure that O isn't singular.
*/
diagadd=1.0e-17*diagfact*diagnrm/sqrt(k*1.0);
while ((diagadd + mindiag) <= 0.0)
{
retries++;
if (diagfact==0.0)
{
diagfact=0.1;
diagadd=1.0e-17*diagfact*diagnrm/sqrt(k*1.0);
}
else
{
diagfact=diagfact*10.0;
diagadd=diagadd*10.0;
};
};
if (printlevel >= 3)
printf("diagnrm is %e, adding diagadd %e \n",diagnrm,diagadd);
for (i=1; i<=k; i++)
O[ijtok(i,i,ldam)] += diagadd;
/*
* Scale the O matrix.
*/
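/*
 * This is a symmetric diagonal (Jacobi) scaling: workvec8[i]=1/sqrt(O_ii), and
 * each entry O_ij is multiplied by workvec8[i]*workvec8[j], so the scaled
 * matrix has unit diagonal (except where the 1.0e30 cap applies). The same
 * vector is used later to rescale the right hand sides and the solutions.
 */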
for (i=1; i<=k; i++)
workvec8[i]=1.0/sqrt(O[ijtok(i,i,ldam)]);
for (i=1; i<=k; i++)
{
if (workvec8[i] > 1.0e30)
workvec8[i]=1.0e30;
};
#pragma omp parallel for schedule(dynamic,64) default(none) shared(O,ldam,k,workvec8) private(i,j)
for (j=1; j<=k; j++)
for (i=1; i<=j; i++)
O[ijtok(i,j,ldam)]=O[ijtok(i,j,ldam)]*(workvec8[i]*workvec8[j]);
/*
Next, compute the cholesky factorization of the system matrix.
*/
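/*
 * The four DPOTRF variants below differ only in the LAPACK naming convention
 * selected at compile time (with or without a trailing underscore, upper or
 * lower case). Each one overwrites the upper triangle of O with its Cholesky
 * factor.
 */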
#ifdef USEGETTIME
gettimeofday(&tp,NULL);
t1=(double)tp.tv_sec+(1.0e-6)*tp.tv_usec;
#endif
#ifdef NOUNDERLAPACK
#ifdef CAPSLAPACK
DPOTRF("U",&m,O,&ldam,&info);
#else
dpotrf("U",&m,O,&ldam,&info);
#endif
#else
#ifdef CAPSLAPACK
DPOTRF_("U",&m,O,&ldam,&info);
#else
dpotrf_("U",&m,O,&ldam,&info);
#endif
#endif
#ifdef USEGETTIME
gettimeofday(&tp,NULL);
t2=(double)tp.tv_sec+(1.0e-6)*tp.tv_usec;
factortime=factortime+t2-t1;
#endif
if (info != 0)
{
if (printlevel >= 3)
printf("Factorization of the system matrix failed!\n");
if (retries < 15)
{
if (retries == 0)
diagfact=0.1;
retries=retries+1;
diagfact=diagfact*10.0;
#pragma omp parallel for schedule(dynamic,64) private(i,j) shared(O,k,ldam)
for (i=1; i<=k-1; i++)
for (j=i; j<=k; j++)
O[ijtok(i,j,ldam)]=O[ijtok(j,i,ldam)];
for (i=1; i<=k; i++)
O[ijtok(i,i,ldam)]=diagO[i];
goto RETRYFACTOR;
}
else
{
if (printlevel >= 1)
printf("Factorization of the system matrix failed, giving up. \n");
/*
* Tighten up the solution as much as possible.
*/
retcode=8;
goto RETURNBEST;
};
};
/*
Compute the rhs vector.
*/
for (i=1; i<=k; i++)
rhs[i]=-a[i];
/*
* Add in the corrections for Fd.
*/
/*
This section is where Fd is computed and put into
the rhs.
To save storage, work2 is used instead of a variable named
Fd.
dX will be used as a work variable where needed in place
of old work2 usage.
*/
op_at(k,y,constraints,work1);
addscaledmat(Z,1.0,C,work2);
if ((bestmeas > 1.0e3) && (parameters.perturbobj>0))
{
if (printlevel >= 3)
printf("Perturbing C.\n");
make_i(work3);
addscaledmat(work2,-perturbfac,work3,work2);
};
if ((bestmeas < 1.0e3) && (parameters.perturbobj>0))
{
if (printlevel >= 3)
printf("Perturbing C.\n");
make_i(work3);
addscaledmat(work2,-perturbfac*pow(bestmeas/1000.0,1.5),work3,work2);
};
addscaledmat(work2,-1.0,work1,work2);
scale1=1.0;
scale2=0.0;
mat_multspb(scale1,scale2,Zi,work2,work3,fill);
mat_multspc(scale1,scale2,work3,X,dX,fill);
op_a(k,constraints,dX,workvec1);
for (i=1; i<=k; i++)
rhs[i] = rhs[i]+workvec1[i];
for (i=1; i<=k; i++)
workvec1[i]=rhs[i];
if (printlevel >=3)
{
printf("Fnorm of Fd is %e \n",Fnorm(work2));
printf("Norm of rhs is %e \n",norm2(m,rhs+1));
};
/*
Solve the system of equations for dy.
*/
/*
* First, scale
*/
for (i=1; i<=k; i++)
workvec1[i]=workvec1[i]*workvec8[i];
info=solvesys(k,ldam,O,workvec1);
for (i=1; i<=k; i++)
workvec1[i]=workvec1[i]*workvec8[i];
if (info != 0)
{
if (printlevel >= 1)
printf("Solving for dy failed! \n");
retcode=8;
goto RETURNBEST;
};
/*
* Do iterative refinement.
*/
if ((iter>1) && (relerr2 > parameters.axtol) &&
(parameters.fastmode==0))
{
op_at(k,workvec1,constraints,work1);
mat_multspa(1.0,0.0,work1,X,dX,fill);
mat_multspc(1.0,0.0,Zi,dX,work1,fill);
op_a(k,constraints,work1,workvec2);
for (i=1; i<=k; i++)
workvec2[i]=rhs[i]-workvec2[i];
relerr=norm2(k,workvec2+1)/(1.0+norm2(k,rhs+1));
besterr=relerr;
for (i=1; i<=k; i++)
workvec4[i]=workvec1[i];
if (printlevel >= 3)
{
printf("refinement: Before relative error in Ody=r is %e \n",besterr);
fflush(stdout);
};
refinements=0;
lastimproverefinement=0;
while ((refinements < 20) && (refinements-lastimproverefinement < 3)
&& (besterr > 1.0e-14))
{
refinements++;
for (i=1; i<=k; i++)
workvec3[i]=workvec2[i]*workvec8[i];
info=solvesys(k,ldam,O,workvec3);
for (i=1; i<=k; i++)
workvec3[i]=workvec3[i]*workvec8[i];
for (i=1; i<=k; i++)
workvec1[i]=workvec1[i]+workvec3[i];
op_at(k,workvec1,constraints,work1);
mat_multspa(1.0,0.0,work1,X,dX,fill);
mat_multspc(1.0,0.0,Zi,dX,work1,fill);
op_a(k,constraints,work1,workvec2);
for (i=1; i<=k; i++)
workvec2[i]=rhs[i]-workvec2[i];
relerr=norm2(k,workvec2+1)/(1.0+norm2(k,rhs+1));
if (relerr < besterr)
{
lastimproverefinement=refinements;
besterr=relerr;
for (i=1; i<=k; i++)
workvec4[i]=workvec1[i];
};
if (printlevel >= 4)
{
printf("refinement: During relative error in Ody=r is %e \n",besterr);
fflush(stdout);
};
};
if (printlevel >= 3)
printf("refinement: After relative error in Ody=r is %e, %d \n",besterr,lastimproverefinement);
for (i=1; i<=k; i++)
workvec1[i]=workvec4[i];
};
/*
* Extract dy.
*/
for (i=1; i<=k; i++)
dy[i]=workvec1[i];
if (printlevel >= 3)
printf("Norm of dy is %e \n",norm2(k,dy+1));
/*
Compute dZ
*/
op_at(k,dy,constraints,dZ);
/*
* Note: At this point, dZ only has A'(dy), not -Fd
*/
/*
Compute Zi*A'(dy), and save it in a temp variable for later use.
*/
scale1=1.0;
scale2=0.0;
mat_multspb(scale1,scale2,Zi,dZ,work1,fill);
if (printlevel >= 4)
printf("Fnorm of work1 is %e \n",Fnorm(work1));
/*
* Now, update dZ to include -Fd
*/
if (printlevel >= 3)
{
printf("Before Fd, Fnorm of dZ is %e \n",Fnorm(dZ));
fflush(stdout);
};
addscaledmat(dZ,-1.0,work2,dZ);
if (printlevel >= 3)
{
printf("After Fd, Fnorm of dZ is %e \n",Fnorm(dZ));
fflush(stdout);
};
/*
* Now, we've got dZ in dZ, and Zi*A'(dy) in work1,
* and Zi*Fd in work3
*/
/*
Compute dX=-X+Zi*Fd*X-temp*X;
First, put I-Zi*Fd+work1 in work2. Then multiply -work2*X, and
put the result in dX.
*/
make_i(work2);
addscaledmat(work2,-1.0,work3,work2);
addscaledmat(work2,1.0,work1,work2);
scale1=-1.0;
scale2=0.0;
mat_mult(scale1,scale2,work2,X,dX);
sym_mat(dX);
if (printlevel >= 3)
{
printf("Fnorm of dX is %e \n",Fnorm(dX));
printf("Fnorm of dZ is %e \n",Fnorm(dZ));
fflush(stdout);
};
/*
* Next, determine mu.
*/
if (relpinfeas < parameters.axtol)
alphap1=linesearch(n,dX,work1,work2,work3,cholxinv,workvec4,
workvec5,workvec6,mystepfrac,1.0,
printlevel);
else
alphap1=linesearch(n,dX,work1,work2,work3,cholxinv,workvec4,
workvec5,workvec6,mystepfrac,1.0,
printlevel);
if (reldinfeas < parameters.atytol)
alphad1=linesearch(n,dZ,work1,work2,work3,cholzinv,workvec4,
workvec5,workvec6,mystepfrac,1.0,
printlevel);
else
alphad1=linesearch(n,dZ,work1,work2,work3,cholzinv,workvec4,
workvec5,workvec6,mystepfrac,1.0,
printlevel);
oldmu=mu;
/* Here, work1 holds X+alphap1*dX, work2=Z+alphad1*dZ */
addscaledmat(X,alphap1,dX,work1);
addscaledmat(Z,alphad1,dZ,work2);
for (i=1; i<=k; i++)
workvec1[i]=y[i]+alphad1*dy[i];
/*
* Check to see whether this affine solution is the best yet.
* The test is somewhat expensive, so don't do it unless
* we're close to done.
*/
if ((bestmeas <1.0e4) && (parameters.affine==0))
{
/*
* Verify that the new X and Z are Cholesky factorizable.
*/
copy_mat(work1,work3);
ret=chol(work3);
while (ret != 0)
{
if (printlevel >=3)
printf("Affine eigsearch missed: adjusting alphap1\n");
alphap1=alphap1*0.9;
addscaledmat(X,alphap1,dX,work1);
copy_mat(work1,work3);
ret=chol(work3);
};
copy_mat(work2,work3);
ret=chol(work3);
while (ret != 0)
{
if (printlevel >=3)
printf("Affine eigsearch missed: adjusting alphad1\n");
alphad1=alphad1*0.9;
addscaledmat(Z,alphad1,dZ,work2);
for (i=1; i<=k; i++)
workvec1[i]=y[i]+alphad1*dy[i];
copy_mat(work2,work3);
ret=chol(work3);
};
/*
* Now, check the quality of this solution.
*/
affpobj=calc_pobj(C,work1,constant_offset);
affdobj=calc_dobj(k,a,workvec1,constant_offset);
/*
* run user exit to check if the affine solution is good enough
*/
if (user_exit(n,k,C,a,affdobj,affpobj,constant_offset,constraints,
work1,workvec1,work2,parameters) == 1)
{
*dobj=affdobj;
*pobj=affpobj;
copy_mat(work1,X);
copy_mat(work2,Z);
for(i=1; i<=k; i++)
y[i]=workvec1[i];
if(printlevel>=3)
printf("Affine step good enough, exiting\n");
return(0);
};
if (parameters.usexzgap==0)
{
affgap=affdobj-affpobj;
if (affgap < 0)
affgap=0.0;
affrelgap=affgap/(1.0+fabs(affpobj)+fabs(affdobj));
}
else
{
affgap=trace_prod(work1,work2);
if (affgap < 0)
affgap=0.0;
affrelgap=affgap/(1.0+fabs(affpobj)+fabs(affdobj));
};
affreldinfeas=dinfeas(k,C,constraints,workvec1,work2,work3);
affrelpinfeas=pinfeas(k,constraints,work1,a,workvec4);
if (printlevel >= 3)
{
printf("affpobj is %e \n",affpobj);
printf("affdobj is %e \n",affdobj);
printf("affrelgap is %e \n",affrelgap);
printf("affrelpinfeas is %e \n",affrelpinfeas);
printf("affreldinfeas is %e \n",affreldinfeas);
};
if ((affrelgap/parameters.objtol < bestmeas) &&
(affrelpinfeas/parameters.axtol <bestmeas) &&
(affreldinfeas/parameters.atytol <bestmeas))
{
lastimprove=iter;
bestmeas=affrelgap/parameters.objtol;
if (affrelpinfeas/parameters.axtol > bestmeas)
bestmeas=affrelpinfeas/parameters.axtol;
if (affreldinfeas/parameters.atytol > bestmeas)
bestmeas=affreldinfeas/parameters.atytol;
store_packed(work1,bestx);
store_packed(work2,bestz);
for (i=1; i<=k; i++)
besty[i]=y[i]+alphad1*dy[i];
if (printlevel >= 3)
printf("Affine step: New best solution, %e \n",bestmeas);
};
if ((ispfeasprob==1) &&
(affrelpinfeas/parameters.axtol < bestmeas))
{
lastimprove=iter;
bestmeas=affrelpinfeas/parameters.axtol;
store_packed(work1,bestx);
for (i=1; i<=k; i++)
besty[i]=0.0;
zero_mat(work3);
addscaledmat(work3,1.0e-50,work1,work3);
store_packed(work3,bestz);
if (printlevel >= 3)
printf("Affine step: New best solution, %e \n",bestmeas);
};
if ((isdfeasprob==1) &&
(affreldinfeas/parameters.atytol <bestmeas))
{
lastimprove=iter;
bestmeas=affreldinfeas/parameters.atytol;
zero_mat(work3);
make_i(work1);
addscaledmat(work3,1.0e-40,work1,work3);
store_packed(work3,bestx);
store_packed(work2,bestz);
for (i=1; i<=k; i++)
besty[i]=workvec1[i];
if (printlevel >= 3)
printf("Affine step: New best solution, %e \n",bestmeas);
};
if (bestmeas < 1.0)
{
if (printlevel >= 3)
printf("Finishing with a final affine step.\n");
iter=iter+1;
if (printlevel >= 1)
printf("Iter: %2d Ap: %.2e Pobj: % .7e Ad: %.2e Dobj: % .7e \n",iter,alphap1,affpobj,alphad1,affdobj);
if (printlevel >= 2)
printf("Total Iterations: %d \n",iter);
store_unpacked(bestx,X);
store_unpacked(bestz,Z);
for (i=1; i<=k; i++)
y[i]=besty[i];
*pobj=calc_pobj(C,X,constant_offset);
*dobj=calc_dobj(k,a,y,constant_offset);
return(0);
};
};
/*
* Compute muplus and prepare for the corrector step.
*/
muplus=trace_prod(work1,work2)/(n);
muk=trace_prod(X,Z)/(n);
if (muk < 0.0)
muk=fabs(muk);
if (muplus < 0.0)
muplus=muk/2;
gamma=(muplus/muk);
/*
* Pick the new mu as follows:
*
* If we have been making good progress (alphap > 0.5) and
* (alphad>0.5) then use mu=muk*gamma*gamma*min(gamma,0.5);
*
* If we haven't been making good progress, then just do
* mu=muplus.
*
* Also, make sure that mu is no larger than the old mu.
*/
if ((relpinfeas < 0.1*parameters.axtol) && (alphad>0.2) &&
(reldinfeas < 0.1*parameters.atytol) && (alphap>0.2) &&
(mu > 1.0e-6) && (gamma < 1.0) && (alphap+alphad > 1.0))
{
mu=muk*pow(gamma,alphap+alphad);
}
else
{
if (muplus < 0.9*muk)
mu=muplus;
else
mu=muk*0.9;
};
/*
* If we have primal and dual infeasibility in hand, then
* make sure that mu is <=muk/2.
*/
if ((relpinfeas < 0.9*parameters.axtol) &&
(reldinfeas < 0.9*parameters.atytol) &&
(mu > muk/2))
mu=muk/2;
/*
* If we want a primal-dual affine step, then set mu=0.0.
*/
if (parameters.affine==1)
{
mu=0.0;
if (printlevel >= 3)
printf("Taking an affine step because parameters.affine=1\n");
};
/*
* Print out some info on mu.
*/
if (printlevel >= 2)
{
printf("muk is %e \n",muk);
printf("muplus is %e \n",muplus);
printf("New mu is %e \n",mu);
printf("mu*n = target duality gap is %e \n",mu*n);
fflush(stdout);
};
/*
* Take a moment to figure out how well we're doing on feasibility.
*/
addscaledmat(X,1.0,dX,work1);
op_a(k,constraints,work1,workvec1);
for (i=1; i<=k; i++)
workvec1[i]=workvec1[i]-a[i];
relerr1=norm2(k,workvec1+1)/(1.0+norma);
if (printlevel >= 3)
{
printf("refinement: Relative error in A(X+dX)=a (Fphat) is %e \n",
relerr1);
};
/*
Now, compute the corrector step.
*/
/*
rhs for the corrector step.
*/
/*
Update Fphat=a-A(X+dX)
*/
addscaledmat(dX,1.0,X,work1);
op_a(k,constraints,work1,Fp);
for (i=1; i<=k; i++)
Fp[i]=a[i]-Fp[i];
/*
The RHS is now A(Zi*Fdhat*X)+A(Zi*(-dZ*dX+mu*I))-Fphat
= A(Zi*(Fdhat*X-dZ*dX+mu*I))-Fphat
*/
make_i(work1);
scale1=0.0;
scale2=mu;
mat_mult(scale1,scale2,work2,work2,work1);
scale1=-1.0;
scale2=1.0;
mat_multspa(scale1,scale2,dZ,dX,work1,fill);
scale1=1.0;
scale2=0.0;
mat_multspc(scale1,scale2,Zi,work1,work2,fill);
/*
Next, compute op_a of work2, and put the result in rhs.
*/
op_a(k,constraints,work2,rhs);
/*
* Finally, subtract off Fphat.
*/
for (i=1; i<=k; i++)
rhs[i]=rhs[i]-Fp[i];
for (i=1; i<=k; i++)
workvec1[i]=rhs[i];
/*
Solve for dy1.
*/
for (i=1; i<=k; i++)
workvec1[i]=workvec1[i]*workvec8[i];
info=solvesys(k,ldam,O,workvec1);
for (i=1; i<=k; i++)
workvec1[i]=workvec1[i]*workvec8[i];
if (info != 0)
{
if (printlevel >= 1)
printf("Solving for dy1 failed! \n");
retcode=8;
goto RETURNBEST;
};
/*
* Do iterative refinement.
*/
if ((iter>1) && (relerr2 > 0.01*parameters.axtol) &&
(parameters.fastmode==0))
{
op_at(k,workvec1,constraints,work1);
mat_multspa(1.0,0.0,work1,X,work2,fill);
mat_multspc(1.0,0.0,Zi,work2,work3,fill);
op_a(k,constraints,work3,workvec2);
for (i=1; i<=k; i++)
workvec2[i]=rhs[i]-workvec2[i];
relerr=norm2(k,workvec2+1)/(1.0+norm2(k,rhs+1));
besterr=relerr;
for (i=1; i<=k; i++)
workvec4[i]=workvec1[i];
if (printlevel >= 3)
{
printf("refinement: Before relative error in Odyhat=r is %e \n",besterr);
};
refinements=0;
lastimproverefinement=0;
while ((refinements < 20) && (refinements-lastimproverefinement < 3)
&& (besterr > 1.0e-14))
{
refinements++;
for (i=1; i<=k; i++)
workvec3[i]=workvec2[i]*workvec8[i];
info=solvesys(k,ldam,O,workvec3);
for (i=1; i<=k; i++)
workvec3[i]=workvec3[i]*workvec8[i];
for (i=1; i<=k; i++)
workvec1[i]=workvec1[i]+workvec3[i];
op_at(k,workvec1,constraints,work1);
mat_multspa(1.0,0.0,work1,X,work2,fill);
mat_multspc(1.0,0.0,Zi,work2,work3,fill);
op_a(k,constraints,work3,workvec2);
for (i=1; i<=k; i++)
workvec2[i]=rhs[i]-workvec2[i];
relerr=norm2(k,workvec2+1)/(1.0+norm2(k,rhs+1));
if (relerr < besterr)
{
lastimproverefinement=refinements;
besterr=relerr;
for (i=1; i<=k; i++)
workvec4[i]=workvec1[i];
};
if (printlevel >= 4)
{
printf("refinement: During relative error in Ody=r is %e \n",besterr);
fflush(stdout);
};
};
if (printlevel >= 3)
{
printf("refinement: After relative error in Odyhat=r is %e,%d \n",besterr,lastimproverefinement);
};
for (i=1; i<=k; i++)
workvec1[i]=workvec4[i];
};
/*
* retrieve dy1.
*/
for (i=1; i<=k; i++)
dy1[i]=workvec1[i];
/*
Compute dZ1=A'(dy1).
dZ1 is stored in work3.
*/
op_at(k,dy1,constraints,work3);
/*
Compute dX1=-Zi*dZ1*X-Zi*dZ*dX+mu*Zi;
dX1=Zi*(-dZ1*X-dZ*dX+mu*I)
for storage sake, dX1 is stored in work2.
*/
make_i(work1);
scale1=-1.0;
scale2=mu;
mat_multspa(scale1,scale2,dZ,dX,work1,fill);
scale1=-1.0;
scale2=1.0;
mat_multspa(scale1,scale2,work3,X,work1,fill);
scale1=1.0;
scale2=0.0;
mat_mult(scale1,scale2,Zi,work1,work2);
sym_mat(work2);
addscaledmat(X,1.0,dX,work1);
op_a(k,constraints,work1,workvec1);
for (i=1; i<=k; i++)
workvec1[i]=workvec1[i]-a[i];
relerr2=norm2(k,workvec1+1)/(1.0+norma);
if (printlevel >= 3)
{
printf("refinement: Before dX+dX1 Relative error in A(X+dX)=a is %e \n",relerr2);
if (relerr1 < relerr2)
printf("refinement: worse\n");
else
printf("refinement: better\n");
};
/*
Update the predictor step.
*/
if (printlevel >= 3)
{
printf("Fnorm of dX1 is %e\n",Fnorm(work2));
printf("Fnorm of dZ1 is %e\n",Fnorm(work3));
};
add_mat(work2,dX);
add_mat(work3,dZ);
for (i=1; i<=k; i++)
dy[i]=dy1[i]+dy[i];
/*
* Check A(X+dX)=a.
*/
addscaledmat(X,1.0,dX,work1);
op_a(k,constraints,work1,workvec1);
for (i=1; i<=k; i++)
workvec1[i]=workvec1[i]-a[i];
relerr2=norm2(k,workvec1+1)/(1.0+norma);
if (printlevel >= 3)
{
printf("refinement: Before adjust dX Relative error in A(X+dX)=a is %e \n",relerr2);
if (relerr1 < relerr2)
printf("refinement: worse\n");
else
printf("refinement: better\n");
};
if (printlevel >= 3)
{
printf("Fnorm of dX is %e \n",Fnorm(dX));
printf("Fnorm of dZ is %e \n",Fnorm(dZ));
};
/*
* Take a moment to figure out how well we're doing on feasibility.
*/
addscaledmat(X,1.0,dX,work1);
op_a(k,constraints,work1,workvec1);
for (i=1; i<=k; i++)
workvec1[i]=workvec1[i]-a[i];
relerr2=norm2(k,workvec1+1)/(1.0+norma);
if (printlevel >= 3)
{
printf("refinement: After adjust error in A(X+dX)=a is %e \n",relerr2);
if (relerr1 < relerr2)
printf("refinement: worse\n");
else
printf("refinement: better\n");
};
/*
Now, we've got the individual steps.
Find maximum possible step sizes.
*/
alphap=linesearch(n,dX,work1,work2,work3,cholxinv,workvec4,
workvec5,workvec6,mystepfrac,1.0,
printlevel);
/*
* Compute the objective value of the new solution.
*/
newpobj=calc_pobj(C,work1,constant_offset);
alphad=linesearch(n,dZ,work1,work2,work3,cholzinv,workvec4,
workvec5,workvec6,mystepfrac,1.0,
printlevel);
if (printlevel >= 3)
{
printf("After linesearches, alphap=%e alphad=%e \n",alphap,alphad);
};
for (i=1; i<=k; i++)
workvec1[i]=y[i]+alphad*dy[i];
/*
* Calculate the prospective new dual objective.
*/
newdobj=calc_dobj(k,a,workvec1,constant_offset);
/*
* Check on the feasibility of the new solutions. If they're
* worse, and the old solution was feasible, then don't take the
* step.
*/
addscaledmat(X,alphap,dX,work1);
newrelpinfeas=pinfeas(k,constraints,work1,a,workvec1);
/*
* For the primal infeasibility, check the relative gap and the
* current primal infeasibility to establish a limit on the
* new infeasibility.
*/
limrelpinfeas=bestrelpinfeas*100;
if ((limrelpinfeas >relgap) && (relgap > 0))
limrelpinfeas=relgap;
/*
* In the early stages, don't worry about limrelpinfeas.
*/
if ((iter < 10) || (relpinfeas > 0.01) ||
(dinfeasmeas > 1.0e-3*parameters.dinftol))
limrelpinfeas=relpinfeas*1000;
/*
* Don't ever ask for more than the required tolerance.
*/
if (parameters.axtol > limrelpinfeas)
limrelpinfeas=parameters.axtol;
/*
* If we're trying to prove dual infeasibility then don't limit
* the step.
*/
if ((probdfeas==0) && (dinfeasmeas>1.0e4))
limrelpinfeas=1.0e30;
/*
* Now, make sure that the step keeps us feasible enough.
*/
if (printlevel >= 3)
{
printf("newrelpinfeas is %e\n",newrelpinfeas);
printf("limrelpinfeas is %e\n",limrelpinfeas);
};
i=1;
while (newrelpinfeas > limrelpinfeas)
{
alphap=0.80*alphap;
addscaledmat(X,alphap,dX,work1);
newrelpinfeas=pinfeas(k,constraints,work1,a,workvec1);
newpobj=calc_pobj(C,work1,constant_offset);
i=i+1;
if (i>20)
{
if (printlevel >=3)
printf("Stuck at edge of primal feasibility.\n");
if (retries < 15)
{
if (retries == 0)
diagfact=0.1;
retries=retries+1;
diagfact=diagfact*10.0;
for (i=1; i<=k-1; i++)
for (j=i; j<=k; j++)
O[ijtok(i,j,ldam)]=O[ijtok(j,i,ldam)];
for (i=1; i<=k; i++)
O[ijtok(i,i,ldam)]=diagO[i];
goto RETRYFACTOR;
}
else
{
if (printlevel >= 1)
printf("Stuck at edge of primal feasibility, giving up. \n");
/*
* Tighten up the solution as much as possible.
*/
retcode=5;
goto RETURNBEST;
};
};
};
/*
Now make sure that we aren't stepping outside of dual feasibility.
*/
addscaledmat(Z,alphad,dZ,work1);
for (i=1; i<=k; i++)
workvec1[i]=y[i]+alphad*dy[i];
newreldinfeas=dinfeas(k,C,constraints,workvec1,work1,work2);
limreldinfeas=bestreldinfeas*100;
if ((limreldinfeas > relgap) && (relgap > 0))
limreldinfeas=relgap;
/*
* In the early stages, don't worry about limreldinfeas.
* Also don't worry if we're trying to establish primal
* infeasibility.
*/
if ((iter < 10) || (reldinfeas > 0.01) ||
(pinfeasmeas > 1.0e-3*parameters.pinftol))
limreldinfeas=reldinfeas*1000;
/*
* Don't ever ask for more than the required tolerance.
*/
if (parameters.atytol > limreldinfeas)
limreldinfeas=parameters.atytol;
if (printlevel >= 3)
{
printf("newreldinfeas is %e\n",newreldinfeas);
printf("limreldinfeas is %e\n",limreldinfeas);
};
i=1;
while (newreldinfeas > limreldinfeas)
{
alphad=0.80*alphad;
addscaledmat(Z,alphad,dZ,work1);
for (j=1; j<=k; j++)
workvec1[j]=y[j]+alphad*dy[j];
newreldinfeas=dinfeas(k,C,constraints,workvec1,work1,
work2);
newdobj=calc_dobj(k,a,workvec1,constant_offset);
i=i+1;
if (i>15)
{
if (printlevel >=3)
printf("Stuck at edge of dual feasibility.\n");
if (retries < 15)
{
if (retries == 0)
diagfact=0.1;
retries=retries+1;
diagfact=diagfact*10.0;
for (i=1; i<=k-1; i++)
for (j=i; j<=k; j++)
O[ijtok(i,j,ldam)]=O[ijtok(j,i,ldam)];
for (i=1; i<=k; i++)
O[ijtok(i,i,ldam)]=diagO[i];
goto RETRYFACTOR;
}
else
{
if (printlevel >= 1)
printf("Stuck at edge of dual feasibility, giving up. \n");
/*
* Tighten up the solution as much as possible.
*/
retcode=6;
goto RETURNBEST;
};
};
};
if (printlevel >= 3)
{
printf("After feas check, alphap=%e alphad=%e \n",alphap,alphad);
};
/*
* Give up if step lengths are way too small.
*/
if ((alphap<=parameters.minstepp) ||
(alphad<=parameters.minstepd))
{
if (printlevel >= 2)
printf("line search failure in corrector step.\n");
if (retries < 15)
{
if (retries == 0)
diagfact=0.1;
retries=retries+1;
diagfact=diagfact*10.0;
for (i=1; i<=k-1; i++)
for (j=i; j<=k; j++)
O[ijtok(i,j,ldam)]=O[ijtok(j,i,ldam)];
for (i=1; i<=k; i++)
O[ijtok(i,i,ldam)]=diagO[i];
goto RETRYFACTOR;
}
else
{
if (printlevel >= 1)
printf("Too many line search failures, giving up. \n");
/*
* Tighten up the solution as much as possible.
*/
retcode=9;
goto RETURNBEST;
};
};
/*
* In case alphap changed, recompute these.
*/
addscaledmat(X,alphap,dX,work1);
newpobj=calc_pobj(C,work1,constant_offset);
newrelpinfeas=pinfeas(k,constraints,work1,a,workvec1);
addscaledmat(Z,alphad,dZ,work1);
for (i=1; i<=k; i++)
workvec1[i]=y[i]+alphad*dy[i];
newdobj=calc_dobj(k,a,workvec1,constant_offset);
newreldinfeas=dinfeas(k,C,constraints,workvec1,work1,work2);
/*
* Update cholzinv.
*/
copy_mat(work1,work2);
ret=chol(work2);
while (ret != 0)
{
alphad=alphad*0.90;
addscaledmat(Z,alphad,dZ,work1);
for (i=1; i<=k; i++)
workvec1[i]=y[i]+alphad*dy[i];
copy_mat(work1,work2);
ret=chol(work2);
if (printlevel >=2)
printf("eigsearch missed! Adjusting alphad\n");
};
chol_inv(work2,work3);
store_packed(work3,cholzinv);
/*
Compute Zi.
*/
copy_mat(work3,work1);
trans(work1);
scale1=1.0;
scale2=0.0;
mat_mult(scale1,scale2,work3,work1,Zi);
if (printlevel >= 4)
printf("Fnorm of Zi is %e \n",Fnorm(Zi));
/*
* Confirm that X is in fact factorable. If not, reduce
* alpha until it is.
*/
addscaledmat(X,alphap,dX,work1);
copy_mat(work1,work2);
ret=chol(work2);
while (ret != 0)
{
if (printlevel >=2)
printf("eigsearch missed! Adjusting alphap\n");
alphap=alphap*0.90;
addscaledmat(X,alphap,dX,work1);
copy_mat(work1,work2);
ret=chol(work2);
};
chol_inv(work2,work3);
store_packed(work3,cholxinv);
/*
* do a line search for feasibility.
*/
if (relpinfeas > 1.0)
{
newalphap=alphap;
for (i=1; i<=9; i++)
{
addscaledmat(X,(i*1.0)*alphap/10.0,dX,work1);
if (pinfeas(k,constraints,work1,a,workvec1) < newrelpinfeas)
{
newalphap=i*1.0*alphap/10.0;
newrelpinfeas=pinfeas(k,constraints,work1,a,workvec1);
};
};
if (newalphap < alphap)
{
if (printlevel >= 2)
printf("Feasibility Adjusting alphap to %e \n",newalphap);
alphap=newalphap;
};
};
/*
*Take the step.
*/
addscaledmat(X,alphap,dX,X);
addscaledmat(Z,alphad,dZ,Z);
for (i=1; i<=k; i++)
y[i]=y[i]+alphad*dy[i];
/*
* Update the objectives.
*/
newdobj=calc_dobj(k,a,y,constant_offset);
newpobj=calc_pobj(C,X,constant_offset);
/*
Recompute the objective function values.
*/
*pobj=calc_pobj(C,X,constant_offset);
*dobj=calc_dobj(k,a,y,constant_offset);
if (parameters.usexzgap==0)
{
gap=*dobj-*pobj;
if (gap < 0.0)
gap=0.0;
relgap=gap/(1.0+fabs(*dobj)+fabs(*pobj));
}
else
{
gap=trace_prod(X,Z);
if (gap < 0.0)
gap=0.0;
relgap=gap/(1.0+fabs(*dobj)+fabs(*pobj));
};
if (printlevel >= 2)
{
printf("pobj is %e \n",*pobj);
printf("dobj is %e \n",*dobj);
printf("gap is %e \n",gap);
printf("relgap is %e \n",relgap);
};
relpinfeas=pinfeas(k,constraints,X,a,workvec1);
reldinfeas=dinfeas(k,C,constraints,y,Z,work2);
if (relpinfeas < parameters.axtol )
probpfeas=1;
if (reldinfeas < parameters.atytol)
probdfeas=1;
oldmaxrelinfeas=maxrelinfeas;
if (relpinfeas > reldinfeas)
{
maxrelinfeas=relpinfeas;
}
else
{
maxrelinfeas=reldinfeas;
};
/*
* Make sure that the objective value hasn't gone crazy.
*
* This was a test with isnan, but isnan isn't ANSI C, so
* we use an equivalent that typically works.
*
*/
if ((gap) != (gap))
{
retcode=12;
goto RETURNBEST;
};
/*
* This was a test with isinf, but isinf isn't ANSI C, so
* we just test for extremely large values.
*/
if ((gap > 1.0e100) || (gap < -1.0e100))
{
retcode=12;
goto RETURNBEST;
};
/*
* if this solution is better than the previous best, then
* update our best solution.
*/
if ((relgap/parameters.objtol < bestmeas) &&
(relpinfeas/parameters.axtol <bestmeas) &&
(ispfeasprob==0) && (isdfeasprob==0) &&
(reldinfeas/parameters.atytol <bestmeas))
{
lastimprove=iter;
bestmeas=relgap/parameters.objtol;
if (relpinfeas/parameters.axtol > bestmeas)
bestmeas=relpinfeas/parameters.axtol;
if (reldinfeas/parameters.atytol > bestmeas)
bestmeas=reldinfeas/parameters.atytol;
store_packed(X,bestx);
store_packed(Z,bestz);
for (i=1; i<=k; i++)
besty[i]=y[i];
if (printlevel >= 3)
printf("New best solution, %e \n",bestmeas);
};
if ((ispfeasprob==1) &&
(relpinfeas/parameters.axtol <bestmeas))
{
lastimprove=iter;
bestmeas=relpinfeas/parameters.axtol;
store_packed(X,bestx);
for (i=1; i<=k; i++)
besty[i]=0.0;
make_i(work2);
zero_mat(work3);
addscaledmat(work3,0.005*parameters.objtol/trace_prod(X,work2),work2,work3);
store_packed(work3,bestz);
if (printlevel >= 3)
printf("New best solution, %e \n",bestmeas);
};
if ((isdfeasprob==1) &&
(reldinfeas/parameters.atytol <bestmeas))
{
lastimprove=iter;
bestmeas=reldinfeas/parameters.atytol;
zero_mat(work3);
make_i(work1);
addscaledmat(work3,1.0e-40,work1,work3);
store_packed(work3,bestx);
store_packed(Z,bestz);
for (i=1; i<=k; i++)
besty[i]=y[i];
if (printlevel >= 3)
printf("New best solution, %e \n",bestmeas);
};
/*
* If we haven't improved the gap much in 20 iterations, then
* give up, because we're not making progress.
*/
if ((iter > 60) && (bestmeas > 0.5*bestarray[((iter-20) % BASIZE)]))
{
if (printlevel >= 1)
printf("Lack of progress. Giving up!\n");
retcode=7;
goto RETURNBEST;
};
/*
Update counters and display status.
*/
iter++;
if (printlevel >= 1)
{
printf("Iter: %2d Ap: %.2e Pobj: % .7e Ad: %.2e Dobj: % .7e \n",iter,alphap,*pobj,alphad,*dobj);
fflush(stdout);
};
/*
* If iter gets above maxiter, then exit.
*/
if (iter >= parameters.maxiter)
{
if (printlevel >= 1)
printf("Maximum iterations reached. \n");
retcode=4;
goto RETURNBEST;
};
};
/*
* Return success.
*/
retcode=0;
RETURNBEST:
store_unpacked(bestx,X);
store_unpacked(bestz,Z);
for (i=1; i<=k; i++)
y[i]=besty[i];
if (parameters.usexzgap==0)
{
*pobj=calc_pobj(C,X,constant_offset);
*dobj=calc_dobj(k,a,y,constant_offset);
gap=*dobj-*pobj;
}
else
{
*pobj=calc_pobj(C,X,constant_offset);
*dobj=calc_dobj(k,a,y,constant_offset);
gap=trace_prod(X,Z);
};
if ((gap < 0.0) && (parameters.usexzgap==0) && (parameters.tweakgap==1)
&& (ispfeasprob==0) && (isdfeasprob==0))
{
tweakgap(n,k,a,constraints,gap,Z,dZ,y,dy,work1,work2,work3,dX,
workvec1,workvec2,workvec3,workvec4,printlevel);
};
/*
Recompute the objective function values.
*/
*pobj=calc_pobj(C,X,constant_offset);
*dobj=calc_dobj(k,a,y,constant_offset);
if (parameters.usexzgap==0)
{
gap=*dobj-*pobj;
if (gap < 0.0)
gap=0.0;
relgap=gap/(1.0+fabs(*dobj)+fabs(*pobj));
}
else
{
gap=trace_prod(X,Z);
if (gap < 0.0)
gap=0.0;
relgap=gap/(1.0+fabs(*dobj)+fabs(*pobj));
};
/*
* Recheck the primal and dual infeasibilities and gap.
*/
relpinfeas=pinfeas(k,constraints,X,a,workvec1);
reldinfeas=dinfeas(k,C,constraints,y,Z,work1);
if (relpinfeas < parameters.axtol )
probpfeas=1;
if (reldinfeas < parameters.atytol)
probdfeas=1;
if ((relgap < parameters.objtol) &&
(relpinfeas <parameters.axtol ) &&
(reldinfeas <parameters.atytol))
{
/*
* Our best solution actually did satisfy the conditions, so we've
* succeeded in solving the problem!
*
*/
retcode=0;
};
/*
* Check for a good solution with slightly reduced accuracy.
*/
if ((ispfeasprob==0) && (isdfeasprob==0))
{
bestmeas=relgap/parameters.objtol;
if ((relpinfeas/parameters.axtol) > bestmeas)
bestmeas=relpinfeas/parameters.axtol;
if ((reldinfeas/parameters.atytol) > bestmeas)
bestmeas=reldinfeas/parameters.atytol;
if ((bestmeas > 1.0) && (bestmeas < 1000.0))
retcode=3;
};
if (ispfeasprob==1)
{
bestmeas=relpinfeas/parameters.axtol;
if ((bestmeas > 1.0) && (bestmeas < 1000.0))
retcode=3;
};
if (isdfeasprob==1)
{
bestmeas=reldinfeas/parameters.atytol;
if ((bestmeas > 1.0) && (bestmeas < 1000.0))
retcode=3;
};
/*
* Come here if we have an infeasible problem and are returning the
* certificate of infeasibility.
*/
RETURNCERT:
/*
* Print out the total number of iterations.
*/
if (printlevel >= 2)
printf("Total Iterations: %d \n",iter);
/*
* Now, go ahead and return.
*/
return(retcode);
}
|
omp.cats | #ifndef OPENMP_OMP_CATS
#define OPENMP_OMP_CATS
/* ****** ****** */
#include <omp.h>
/* ****** Utilities ****** */
#define atscntrb_openmp_STR(x) #x
#define atscntrb_openmp_STRINGIFY(x) atscntrb_openmp_STR(x)
#define atscntrb_openmp_CONCATFUN(X, Y) X ( Y )
/* ****** OpenMP Directives (#pragmas) ****** */
#define atscntrb_openmp_omp_barrier() \
_Pragma(atscntrb_openmp_STRINGIFY(omp barrier))
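/* For example, atscntrb_openmp_omp_barrier() expands (after stringification)
   to _Pragma("omp barrier"), i.e. a #pragma omp barrier at the call site. */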
#define atscntrb_openmp_omp_barrier_beg() \
{ _Pragma(atscntrb_openmp_STRINGIFY(omp barrier))
#define atscntrb_openmp_omp_barrier_end() NULL; }
// #pragma omp parallel private(thread_id)
#define atscntrb_openmp_omp_parallel_private_beg(thread_id) \
_Pragma(atscntrb_openmp_STRINGIFY(atscntrb_openmp_CONCATFUN \
(omp parallel private, thread_id))) {
//
#define atscntrb_openmp_omp_parallel_private_end() NULL; }
/* ****** ********* ****** */
#define atscntrb_openmp_omp_get_thread_num() omp_get_thread_num()
#define atscntrb_openmp_omp_get_num_threads() omp_get_num_threads()
#endif // ifndef OPENMP_OMP_CATS
/* end of [omp.cats] */
|
binbased_projection.h | // KRATOS __ __ _____ ____ _ _ ___ _ _ ____
// | \/ | ____/ ___|| | | |_ _| \ | |/ ___|
// | |\/| | _| \___ \| |_| || || \| | | _
// | | | | |___ ___) | _ || || |\ | |_| |
// |_| |_|_____|____/|_| |_|___|_| \_|\____| APPLICATION
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Antonia Larese De Tetto
//
#if !defined(KRATOS_BINBASED_PROJECTION )
#define KRATOS_BINBASED_PROJECTION
//External includes
// System includes
#include <string>
#include <iostream>
#include <stdlib.h>
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "utilities/timer.h"
#include "meshing_application_variables.h"
//Database includes
#include "spatial_containers/spatial_containers.h"
#include "utilities/binbased_fast_point_locator.h"
#include "utilities/binbased_nodes_in_element_locator.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// This class allows the interpolation between non-matching meshes in 2D and 3D.
/** @author Antonia Larese De Tetto <antoldt@cimne.upc.edu>
*
* This class allows the interpolation of a scalar or vectorial variable between non-matching meshes
* in 2D and 3D.
*
* For every node of the destination model part it is checked in which element of the origin model part it is
* contained and a linear interpolation is performed
*
* The data structure used by default is a static bin.
* In order to use this utility the construction of a bin of objects @see BinBasedFastPointLocator
* and a bin of nodes @see BinBasedNodesInElementLocator
* is required at the beginning of the calculation (only ONCE).
*/
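// Hypothetical usage sketch (variable and model part names are illustrative,
// and the exact construction of the locator may differ; see the member
// functions below for the signatures actually provided by this class):
//
//   BinBasedFastPointLocator<2> node_locator(origin_model_part);
//   node_locator.UpdateSearchDatabase(); // build the bin once
//   BinBasedMeshTransfer<2> transfer;
//   transfer.DirectVariableInterpolation(origin_model_part, destination_model_part,
//                                        TEMPERATURE, TEMPERATURE, node_locator);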
//class BinBasedMeshTransfer
template<std::size_t TDim >
class BinBasedMeshTransfer
{
public:
///@name Type Definitions
///@{
/// Pointer definition of BinBasedMeshTransfer
KRATOS_CLASS_POINTER_DEFINITION(BinBasedMeshTransfer<TDim >);
/// Node type definition
typedef Node<3> NodeType;
typedef Geometry<NodeType> GeometryType;
///@}
///@name Life Cycle
///@{
/// Default constructor.
BinBasedMeshTransfer() = default; //
/// Destructor.
virtual ~BinBasedMeshTransfer() = default;
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
//If you want to pass the whole model part
//**********************************************************************
//**********************************************************************
/// Interpolate the whole problem type
/**
* @param rOrigin_ModelPart: the model part all the variables should be taken from
* @param rDestination_ModelPart: the destination model part where we want to know the values of the variables
*/
void DirectInterpolation(
ModelPart& rOrigin_ModelPart ,
ModelPart& rDestination_ModelPart
)
{
KRATOS_TRY
KRATOS_ERROR << "Not implemented yet" << std::endl;
KRATOS_CATCH("")
}
//If you want to pass only one variable
//**********************************************************************
//**********************************************************************
/// Interpolate one variable from the fixed mesh to the moving one
/**
* @param rFixed_ModelPart: the model part all the variables should be taken from
* @param rMoving_ModelPart: the destination model part where we want to know the values of the variables
* @param rFixedDomainVariable: the name of the interpolated variable in the origin model part
* @param rMovingDomainVariable: the name of the interpolated variable in the destination model part
* @param node_locator: precomputed bin of objects. It is to be constructed separately @see binbased_fast_point_locator.h
*/
// From fixed to moving model part
template<class TDataType>
void DirectVariableInterpolation(
ModelPart& rFixed_ModelPart ,
ModelPart& rMoving_ModelPart,
Variable<TDataType>& rFixedDomainVariable ,
Variable<TDataType>& rMovingDomainVariable,
BinBasedFastPointLocator<TDim>& node_locator
)
{
KRATOS_TRY
KRATOS_INFO("BinBasedMeshTransfer") << "Interpolate From Fixed Mesh*************************************" << std::endl;
//creating an auxiliary list for the new nodes
for(auto node_it = rMoving_ModelPart.NodesBegin(); node_it != rMoving_ModelPart.NodesEnd(); ++node_it) {
ClearVariables(node_it, rMovingDomainVariable);
}
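// For every node of the moving model part, find the fixed-mesh element that
// contains it (using the precomputed bin locator) and interpolate the variable
// onto the node with the element shape functions N.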
Vector N(TDim + 1);
const int max_results = 10000;
typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results);
const int nparticles = rMoving_ModelPart.Nodes().size();
#pragma omp parallel for firstprivate(results,N)
for (int i = 0; i < nparticles; i++) {
ModelPart::NodesContainerType::iterator iparticle = rMoving_ModelPart.NodesBegin() + i;
NodeType::Pointer pparticle = *(iparticle.base());
auto result_begin = results.begin();
Element::Pointer pelement;
bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results);
if (is_found == true) {
//Interpolate( ElemIt, N, *it_found , rFixedDomainVariable , rMovingDomainVariable );
Interpolate( pelement, N, pparticle, rFixedDomainVariable , rMovingDomainVariable );
}
}
KRATOS_CATCH("")
}
/// Map one variable from the moving mesh to the fixed one. The two meshes should be of the same dimensions; otherwise it is better to use
/// MappingFromMovingMesh_VariableMeshes, which is a much more generic tool.
/**
* @param rFixed_ModelPart: the model part all the variables should be taken from
* @param rMoving_ModelPart: the destination model part where we want to know the values of the variables
* @param rFixedDomainVariable: the name of the interpolated variable in the origin model part
* @param rMovingDomainVariable: the name of the interpolated variable in the destination model part
* @param node_locator: precomputed bin of objects (elements of the fixed mesh). It is to be constructed separately @see binbased_nodes_in_element_locator
*/
// From moving to fixed model part
template<class TDataType>
void MappingFromMovingMesh(
ModelPart& rMoving_ModelPart ,
ModelPart& rFixed_ModelPart,
Variable<TDataType>& rMovingDomainVariable ,
Variable<TDataType>& rFixedDomainVariable,
BinBasedFastPointLocator<TDim>& node_locator //this is a bin of objects which contains the FIXED model part
)
{
KRATOS_TRY
KRATOS_INFO("BinBasedMeshTransfer") << "Transfer From Moving Mesh*************************************" << std::endl;
if (rMoving_ModelPart.NodesBegin()->SolutionStepsDataHas(rMovingDomainVariable) == false)
KRATOS_THROW_ERROR(std::logic_error, "Add MovingDomain VARIABLE!!!!!! ERROR", "");
if (rFixed_ModelPart.NodesBegin()->SolutionStepsDataHas(rFixedDomainVariable) == false)
KRATOS_THROW_ERROR(std::logic_error, "Add FixedDomain VARIABLE!!!!!! ERROR", "");
//creating an auxiliary list for the new nodes
for(ModelPart::NodesContainerType::iterator node_it = rFixed_ModelPart.NodesBegin();
node_it != rFixed_ModelPart.NodesEnd(); ++node_it)
{
ClearVariables(node_it, rFixedDomainVariable);
}
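// YOUNG_MODULUS is used here as scratch storage on the fixed-mesh nodes: it
// accumulates the sum of the shape-function weights N[k] contributed below,
// and that sum is used afterwards to normalize the interpolated values.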
for (ModelPart::NodesContainerType::iterator node_it = rFixed_ModelPart.NodesBegin();
node_it != rFixed_ModelPart.NodesEnd(); node_it++)
{
// if (node_it->IsFixed(VELOCITY_X) == false)
// {
// (node_it)->FastGetSolutionStepValue(VELOCITY) = ZeroVector(3);
// (node_it)->FastGetSolutionStepValue(TEMPERATURE) = 0.0;
(node_it)->GetValue(YOUNG_MODULUS) = 0.0;
// }
}
//definitions for spatial search
// typedef NodeType PointType;
// typedef NodeType::Pointer PointTypePointer;
Vector N(TDim + 1);
const int max_results = 10000;
typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results);
const int nparticles = rMoving_ModelPart.Nodes().size();
#pragma omp parallel for firstprivate(results,N)
for (int i = 0; i < nparticles; i++)
{
ModelPart::NodesContainerType::iterator iparticle = rMoving_ModelPart.NodesBegin() + i;
NodeType::Pointer pparticle = *(iparticle.base());
auto result_begin = results.begin();
Element::Pointer pelement;
bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results);
if (is_found == true)
{
GeometryType& geom = pelement->GetGeometry();
// const array_1d<double, 3 > & vel_particle = (iparticle)->FastGetSolutionStepValue(VELOCITY);
// const double& temperature_particle = (iparticle)->FastGetSolutionStepValue(TEMPERATURE);
const TDataType& value = (iparticle)->FastGetSolutionStepValue(rMovingDomainVariable);
for (std::size_t k = 0; k < geom.size(); k++)
{
geom[k].SetLock();
geom[k].FastGetSolutionStepValue(rFixedDomainVariable) += N[k] * value;
geom[k].GetValue(YOUNG_MODULUS) += N[k];
geom[k].UnSetLock();
}
}
}
for (ModelPart::NodesContainerType::iterator node_it = rFixed_ModelPart.NodesBegin();
node_it != rFixed_ModelPart.NodesEnd(); node_it++)
{
const double NN = (node_it)->GetValue(YOUNG_MODULUS);
if (NN != 0.0)
{
(node_it)->FastGetSolutionStepValue(rFixedDomainVariable) /= NN;
}
}
KRATOS_CATCH("")
}
// From moving to fixed model part
/// Interpolate one variable from the moving mesh to the fixed one
/**
* @param rFixed_ModelPart: the model part all the variables should be taken from
* @param rMoving_ModelPart: the destination model part where we want to know the values of the variables
* @param rFixedDomainVariable: the name of the interpolated variable in the origin model part
* @param rMovingDomainVariable: the name of the interpolated variable in the destination model part
* @param node_locator: precomputed bin of nodes of the fixed mesh. It is to be constructed separately @see binbased_nodes_in_element_locator
*/
template<class TDataType>
void MappingFromMovingMesh_VariableMeshes(
ModelPart& rMoving_ModelPart ,
ModelPart& rFixed_ModelPart,
Variable<TDataType>& rMovingDomainVariable ,
Variable<TDataType>& rFixedDomainVariable,
BinBasedNodesInElementLocator<TDim>& node_locator //this is a bin of objects which contains the FIXED model part
)
{
KRATOS_TRY
KRATOS_WATCH("Transfer From Moving Mesh*************************************")
if (rMoving_ModelPart.NodesBegin()->SolutionStepsDataHas(rMovingDomainVariable) == false)
KRATOS_THROW_ERROR(std::logic_error, "Add MovingDomain VARIABLE!!!!!! ERROR", "");
if (rFixed_ModelPart.NodesBegin()->SolutionStepsDataHas(rFixedDomainVariable) == false)
KRATOS_THROW_ERROR(std::logic_error, "Add FixedDomain VARIABLE!!!!!! ERROR", "");
//creating an auxiliary list for the new nodes
for(ModelPart::NodesContainerType::iterator node_it = rFixed_ModelPart.NodesBegin();
node_it != rFixed_ModelPart.NodesEnd(); ++node_it)
{
ClearVariables(node_it, rFixedDomainVariable);
}
//definitions for spatial search
typedef typename BinBasedNodesInElementLocator<TDim>::PointVector PointVector;
typedef typename BinBasedNodesInElementLocator<TDim>::DistanceVector DistanceVector;
const std::size_t max_results = 5000;
Matrix Nmat(max_results,TDim+1);
boost::numeric::ublas::vector<int> positions(max_results);
PointVector work_results(max_results);
DistanceVector work_distances(max_results);
Node<3> work_point(0,0.0,0.0,0.0);
for(ModelPart::ElementsContainerType::iterator elem_it = rMoving_ModelPart.ElementsBegin(); elem_it != rMoving_ModelPart.ElementsEnd(); ++elem_it)
{
std::size_t nfound = node_locator.FindNodesInElement(*(elem_it.base()), positions, Nmat, max_results, work_results.begin(), work_distances.begin(), work_point);
for(std::size_t k=0; k<nfound; k++)
{
auto it = work_results.begin() + positions[k];
array_1d<double,TDim+1> N = row(Nmat,k);
Interpolate( *(elem_it.base()), N, *it, rMovingDomainVariable , rFixedDomainVariable);
}
}
KRATOS_CATCH("")
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
virtual std::string Info() const
{
return "";
}
/// Print information about this object.
virtual void PrintInfo(std::ostream& rOStream) const {}
/// Print object's data.
virtual void PrintData(std::ostream& rOStream) const {}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member rVariables
///@{
///@}
///@name Protected member rVariables
///@{ template<class T, std::size_t dim>
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member rVariables
///@{
///@}
///@name Member rVariables
///@{
inline void CalculateCenterAndSearchRadius(GeometryType&geom,
double& xc, double& yc, double& zc, double& R, array_1d<double,3>& N
)
{
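// Triangle version: (xc,yc) is the centroid and R is 1.01 times the largest
// centroid-to-vertex distance, used as a conservative search radius.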
double x0 = geom[0].X();
double y0 = geom[0].Y();
double x1 = geom[1].X();
double y1 = geom[1].Y();
double x2 = geom[2].X();
double y2 = geom[2].Y();
xc = 0.3333333333333333333*(x0+x1+x2);
yc = 0.3333333333333333333*(y0+y1+y2);
zc = 0.0;
double R1 = (xc-x0)*(xc-x0) + (yc-y0)*(yc-y0);
double R2 = (xc-x1)*(xc-x1) + (yc-y1)*(yc-y1);
double R3 = (xc-x2)*(xc-x2) + (yc-y2)*(yc-y2);
R = R1;
if(R2 > R) R = R2;
if(R3 > R) R = R3;
R = 1.01 * sqrt(R);
}
//***************************************
//***************************************
inline void CalculateCenterAndSearchRadius(GeometryType&geom,
double& xc, double& yc, double& zc, double& R, array_1d<double,4>& N
)
{
double x0 = geom[0].X();
double y0 = geom[0].Y();
double z0 = geom[0].Z();
double x1 = geom[1].X();
double y1 = geom[1].Y();
double z1 = geom[1].Z();
double x2 = geom[2].X();
double y2 = geom[2].Y();
double z2 = geom[2].Z();
double x3 = geom[3].X();
double y3 = geom[3].Y();
double z3 = geom[3].Z();
xc = 0.25*(x0+x1+x2+x3);
yc = 0.25*(y0+y1+y2+y3);
zc = 0.25*(z0+z1+z2+z3);
double R1 = (xc-x0)*(xc-x0) + (yc-y0)*(yc-y0) + (zc-z0)*(zc-z0);
double R2 = (xc-x1)*(xc-x1) + (yc-y1)*(yc-y1) + (zc-z1)*(zc-z1);
double R3 = (xc-x2)*(xc-x2) + (yc-y2)*(yc-y2) + (zc-z2)*(zc-z2);
double R4 = (xc-x3)*(xc-x3) + (yc-y3)*(yc-y3) + (zc-z3)*(zc-z3);
R = R1;
if(R2 > R) R = R2;
if(R3 > R) R = R3;
if(R4 > R) R = R4;
R = sqrt(R);
}
//***************************************
//***************************************
inline double CalculateVol( const double x0, const double y0,
const double x1, const double y1,
const double x2, const double y2
)
{
return 0.5*( (x1-x0)*(y2-y0)- (y1-y0)*(x2-x0) );
}
//***************************************
//***************************************
inline double CalculateVol( const double x0, const double y0, const double z0,
const double x1, const double y1, const double z1,
const double x2, const double y2, const double z2,
const double x3, const double y3, const double z3
)
{
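// Signed volume of the tetrahedron with vertices (x0,y0,z0)..(x3,y3,z3):
// the determinant of the edge matrix divided by 6.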
double x10 = x1 - x0;
double y10 = y1 - y0;
double z10 = z1 - z0;
double x20 = x2 - x0;
double y20 = y2 - y0;
double z20 = z2 - z0;
double x30 = x3 - x0;
double y30 = y3 - y0;
double z30 = z3 - z0;
double detJ = x10 * y20 * z30 - x10 * y30 * z20 + y10 * z20 * x30 - y10 * x20 * z30 + z10 * x20 * y30 - z10 * y20 * x30;
return detJ*0.1666666666666666666667;
//return 0.5*( (x1-x0)*(y2-y0)- (y1-y0)*(x2-x0) );
}
//***************************************
//***************************************
inline bool CalculatePosition( GeometryType&geom,
const double xc, const double yc, const double zc,
array_1d<double,3>& N
)
{
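// Computes the area (barycentric) coordinates N of the point (xc,yc) with
// respect to the triangle; the point lies inside iff all three are in [0,1].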
double x0 = geom[0].X();
double y0 = geom[0].Y();
double x1 = geom[1].X();
double y1 = geom[1].Y();
double x2 = geom[2].X();
double y2 = geom[2].Y();
double area = CalculateVol(x0,y0,x1,y1,x2,y2);
double inv_area = 0.0;
if(area == 0.0)
{
// KRATOS_THROW_ERROR(std::logic_error,"element with zero area found","");
//The interpolated node will not be inside an element with zero area
return false;
}
else
{
inv_area = 1.0 / area;
}
N[0] = CalculateVol(x1,y1,x2,y2,xc,yc) * inv_area;
N[1] = CalculateVol(x2,y2,x0,y0,xc,yc) * inv_area;
N[2] = CalculateVol(x0,y0,x1,y1,xc,yc) * inv_area;
if(N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <=1.0 && N[1]<= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true
return true;
return false;
}
//***************************************
//***************************************
inline bool CalculatePosition( GeometryType&geom,
const double xc, const double yc, const double zc,
array_1d<double,4>& N
)
{
double x0 = geom[0].X();
double y0 = geom[0].Y();
double z0 = geom[0].Z();
double x1 = geom[1].X();
double y1 = geom[1].Y();
double z1 = geom[1].Z();
double x2 = geom[2].X();
double y2 = geom[2].Y();
double z2 = geom[2].Z();
double x3 = geom[3].X();
double y3 = geom[3].Y();
double z3 = geom[3].Z();
double vol = CalculateVol(x0,y0,z0,x1,y1,z1,x2,y2,z2,x3,y3,z3);
double inv_vol = 0.0;
if(vol < 0.0000000000001)
{
// KRATOS_THROW_ERROR(std::logic_error,"element with zero vol found","");
//The interpolated node will not be inside an element with zero volume
return false;
// KRATOS_WATCH("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
}
else
{
inv_vol = 1.0 / vol;
}
N[0] = CalculateVol(x1,y1,z1,x3,y3,z3,x2,y2,z2,xc,yc,zc) * inv_vol;
N[1] = CalculateVol(x3,y3,z3,x0,y0,z0,x2,y2,z2,xc,yc,zc) * inv_vol;
N[2] = CalculateVol(x3,y3,z3,x1,y1,z1,x0,y0,z0,xc,yc,zc) * inv_vol;
N[3] = CalculateVol(x0,y0,z0,x1,y1,z1,x2,y2,z2,xc,yc,zc) * inv_vol;
if(N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >=0.0 &&
N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <=1.0)
//if the xc yc zc is inside the tetrahedron return true
return true;
return false;
}
//ElemI Element iterator
//N Shape functions
//step_data_size
//pnode pointer to the node
//projecting total model part 2Dversion
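//For every solution step stored in the node's buffer, the destination nodal data
//is rebuilt as the shape-function weighted sum of the element's nodal data:
//step_data[j] = sum_k N[k]*node_k_data[j].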
void Interpolate(
Element::Pointer ElemIt,
const Vector& N,
int step_data_size,
NodeType::Pointer pnode)
{
//Geometry element of the rOrigin_ModelPart
GeometryType& geom = ElemIt->GetGeometry();
const std::size_t buffer_size = pnode->GetBufferSize();
const std::size_t vector_size = N.size();
for(std::size_t step = 0; step<buffer_size; step++) {
//getting the data of the solution step
double* step_data = (pnode)->SolutionStepData().Data(step);
double* node0_data = geom[0].SolutionStepData().Data(step);
//copying this data in the position of the vector we are interested in
for(int j= 0; j< step_data_size; j++) {
step_data[j] = N[0]*node0_data[j];
}
for(std::size_t k= 1; k< vector_size; k++) {
double* node1_data = geom[k].SolutionStepData().Data(step);
for(int j= 0; j< step_data_size; j++) {
step_data[j] += N[k]*node1_data[j];
}
}
}
// pnode->GetValue(IS_VISITED) = 1.0;
}
//projecting an array1D 2Dversion
void Interpolate(
Element::Pointer ElemIt,
const Vector& N,
NodeType::Pointer pnode,
Variable<array_1d<double,3> >& rOriginVariable,
Variable<array_1d<double,3> >& rDestinationVariable)
{
//Geometry element of the rOrigin_ModelPart
GeometryType& geom = ElemIt->GetGeometry();
const std::size_t buffer_size = pnode->GetBufferSize();
const std::size_t vector_size = N.size();
for(std::size_t step = 0; step<buffer_size; step++) {
//getting the data of the solution step
array_1d<double,3>& step_data = (pnode)->FastGetSolutionStepValue(rDestinationVariable , step);
//Reference or no reference??? //TO DELETE
step_data = N[0] * geom[0].FastGetSolutionStepValue(rOriginVariable , step);
// Copying this data in the position of the vector we are interested in
for(std::size_t j= 1; j< vector_size; j++) {
const array_1d<double,3>& node_data = geom[j].FastGetSolutionStepValue(rOriginVariable , step);
step_data += N[j] * node_data;
}
}
// pnode->GetValue(IS_VISITED) = 1.0;
}
//projecting a scalar 2Dversion
void Interpolate(
Element::Pointer ElemIt,
const Vector& N,
NodeType::Pointer pnode,
Variable<double>& rOriginVariable,
Variable<double>& rDestinationVariable)
{
//Geometry element of the rOrigin_ModelPart
GeometryType& geom = ElemIt->GetGeometry();
const std::size_t buffer_size = pnode->GetBufferSize();
const std::size_t vector_size = N.size();
//When looping over the time steps, how does step_data keep the data of the previous step? That is, where do we pass the information to the nodes???
for(std::size_t step = 0; step<buffer_size; step++) {
//getting the data of the solution step
double& step_data = (pnode)->FastGetSolutionStepValue(rDestinationVariable , step);
//Reference or no reference??? //TO DELETE
//copying this data in the position of the vector we are interested in
step_data = N[0] * geom[0].FastGetSolutionStepValue(rOriginVariable , step);
// Copying this data in the position of the vector we are interested in
for(std::size_t j= 1; j< vector_size; j++) {
const double node_data = geom[j].FastGetSolutionStepValue(rOriginVariable , step);
step_data += N[j] * node_data;
}
}
// pnode->GetValue(IS_VISITED) = 1.0;
}
inline void Clear(ModelPart::NodesContainerType::iterator node_it, int step_data_size )
{
std::size_t buffer_size = node_it->GetBufferSize();
for(std::size_t step = 0; step<buffer_size; step++)
{
//getting the data of the solution step
double* step_data = (node_it)->SolutionStepData().Data(step);
//copying this data in the position of the vector we are interested in
for(int j= 0; j< step_data_size; j++)
{
step_data[j] = 0.0;
}
}
}
inline void ClearVariables(ModelPart::NodesContainerType::iterator node_it , Variable<array_1d<double,3> >& rVariable)
{
array_1d<double, 3>& Aux_var = node_it->FastGetSolutionStepValue(rVariable, 0);
noalias(Aux_var) = ZeroVector(3);
}
inline void ClearVariables(ModelPart::NodesContainerType::iterator node_it, Variable<double>& rVariable)
{
double& Aux_var = node_it->FastGetSolutionStepValue(rVariable, 0);
Aux_var = 0.0;
}
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
BinBasedMeshTransfer& operator=(BinBasedMeshTransfer const& rOther);
///@}
}; // Class BinBasedMeshTransfer
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// output stream function
template<std::size_t TDim>
inline std::ostream& operator << (std::ostream& rOStream,
const BinBasedMeshTransfer<TDim>& rThis)
{
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintData(rOStream);
return rOStream;
}
///@}
} // namespace Kratos.
#endif // KRATOS_BINBASED_PROJECTION defined
|
poly_driver.c | #include <stdlib.h>
#include "util.h"
#include "poly_driver.h"
void poly(const int n, VARR sum, const VARR x, const FLOAT_TYPE * restrict P)
{
__builtin_assume_aligned(x,ALIGN);
__builtin_assume_aligned(P,ALIGN);
__builtin_assume_aligned(sum,ALIGN);
VARR tmp __attribute__ ((aligned (ALIGN))) = {0.0};
for (int i=0; i<=n; i++) {
#ifdef __INTEL_COMPILER
#pragma omp simd
#endif
for (int v=0; v<TMPLEN; v++) {
tmp[v] = tmp[v]*x[v] + P[i];
}
}
#ifdef __INTEL_COMPILER
#pragma omp simd
#endif
for (int v=0; v<TMPLEN; v++) sum[v] += tmp[v];
}
void do_test_rw(const int n, const size_t TSIZE,
const FLOAT_TYPE * const restrict A,
FLOAT_TYPE * const restrict B,
const FLOAT_TYPE * const restrict P)
{
__builtin_assume_aligned(A,ALIGN);
__builtin_assume_aligned(B,ALIGN);
__builtin_assume_aligned(P,ALIGN);
#pragma omp parallel for schedule(static,LB_LEN)
for (size_t j=0; j<TSIZE; j+=TMPLEN*CHUNKSIZE) {
for (size_t k=j; k<j+TMPLEN*CHUNKSIZE; k+=TMPLEN) {
poly(n,&B[k],&A[k],P);
}
}
}
void do_test_ro(const int n, const size_t TSIZE,
const FLOAT_TYPE * const restrict A,
FLOAT_TYPE * const restrict sum,
const FLOAT_TYPE * const restrict P)
{
__builtin_assume_aligned(A,ALIGN);
__builtin_assume_aligned(P,ALIGN);
FLOAT_TYPE tmp[TMPLEN] __attribute__ ((aligned (ALIGN))) = {0.0};
#pragma omp parallel for schedule(static,LB_LEN) reduction(+:tmp[:TMPLEN])
for (size_t j=0; j<TSIZE; j+=TMPLEN*CHUNKSIZE) {
for (size_t k=j; k<j+TMPLEN*CHUNKSIZE; k+=TMPLEN) {
poly(n,tmp,&A[k],P);
}
}
for (int v=0; v<TMPLEN; v++) *sum += tmp[v];
}
|
Example_device.1.c | /*
* @@name: device.1c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
* @@version: omp_4.0
*/
#include <stdio.h>
#include <omp.h>
#pragma omp declare target
void vec_mult(float *p, float *v1, float *v2, int N);
extern float *p, *v1, *v2;
extern int N;
#pragma omp end declare target
extern void init_vars(float *, float *, int);
extern void output(float *, int);
void foo()
{
init_vars(v1, v2, N);
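/* Offload the region to device 42; the map clause transfers the array sections
 * p[0:N], v1[0:N] and v2[0:N] between host and device (default map-type tofrom). */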
#pragma omp target device(42) map(p[:N], v1[:N], v2[:N])
{
vec_mult(p, v1, v2, N);
}
output(p, N);
}
void vec_mult(float *p, float *v1, float *v2, int N)
{
int i;
int nthreads;
if (!omp_is_initial_device())
{
printf("1024 threads on target device\n");
nthreads = 1024;
}
else
{
printf("8 threads on initial device\n");
nthreads = 8;
}
#pragma omp parallel for private(i) num_threads(nthreads)
for (i=0; i<N; i++)
p[i] = v1[i] * v2[i];
}
|
bml_transpose_ellsort_typed.c | #include "../../macros.h"
#include "../../typed.h"
#include "../bml_allocate.h"
#include "../bml_parallel.h"
#include "../bml_transpose.h"
#include "../bml_types.h"
#include "bml_allocate_ellsort.h"
#include "bml_transpose_ellsort.h"
#include "bml_types_ellsort.h"
#include <complex.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/** Transpose a matrix.
*
* \ingroup transpose_group
*
* \param A The matrix to be transposed
* \return the transposed A
*/
bml_matrix_ellsort_t
* TYPED_FUNC(bml_transpose_new_ellsort) (bml_matrix_ellsort_t * A)
{
bml_matrix_dimension_t matrix_dimension = { A->N, A->N, A->M };
bml_matrix_ellsort_t *B =
TYPED_FUNC(bml_noinit_matrix_ellsort) (matrix_dimension,
A->distribution_mode);
REAL_T *A_value = (REAL_T *) A->value;
int *A_index = A->index;
int *A_nnz = A->nnz;
int *A_localRowMin = A->domain->localRowMin;
int *A_localRowMax = A->domain->localRowMax;
REAL_T *B_value = (REAL_T *) B->value;
int *B_index = B->index;
int *B_nnz = B->nnz;
int myRank = bml_getMyRank();
// Transpose all elements
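/* Scatter-based transpose: the thread handling row i appends each stored entry
 * (i, trow) of A to row trow of B. A per-row OpenMP lock serializes concurrent
 * appends to the same destination row so B_nnz and the row contents stay
 * consistent. */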
#ifdef _OPENMP
omp_lock_t *row_lock =
(omp_lock_t *) malloc(sizeof(omp_lock_t) * matrix_dimension.N_rows);
#pragma omp parallel for
for (int i = 0; i < matrix_dimension.N_rows; i++)
{
omp_init_lock(&row_lock[i]);
}
#endif
#pragma omp parallel for \
shared(matrix_dimension, B_index, B_value, B_nnz, A_index, A_value, A_nnz,row_lock)
for (int i = 0; i < matrix_dimension.N_rows; i++)
{
for (int j = 0; j < A_nnz[i]; j++)
{
int trow = A_index[ROWMAJOR(i, j, matrix_dimension.N_rows,
matrix_dimension.N_nz_max)];
#ifdef _OPENMP
omp_set_lock(&row_lock[trow]);
#endif
int colcnt = B_nnz[trow];
B_index[ROWMAJOR
(trow, colcnt, matrix_dimension.N_rows,
matrix_dimension.N_nz_max)] = i;
B_value[ROWMAJOR
(trow, colcnt, matrix_dimension.N_rows,
matrix_dimension.N_nz_max)] =
A_value[ROWMAJOR
(i, j, matrix_dimension.N_rows,
matrix_dimension.N_nz_max)];
B_nnz[trow]++;
#ifdef _OPENMP
omp_unset_lock(&row_lock[trow]);
#endif
}
}
return B;
/*
int Alrmin = A_localRowMin[myRank];
int Alrmax = A_localRowMax[myRank];
#pragma omp parallel for \
shared(N, M, B_index, B_value, B_nnz) \
shared(A_index, A_value, A_nnz,Alrmin,Alrmax)
//for (int i = 0; i < N; i++)
for (int i = Alrmin; i < Alrmax; i++)
{
for (int j = 0; j < N; j++)
{
int Annzj = A_nnz[j];
for (int k = 0; k < Annzj; k++)
{
if (A_index[ROWMAJOR(j, k, N, M)] != i) {}
else {
B_index[ROWMAJOR(i, B_nnz[i], N, M)] = j;
B_value[ROWMAJOR(i, B_nnz[i], N, M)] = A_value[ROWMAJOR(j, k, N, M)];
B_nnz[i]++;
break;
}
}
}
}
return B;
*/
}
/** Transpose a matrix in place.
*
* \ingroup transpose_group
*
* \param A The matrix to be transposed
* \return the transposed A
*/
void TYPED_FUNC(
bml_transpose_ellsort) (
bml_matrix_ellsort_t * A)
{
int N = A->N;
int M = A->M;
REAL_T *A_value = (REAL_T *) A->value;
int *A_index = A->index;
int *A_nnz = A->nnz;
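/* For every stored entry whose column index is larger than its row index, either
 * swap its value with the matching transposed entry if one already exists, or
 * append it to the end of the destination row (inside a critical section) and
 * shorten the current row. */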
#pragma omp parallel for shared(N, M, A_value, A_index, A_nnz)
for (int i = 0; i < N; i++)
{
for (int j = A_nnz[i] - 1; j >= 0; j--)
{
if (A_index[ROWMAJOR(i, j, N, M)] > i)
{
int ind = A_index[ROWMAJOR(i, j, N, M)];
int exchangeDone = 0;
for (int k = 0; k < A_nnz[ind]; k++)
{
// Existing corresponding value for transpose - exchange
if (A_index[ROWMAJOR(ind, k, N, M)] == i)
{
REAL_T tmp = A_value[ROWMAJOR(i, j, N, M)];
#pragma omp critical
{
A_value[ROWMAJOR(i, j, N, M)] =
A_value[ROWMAJOR(ind, k, N, M)];
A_value[ROWMAJOR(ind, k, N, M)] = tmp;
}
exchangeDone = 1;
break;
}
}
// If no match add to end of row
if (!exchangeDone)
{
int jind = A_nnz[ind];
#pragma omp critical
{
A_index[ROWMAJOR(ind, jind, N, M)] = i;
A_value[ROWMAJOR(ind, jind, N, M)] =
A_value[ROWMAJOR(i, j, N, M)];
A_nnz[ind]++;
A_nnz[i]--;
}
}
}
}
}
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 8;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] +
coef[1][i][j][k] * A[t%2][i-1][j ][k ] +
coef[2][i][j][k] * A[t%2][i ][j-1][k ] +
coef[3][i][j][k] * A[t%2][i ][j ][k-1] +
coef[4][i][j][k] * A[t%2][i+1][j ][k ] +
coef[5][i][j][k] * A[t%2][i ][j+1][k ] +
coef[6][i][j][k] * A[t%2][i ][j ][k+1];
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
utils.h | // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <iostream>
#include <vector>
#include <string>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#ifdef _WIN32
#define GLOG_NO_ABBREVIATED_SEVERITIES
#include <windows.h>
#else
#include <dirent.h>
#include <sys/types.h>
#endif
namespace PaddleSolution {
namespace utils {
inline std::string path_join(const std::string& dir,
const std::string& path) {
std::string seperator = "/";
#ifdef _WIN32
seperator = "\\";
#endif
return dir + seperator + path;
}
#ifndef _WIN32
// scan a directory and get all files with input extensions
inline std::vector<std::string> get_directory_images(
const std::string& path, const std::string& exts) {
std::vector<std::string> imgs;
struct dirent *entry;
DIR *dir = opendir(path.c_str());
if (dir == NULL) {
return imgs;
}
while ((entry = readdir(dir)) != NULL) {
std::string item = entry->d_name;
auto ext = strrchr(entry->d_name, '.');
if (!ext || std::string(ext) == "." || std::string(ext) == "..") {
continue;
}
if (exts.find(ext) != std::string::npos) {
imgs.push_back(path_join(path, entry->d_name));
}
}
closedir(dir);
return imgs;
}
#else
// scan a directory and get all files with input extensions
inline std::vector<std::string> get_directory_images(
const std::string& path, const std::string& exts) {
std::string pattern(path);
pattern.append("\\*");
std::vector<std::string> imgs;
WIN32_FIND_DATA data;
HANDLE hFind;
if ((hFind = FindFirstFile(pattern.c_str(), &data)) != INVALID_HANDLE_VALUE) {
do {
auto fname = std::string(data.cFileName);
auto pos = fname.rfind(".");
auto ext = fname.substr(pos + 1);
if (ext.size() > 1 && exts.find(ext) != std::string::npos) {
imgs.push_back(path + "\\" + data.cFileName);
}
} while (FindNextFile(hFind, &data) != 0);
FindClose(hFind);
}
return imgs;
}
#endif
// normalize and HWC_BGR -> CHW_RGB
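// For every pixel the uchar value is scaled to [0,1], mean-subtracted and divided
// by the per-channel std, then written in channel-major (CHW) order into data.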
inline void normalize(cv::Mat& im, float* data, std::vector<float>& fmean,
std::vector<float>& fstd) {
int rh = im.rows;
int rw = im.cols;
int rc = im.channels();
double normf = static_cast<double>(1.0) / 255.0;
#pragma omp parallel for
for (int h = 0; h < rh; ++h) {
const uchar* ptr = im.ptr<uchar>(h);
int im_index = 0;
for (int w = 0; w < rw; ++w) {
for (int c = 0; c < rc; ++c) {
int top_index = (c * rh + h) * rw + w;
float pixel = static_cast<float>(ptr[im_index++]);
pixel = (pixel * normf - fmean[c]) / fstd[c];
data[top_index] = pixel;
}
}
}
}
// flatten a cv::mat
inline void flatten_mat(cv::Mat& im, float* data) {
int rh = im.rows;
int rw = im.cols;
int rc = im.channels();
#pragma omp parallel for
for (int h = 0; h < rh; ++h) {
const uchar* ptr = im.ptr<uchar>(h);
int im_index = 0;
int top_index = h * rw * rc;
for (int w = 0; w < rw; ++w) {
for (int c = 0; c < rc; ++c) {
float pixel = static_cast<float>(ptr[im_index++]);
data[top_index++] = pixel;
}
}
}
}
// argmax
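// For every output pixel, find the channel with the largest score; the channel
// index becomes the mask label and the maximum score (scaled to 0-255) becomes
// the scoremap value.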
inline void argmax(float* out, std::vector<int>& shape,
std::vector<uchar>& mask, std::vector<uchar>& scoremap) {
int out_img_len = shape[1] * shape[2];
int blob_out_len = out_img_len * shape[0];
/*
Eigen::TensorMap<Eigen::Tensor<float, 3>> out_3d(out, shape[0], shape[1], shape[2]);
Eigen::Tensor<Eigen::DenseIndex, 2> argmax = out_3d.argmax(0);
*/
float max_value = -1;
int label = 0;
#pragma omp parallel private(label)
for (int i = 0; i < out_img_len; ++i) {
max_value = -1;
label = 0;
#pragma omp for reduction(max : max_value)
for (int j = 0; j < shape[0]; ++j) {
int index = i + j * out_img_len;
if (index >= blob_out_len) {
continue;
}
float value = out[index];
if (value > max_value) {
max_value = value;
label = j;
}
}
if (label == 0) max_value = 0;
mask[i] = uchar(label);
scoremap[i] = uchar(max_value * 255);
}
}
} // namespace utils
} // namespace PaddleSolution
|
kmeans_clustering.c | /*****************************************************************************/
/*IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. */
/*By downloading, copying, installing or using the software you agree */
/*to this license. If you do not agree to this license, do not download, */
/*install, copy or use the software. */
/* */
/* */
/*Copyright (c) 2005 Northwestern University */
/*All rights reserved. */
/*Redistribution of the software in source and binary forms, */
/*with or without modification, is permitted provided that the */
/*following conditions are met: */
/* */
/*1 Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* */
/*2 Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in the */
/* documentation and/or other materials provided with the distribution.*/
/* */
/*3 Neither the name of Northwestern University nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* */
/*THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS */
/*IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED */
/*TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT AND */
/*FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL */
/*NORTHWESTERN UNIVERSITY OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, */
/*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */
/*(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR */
/*SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) */
/*HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, */
/*STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/*ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/*POSSIBILITY OF SUCH DAMAGE. */
/******************************************************************************/
/*************************************************************************/
/** File: kmeans_clustering.c **/
/** Description: Implementation of regular k-means clustering **/
/** algorithm **/
/** Author: Wei-keng Liao **/
/** ECE Department, Northwestern University **/
/** email: wkliao@ece.northwestern.edu **/
/** **/
/** Edited by: Jay Pisharath **/
/** Northwestern University. **/
/** **/
/** ================================================================ **/
/** **/
/**Edited by: Sang-Ha Lee **/
/**University of Virginia **/
/** **/
/**Description: No longer supports fuzzy c-means clustering; **/
/** only regular k-means clustering. **/
/** Simplified for main functionality: regular k-means **/
/** clustering. **/
/** **/
/*************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <float.h>
#include <math.h>
#include "kmeans.h"
#include <omp.h>
#define RANDOM_MAX 2147483647
#ifndef FLT_MAX
#define FLT_MAX 3.40282347e+38
#endif
extern double wtime(void);
extern int num_omp_threads;
int find_nearest_point(float *pt, /* [nfeatures] */
int nfeatures,
float **pts, /* [npts][nfeatures] */
int npts)
{
int index = 0, i;
float min_dist=FLT_MAX;
/* find the cluster center id with min distance to pt */
for (i=0; i<npts; i++) {
float dist;
dist = euclid_dist_2(pt, pts[i], nfeatures); /* no need square root */
if (dist < min_dist) {
min_dist = dist;
index = i;
}
}
return(index);
}
/*----< euclid_dist_2() >----------------------------------------------------*/
/* multi-dimensional spatial Euclid distance square */
__inline
float euclid_dist_2(float *pt1,
float *pt2,
int numdims)
{
int i;
float ans=0.0;
for (i=0; i<numdims; i++)
ans += (pt1[i]-pt2[i]) * (pt1[i]-pt2[i]);
return(ans);
}
/*----< kmeans_clustering() >---------------------------------------------*/
float** kmeans_clustering(float **feature, /* in: [npoints][nfeatures] */
int nfeatures,
int npoints,
int nclusters,
float threshold,
int *membership) /* out: [npoints] */
{
int i, j, k, n=0, index, loop=0;
int *new_centers_len; /* [nclusters]: no. of points in each cluster */
float **new_centers; /* [nclusters][nfeatures] */
float **clusters; /* out: [nclusters][nfeatures] */
float delta;
double timing;
int nthreads;
int **partial_new_centers_len;
float ***partial_new_centers;
nthreads = num_omp_threads;
/* allocate space for returning variable clusters[] */
clusters = (float**) malloc(nclusters * sizeof(float*));
clusters[0] = (float*) malloc(nclusters * nfeatures * sizeof(float));
for (i=1; i<nclusters; i++)
clusters[i] = clusters[i-1] + nfeatures;
/* randomly pick cluster centers */
for (i=0; i<nclusters; i++) {
//n = (int)rand() % npoints;
for (j=0; j<nfeatures; j++)
clusters[i][j] = feature[n][j];
n++;
}
for (i=0; i<npoints; i++)
membership[i] = -1;
/* need to initialize new_centers_len and new_centers[0] to all 0 */
new_centers_len = (int*) calloc(nclusters, sizeof(int));
new_centers = (float**) malloc(nclusters * sizeof(float*));
new_centers[0] = (float*) calloc(nclusters * nfeatures, sizeof(float));
for (i=1; i<nclusters; i++)
new_centers[i] = new_centers[i-1] + nfeatures;
partial_new_centers_len = (int**) malloc(nthreads * sizeof(int*));
partial_new_centers_len[0] = (int*) calloc(nthreads*nclusters, sizeof(int));
for (i=1; i<nthreads; i++)
partial_new_centers_len[i] = partial_new_centers_len[i-1]+nclusters;
partial_new_centers =(float***)malloc(nthreads * sizeof(float**));
partial_new_centers[0] =(float**) malloc(nthreads*nclusters * sizeof(float*));
for (i=1; i<nthreads; i++)
partial_new_centers[i] = partial_new_centers[i-1] + nclusters;
for (i=0; i<nthreads; i++)
{
for (j=0; j<nclusters; j++)
partial_new_centers[i][j] = (float*)calloc(nfeatures, sizeof(float));
}
printf("num of threads = %d\n", num_omp_threads);
do {
delta = 0.0;
omp_set_num_threads(num_omp_threads);
#pragma omp parallel \
shared(feature,clusters,membership,partial_new_centers,partial_new_centers_len)
{
int tid = omp_get_thread_num();
#pragma omp for \
private(i,j,index) \
firstprivate(npoints,nclusters,nfeatures) \
schedule(static) \
reduction(+:delta)
for (i=0; i<npoints; i++) {
/* find the index of the nearest cluster center */
index = find_nearest_point(feature[i],
nfeatures,
clusters,
nclusters);
/* if membership changes, increase delta by 1 */
if (membership[i] != index) delta += 1.0;
/* assign the membership to object i */
membership[i] = index;
/* update new cluster centers : sum of all objects located
within */
partial_new_centers_len[tid][index]++;
for (j=0; j<nfeatures; j++)
partial_new_centers[tid][index][j] += feature[i][j];
}
} /* end of #pragma omp parallel */
/* let the main thread perform the array reduction */
for (i=0; i<nclusters; i++) {
for (j=0; j<nthreads; j++) {
new_centers_len[i] += partial_new_centers_len[j][i];
partial_new_centers_len[j][i] = 0.0;
for (k=0; k<nfeatures; k++) {
new_centers[i][k] += partial_new_centers[j][i][k];
partial_new_centers[j][i][k] = 0.0;
}
}
}
/* replace old cluster centers with new_centers */
for (i=0; i<nclusters; i++) {
for (j=0; j<nfeatures; j++) {
if (new_centers_len[i] > 0)
clusters[i][j] = new_centers[i][j] / new_centers_len[i];
new_centers[i][j] = 0.0; /* set back to 0 */
}
new_centers_len[i] = 0; /* set back to 0 */
}
} while (delta > threshold && loop++ < 500);
free(new_centers[0]);
free(new_centers);
free(new_centers_len);
return clusters;
}
|
omp_matvec.c | /******************************************************************************
* OpenMP Example - Matrix-vector multiplication - C/C++ Version
* FILE: omp_matvec.c
* DESCRIPTION:
* This example multiplies all row i elements of matrix A with vector
* element b(i) and stores the summed products in vector c(i). A total is
* maintained for the entire matrix. Performed by using the OpenMP loop
* work-sharing construct. The update of the shared global total is
* serialized by using the OpenMP critical directive.
* SOURCE: Blaise Barney 5/99
* LAST REVISED:
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#define SIZE 10
int
main ()
{
float A[SIZE][SIZE], b[SIZE], c[SIZE], total;
int i, j, tid;
/* Initializations */
total = 0.0;
for (i=0; i < SIZE; i++)
{
for (j=0; j < SIZE; j++)
A[i][j] = (j+1) * 1.0;
b[i] = 1.0 * (i+1);
c[i] = 0.0;
}
printf("\nStarting values of matrix A and vector b:\n");
for (i=0; i < SIZE; i++)
{
printf(" A[%d]= ",i);
for (j=0; j < SIZE; j++)
printf("%.1f ",A[i][j]);
printf(" b[%d]= %.1f\n",i,b[i]);
}
printf("\nResults by thread/row:\n");
/* Create a team of threads and scope variables */
#pragma omp parallel shared(A,b,c,total) private(tid,i)
{
tid = omp_get_thread_num();
/* Loop work-sharing construct - distribute rows of matrix */
#pragma omp for private(j)
for (i=0; i < SIZE; i++)
{
for (j=0; j < SIZE; j++)
c[i] += (A[i][j] * b[i]);
/* Update and display of running total must be serialized */
#pragma omp critical
{
total = total + c[i];
printf(" thread %d did row %d\t c[%d]=%.2f\t",tid,i,i,c[i]);
printf("Running total= %.2f\n",total);
}
} /* end of parallel i loop */
} /* end of parallel construct */
printf("\nMatrix-vector total - sum of all c[] = %.2f\n\n",total);
return 0;
}
|
omp_for_schedule_dynamic.c | // RUN: %libomp-compile-and-run
/*
* Test for dynamic scheduling with chunk size
* Method: calculate how many times the iteration space is dispatched
* and judge if each dispatch has the requested chunk size
* unless it is the last one.
* It is possible for two adjacent chunks to be assigned to the same thread.
* Modified by Chunhua Liao
*/
#include <stdio.h>
#include <omp.h>
#include <unistd.h>
#include <stdlib.h>
#include "omp_testsuite.h"
#define CFDMAX_SIZE 100
const int chunk_size = 7;
int test_omp_for_schedule_dynamic()
{
int tid;
int *tids;
int i;
int tidsArray[CFDMAX_SIZE];
int count = 0;
int tmp_count = 0; /*dispatch times*/
int *tmp; /*store chunk size for each dispatch*/
int result = 0;
tids = tidsArray;
#pragma omp parallel private(tid) shared(tids)
{ /* begin of parallel */
int tid;
tid = omp_get_thread_num ();
#pragma omp for schedule(dynamic,chunk_size)
for (i = 0; i < CFDMAX_SIZE; i++) {
tids[i] = tid;
}
}
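/* Count the boundaries where consecutive iterations were executed by different
 * threads: each contiguous run of identical thread ids corresponds to one or more
 * chunks dispatched to the same thread, so every run length except possibly the
 * last must be a multiple of chunk_size. */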
for (i = 0; i < CFDMAX_SIZE - 1; ++i) {
if (tids[i] != tids[i + 1]) {
count++;
}
}
tmp = (int *) malloc (sizeof (int) * (count + 1));
tmp[0] = 1;
for (i = 0; i < CFDMAX_SIZE - 1; ++i) {
if (tmp_count > count) {
printf ("--------------------\nTestinternal Error: List too small!!!\n--------------------\n"); /* Error handling */
break;
}
if (tids[i] != tids[i + 1]) {
tmp_count++;
tmp[tmp_count] = 1;
} else {
tmp[tmp_count]++;
}
}
/* is dynamic statement working? */
for (i = 0; i < count; i++) {
if ((tmp[i]%chunk_size)!=0) {
/* it is possible for 2 adjacent chunks to be assigned to the same thread */
result++;
fprintf(stderr,"The intermediate dispatch has wrong chunksize.\n");
/* result += ((tmp[i] / chunk_size) - 1); */
}
}
if ((tmp[count]%chunk_size)!=(CFDMAX_SIZE%chunk_size)) {
result++;
fprintf(stderr,"the last dispatch has wrong chunksize.\n");
}
/* for (int i=0;i<count+1;++i) printf("%d\t:=\t%d\n",i+1,tmp[i]); */
return (result==0);
}
int main()
{
int i;
int num_failed=0;
for(i = 0; i < REPETITIONS; i++) {
if(!test_omp_for_schedule_dynamic()) {
num_failed++;
}
}
return num_failed;
}
|
expected_output.c | #include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include <polybench.h>
#include "mvt.h"
/**
* This version is stamped on May 10, 2016
*
* Contact:
* Louis-Noel Pouchet <pouchet.ohio-state.edu>
* Tomofumi Yuki <tomofumi.yuki.fr>
*
* Web address: http://polybench.sourceforge.net
*/
/*mvt.c: this file is part of PolyBench/C*/
/*Include polybench common header.*/
/*Include benchmark-specific header.*/
/*Array initialization.*/
static void init_array(int n, double x1[2000], double x2[2000], double y_1[2000], double y_2[2000], double A[2000][2000]) {
int i, j;
for(i = 0; i < n; i++) {
x1[i] = (double) (i % n) / n;
x2[i] = (double) ((i + 1) % n) / n;
y_1[i] = (double) ((i + 3) % n) / n;
y_2[i] = (double) ((i + 4) % n) / n;
for(j = 0; j < n; j++)
A[i][j] = (double) (i * j % n) / n;
}
}
/*DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output.*/
static void print_array(int n, double x1[2000], double x2[2000]) {
int i;
fprintf(stderr, "==BEGIN DUMP_ARRAYS==\n");
fprintf(stderr, "begin dump: %s", "x1");
for(i = 0; i < n; i++) {
if(i % 20 == 0) fprintf(stderr, "\n");
fprintf(stderr, "%0.2lf ", x1[i]);
}
fprintf(stderr, "\nend dump: %s\n", "x1");
fprintf(stderr, "begin dump: %s", "x2");
for(i = 0; i < n; i++) {
if(i % 20 == 0) fprintf(stderr, "\n");
fprintf(stderr, "%0.2lf ", x2[i]);
}
fprintf(stderr, "\nend dump: %s\n", "x2");
fprintf(stderr, "==END DUMP_ARRAYS==\n");
}
/*Main computational kernel. The whole function will be timed,
including the call and return.*/
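/*The kernel computes x1 = x1 + A * y_1 and x2 = x2 + A^T * y_2. The two loop
nests are independent; each is parallelized over rows i, and every iteration
updates a distinct x1[i] (or x2[i]), so no reduction is needed at this level.*/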
static void kernel_mvt(int n, double x1[2000], double x2[2000], double y_1[2000], double y_2[2000], double A[2000][2000]) {
int i, j;
#pragma omp parallel for default(shared) private(i, j) firstprivate(n, A, y_1)
for(i = 0; i < n; i++) {
// #pragma omp parallel for default(shared) private(j) firstprivate(n, i, A, y_1) reduction(+ : x1[i])
for(j = 0; j < n; j++)
x1[i] = x1[i] + A[i][j] * y_1[j];
}
#pragma omp parallel for default(shared) private(i, j) firstprivate(n, A, y_2)
for(i = 0; i < n; i++) {
// #pragma omp parallel for default(shared) private(j) firstprivate(n, i, A, y_2) reduction(+ : x2[i])
for(j = 0; j < n; j++)
x2[i] = x2[i] + A[j][i] * y_2[j];
}
}
int main(int argc, char **argv) {
/*Retrieve problem size.*/
int n = 2000;
/*Variable declaration/allocation.*/
double (*A)[2000][2000];
A = (double (*)[2000][2000]) polybench_alloc_data((2000 + 0) * (2000 + 0), sizeof(double));
;
double (*x1)[2000];
x1 = (double (*)[2000]) polybench_alloc_data(2000 + 0, sizeof(double));
;
double (*x2)[2000];
x2 = (double (*)[2000]) polybench_alloc_data(2000 + 0, sizeof(double));
;
double (*y_1)[2000];
y_1 = (double (*)[2000]) polybench_alloc_data(2000 + 0, sizeof(double));
;
double (*y_2)[2000];
y_2 = (double (*)[2000]) polybench_alloc_data(2000 + 0, sizeof(double));
;
/*Initialize array(s).*/
init_array(n, *x1, *x2, *y_1, *y_2, *A);
/*Start timer.*/
;
/*Run kernel.*/
kernel_mvt(n, *x1, *x2, *y_1, *y_2, *A);
/*Stop and print timer.*/
;
;
/*Prevent dead-code elimination. All live-out data must be printed
by the function call in argument.*/
if(argc > 42 && !strcmp(argv[0], "")) print_array(n, *x1, *x2);
/*Be clean.*/
free((void *) A);
;
free((void *) x1);
;
free((void *) x2);
;
free((void *) y_1);
;
free((void *) y_2);
;
return 0;
}
|
declare_mapper_messages.c | // RUN: %clang_cc1 -verify -fopenmp -ferror-limit 100 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -ferror-limit 100 %s
int temp; // expected-note {{'temp' declared here}}
struct vec { // expected-note {{definition of 'struct vec' is not complete until the closing '}'}}
int len;
#pragma omp declare mapper(id: struct vec v) map(v.len) // expected-error {{incomplete definition of type 'struct vec'}}
double *data;
};
#pragma omp declare mapper // expected-error {{expected '(' after 'declare mapper'}}
#pragma omp declare mapper { // expected-error {{expected '(' after 'declare mapper'}}
#pragma omp declare mapper( // expected-error {{expected a type}} expected-error {{expected declarator on 'omp declare mapper' directive}}
#pragma omp declare mapper(# // expected-error {{expected a type}} expected-error {{expected declarator on 'omp declare mapper' directive}}
#pragma omp declare mapper(struct v // expected-error {{expected declarator on 'omp declare mapper' directive}}
#pragma omp declare mapper(struct vec // expected-error {{expected declarator on 'omp declare mapper' directive}}
#pragma omp declare mapper(S v // expected-error {{unknown type name 'S'}}
#pragma omp declare mapper(struct vec v // expected-error {{expected ')'}} expected-note {{to match this '('}}
#pragma omp declare mapper(aa:struct vec v) // expected-error {{expected at least one clause on '#pragma omp declare mapper' directive}}
#pragma omp declare mapper(bb:struct vec v) private(v) // expected-error {{expected at least one clause on '#pragma omp declare mapper' directive}} // expected-error {{unexpected OpenMP clause 'private' in directive '#pragma omp declare mapper'}}
#pragma omp declare mapper(cc:struct vec v) map(v) ( // expected-warning {{extra tokens at the end of '#pragma omp declare mapper' are ignored}}
#pragma omp declare mapper(++: struct vec v) map(v.len) // expected-error {{illegal OpenMP user-defined mapper identifier}}
#pragma omp declare mapper(id1: struct vec v) map(v.len, temp) // expected-error {{only variable v is allowed in map clauses of this 'omp declare mapper' directive}}
#pragma omp declare mapper(default : struct vec kk) map(kk.data[0:2]) // expected-note {{previous definition is here}}
#pragma omp declare mapper(struct vec v) map(v.len) // expected-error {{redefinition of user-defined mapper for type 'struct vec' with name 'default'}}
#pragma omp declare mapper(int v) map(v) // expected-error {{mapper type must be of struct, union or class type}}
int fun(int arg) {
#pragma omp declare mapper(id: struct vec v) map(v.len)
{
#pragma omp declare mapper(id: struct vec v) map(v.len) // expected-note {{previous definition is here}}
#pragma omp declare mapper(id: struct vec v) map(v.len) // expected-error {{redefinition of user-defined mapper for type 'struct vec' with name 'id'}}
{
#pragma omp declare mapper(id: struct vec v) map(v.len)
struct vec vv, v1;
#pragma omp target map(mapper) // expected-error {{use of undeclared identifier 'mapper'}}
{}
#pragma omp target map(mapper:vv) // expected-error {{expected '(' after 'mapper'}}
{}
#pragma omp target map(mapper( :vv) // expected-error {{expected expression}} expected-error {{expected ')'}} expected-warning {{implicit declaration of function 'mapper' is invalid in C99}} expected-note {{to match this '('}}
{}
#pragma omp target map(mapper(aa :vv) // expected-error {{use of undeclared identifier 'aa'}} expected-error {{expected ')'}} expected-warning {{implicit declaration of function 'mapper' is invalid in C99}} expected-note {{to match this '('}}
{}
#pragma omp target map(mapper(ab) :vv) // expected-error {{missing map type}} expected-error {{cannot find a valid user-defined mapper for type 'struct vec' with name 'ab'}}
{}
#pragma omp target map(mapper(aa) :vv) // expected-error {{missing map type}}
{}
#pragma omp target map(mapper(aa) to:vv) map(close mapper(aa) from:v1)
{}
#pragma omp target update to(mapper) // expected-error {{expected '(' after 'mapper'}} expected-error {{expected expression}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update to(mapper() // expected-error {{illegal OpenMP user-defined mapper identifier}} expected-error {{expected expression}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update to(mapper:vv) // expected-error {{expected '(' after 'mapper'}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update to(mapper(:vv) // expected-error {{illegal OpenMP user-defined mapper identifier}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update to(mapper(aa :vv) // expected-error {{expected ')'}} expected-note {{to match this '('}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update to(mapper(ab):vv) // expected-error {{cannot find a valid user-defined mapper for type 'struct vec' with name 'ab'}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update to(mapper(aa):vv)
}
}
return arg;
}
|
GB_unop__identity_uint16_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__(none))
// op(A') function: GB (_unop_tran__identity_uint16_uint16)
// C type: uint16_t
// A type: uint16_t
// cast: uint16_t cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
#if 0
GrB_Info GB (_unop_apply__(none))
(
uint16_t *Cx, // Cx and Ax may be aliased
const uint16_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t aij = Ax [p] ;
uint16_t z = aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint16_t aij = Ax [p] ;
uint16_t z = aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_uint16_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
suma_vector.c | #include <stdio.h>
#include <stdlib.h>
#include "omp.h"
/*
Based on the tutorial:
http://openmp.org/mp-documents/omp-hands-on-SC08.pdf
*/
void suma(double *A, double *B, double *C, int n_points);
double *reserva(int n_points);
void print(double *x, int n_points);
int main (){
double *A, *B, *C;
int n_points = 100000;
A = reserva(n_points);
B = reserva(n_points);
C = reserva(n_points);
suma(A, B, C, n_points);
print(C,10);
return 0;
}
void print(double *x, int n_points){
int i;
for(i = 0 ; i < n_points ; i++){
fprintf(stdout, "%f\n", x[i]);
}
}
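/* Each outer iteration owns C[i], so the rows can be distributed across threads;
 * the inner j loop merely repeats the addition 50000 times to create measurable
 * work per iteration. */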
void suma(double *A, double *B, double *C, int n_points){
int i, j;
#pragma omp parallel for private(j), shared(A,B,C)
for (i = 0 ; i < n_points; i++){
C[i] = 0.0;
for(j=0;j<50000;j++){
C[i] += A[i] + B[i];
}
}
}
double *reserva(int n_points){
double *x;
int i;
if(!(x=malloc(sizeof(double) * n_points))){
printf("malloc problem\n");
exit(1);
}
for(i=0;i<n_points;i++){
x[i] = 1.0;
}
return x;
}
|
interpolate_op.h | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <algorithm>
#include <string>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/hostdevice.h"
namespace paddle {
namespace operators {
template <typename T, size_t D, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
using Tensor = framework::Tensor;
using DataLayout = framework::DataLayout;
inline std::vector<int> get_new_shape(
const std::vector<const Tensor*>& list_new_shape_tensor) {
// get tensor from
std::vector<int> vec_new_shape;
for (size_t i = 0; i < list_new_shape_tensor.size(); ++i) {
auto tensor = list_new_shape_tensor[i];
PADDLE_ENFORCE_EQ(tensor->dims(), framework::make_ddim({1}),
platform::errors::InvalidArgument(
"The shape of dimension tensor should be [1],"
"but received d%.",
tensor->dims()));
if (platform::is_gpu_place(tensor->place())) {
framework::Tensor temp;
paddle::framework::TensorCopySync(*tensor, platform::CPUPlace(), &temp);
vec_new_shape.push_back(static_cast<int32_t>(*temp.data<int32_t>()));
} else {
vec_new_shape.push_back(static_cast<int32_t>(*tensor->data<int32_t>()));
}
}
return vec_new_shape;
}
template <typename T>
inline std::vector<T> get_new_data_from_tensor(const Tensor* new_data_tensor) {
std::vector<T> vec_new_data;
auto* new_data = new_data_tensor->data<T>();
framework::Tensor cpu_starts_tensor;
if (platform::is_gpu_place(new_data_tensor->place())) {
paddle::framework::TensorCopySync(*new_data_tensor, platform::CPUPlace(),
&cpu_starts_tensor);
new_data = cpu_starts_tensor.data<T>();
}
vec_new_data = std::vector<T>(new_data, new_data + new_data_tensor->numel());
return vec_new_data;
}
inline void ExtractNCDWH(const framework::DDim& dims,
const DataLayout& data_layout, int* N, int* C, int* D,
int* H, int* W) {
*N = dims[0];
if (dims.size() == 3) {
*C = data_layout == DataLayout::kNCHW ? dims[1] : dims[2];
*D = 1;
*H = 1;
*W = data_layout == DataLayout::kNCHW ? dims[2] : dims[1];
} else if (dims.size() == 4) {
*C = data_layout == DataLayout::kNCHW ? dims[1] : dims[3];
*D = 1;
*H = data_layout == DataLayout::kNCHW ? dims[2] : dims[1];
*W = data_layout == DataLayout::kNCHW ? dims[3] : dims[2];
} else {
*C = data_layout == DataLayout::kNCHW ? dims[1] : dims[4];
*D = data_layout == DataLayout::kNCHW ? dims[2] : dims[1];
*H = data_layout == DataLayout::kNCHW ? dims[3] : dims[2];
*W = data_layout == DataLayout::kNCHW ? dims[4] : dims[3];
}
}
template <typename T>
static void NearestNeighborInterpolate(const Tensor& input, Tensor* output,
const float ratio_h, const float ratio_w,
const int n, const int c,
const int out_h, const int out_w,
const bool align_corners,
const DataLayout& data_layout) {
auto input_t = EigenTensor<T, 4>::From(input);
auto output_t = EigenTensor<T, 4>::From(*output);
for (int k = 0; k < out_h; k++) { // loop for images
int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5)
: static_cast<int>(ratio_h * k);
for (int l = 0; l < out_w; l++) {
int in_l = (align_corners) ? static_cast<int>(ratio_w * l + 0.5)
: static_cast<int>(ratio_w * l);
for (int i = 0; i < n; i++) { // loop for batches
for (int j = 0; j < c; j++) { // loop for channels
if (data_layout == DataLayout::kNCHW) {
output_t(i, j, k, l) = input_t(i, j, in_k, in_l);
} else {
output_t(i, k, l, j) = input_t(i, in_k, in_l, j);
}
}
}
}
}
}
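// 1-D linear interpolation along W: the first loop precomputes, for every output
// column l, the west/east source columns (vx_w/vx_e) and their blending weights
// (vd_w/vd_e); the second loop then forms each output value as the weighted sum of
// the two source pixels, for both NCHW and NHWC layouts.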
template <typename T>
static void LinearInterpolation(const Tensor& input, Tensor* output,
const float ratio_w, const int in_w,
const int n, const int c, const int out_w,
const bool align_corners, const bool align_mode,
const DataLayout data_layout) {
auto input_t = EigenTensor<T, 3>::From(input);
auto output_t = EigenTensor<T, 3>::From(*output);
bool align_flag = (align_mode == 0 && !align_corners);
std::vector<int> vx_w, vx_e;
std::vector<float> vd_w, vd_e;
vx_w.reserve(out_w);
vx_e.reserve(out_w);
vd_w.reserve(out_w);
vd_e.reserve(out_w);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (int l = 0; l < out_w; l++) {
int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
: static_cast<int>(ratio_w * l);
x_w = (x_w > 0) ? x_w : 0; // w
int x_e = (x_w < (in_w - 1)) ? (x_w + 1) : x_w; // w_id
float idx_src_x = ratio_w * (l + 0.5) - 0.5;
idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w; // w1lambda
float d_e = 1.f - d_w; // w2lambda
{
vx_w[l] = x_w;
vx_e[l] = x_e;
vd_w[l] = d_w;
vd_e[l] = d_e;
}
}
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(3)
#endif
for (int i = 0; i < n; i++) { // loop for batches
for (int j = 0; j < c; j++) { // loop for channels
for (int l = 0; l < out_w; l++) {
// linear interpolation
T out_t;
if (data_layout == DataLayout::kNCHW) {
out_t = input_t(i, j, vx_w[l]) * vd_e[l] +
input_t(i, j, vx_e[l]) * vd_w[l];
output_t(i, j, l) = out_t;
} else {
out_t = input_t(i, vx_w[l], j) * vd_e[l] +
input_t(i, vx_e[l], j) * vd_w[l];
output_t(i, l, j) = out_t;
}
}
}
}
}
template <typename T>
static void LinearInterpolationGrad(const Tensor& output_grad,
Tensor* input_grad, const float ratio_w,
const int in_w, const int n, const int c,
const int out_w, const bool align_corners,
const int align_mode,
const DataLayout data_layout) {
auto input_grad_t = EigenTensor<T, 3>::From(*input_grad);
auto output_grad_t = EigenTensor<T, 3>::From(output_grad);
bool align_flag = (align_mode == 0 && !align_corners);
for (int l = 0; l < out_w; l++) {
int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
: static_cast<int>(ratio_w * l);
x_w = (x_w > 0) ? x_w : 0; // w
int x_e = (x_w < (in_w - 1)) ? (x_w + 1) : x_w; // w_id
float idx_src_x = ratio_w * (l + 0.5) - 0.5;
idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w; // w1lambda
float d_e = 1.f - d_w; // w2lambda
for (int i = 0; i < n; i++) { // loop for batches
for (int j = 0; j < c; j++) { // loop for channels
// linear interpolation grad
if (data_layout == DataLayout::kNCHW) {
const T grad = output_grad_t(i, j, l);
input_grad_t(i, j, x_w) += static_cast<T>(grad * d_e);
input_grad_t(i, j, x_e) += static_cast<T>(grad * d_w);
} else {
const T grad = output_grad_t(i, l, j);
input_grad_t(i, x_w, j) += static_cast<T>(grad * d_e);
input_grad_t(i, x_e, j) += static_cast<T>(grad * d_w);
}
}
}
}
}
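// 2-D bilinear interpolation: precompute the north/south source rows and weights
// and the west/east source columns and weights, then blend the four neighbouring
// source pixels for every output location.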
template <typename T>
static void BilinearInterpolation(const Tensor& input, Tensor* output,
const float ratio_h, const float ratio_w,
const int in_h, const int in_w, const int n,
const int c, const int out_h, const int out_w,
const bool align_corners,
const bool align_mode,
const DataLayout data_layout) {
auto input_t = EigenTensor<T, 4>::From(input);
auto output_t = EigenTensor<T, 4>::From(*output);
bool align_flag = (align_mode == 0 && !align_corners);
std::vector<int> vy_n, vy_s;
std::vector<float> vd_n, vd_s;
vy_n.reserve(out_h);
vy_s.reserve(out_h);
vd_n.reserve(out_h);
vd_s.reserve(out_h);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (int k = 0; k < out_h; k++) {
int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
: static_cast<int>(ratio_h * k);
y_n = (y_n > 0) ? y_n : 0;
int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);
float idx_src_y = ratio_h * (k + 0.5) - 0.5;
idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
float d_s = 1.f - d_n;
{
vy_n[k] = y_n;
vy_s[k] = y_s;
vd_n[k] = d_n;
vd_s[k] = d_s;
}
}
std::vector<int> vx_w, vx_e;
std::vector<float> vd_w, vd_e;
vx_w.reserve(out_w);
vx_e.reserve(out_w);
vd_w.reserve(out_w);
vd_e.reserve(out_w);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (int l = 0; l < out_w; l++) {
int x_w = (align_mode == 0 && !align_corners)
? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
: static_cast<int>(ratio_w * l);
x_w = (x_w > 0) ? x_w : 0;
int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);
float idx_src_x = ratio_w * (l + 0.5) - 0.5;
idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;
float d_e = 1.f - d_w;
{
vx_w[l] = x_w;
vx_e[l] = x_e;
vd_w[l] = d_w;
vd_e[l] = d_e;
}
}
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(4)
#endif
for (int i = 0; i < n; i++) { // loop for batches
for (int j = 0; j < c; j++) { // loop for channels
for (int k = 0; k < out_h; k++) { // loop for images
for (int l = 0; l < out_w; l++) {
// bilinear interpolation
T out_t;
if (data_layout == DataLayout::kNCHW) {
out_t = input_t(i, j, vy_n[k], vx_w[l]) * vd_s[k] * vd_e[l] +
input_t(i, j, vy_s[k], vx_w[l]) * vd_n[k] * vd_e[l] +
input_t(i, j, vy_n[k], vx_e[l]) * vd_s[k] * vd_w[l] +
input_t(i, j, vy_s[k], vx_e[l]) * vd_n[k] * vd_w[l];
output_t(i, j, k, l) = out_t;
} else {
out_t = input_t(i, vy_n[k], vx_w[l], j) * vd_s[k] * vd_e[l] +
input_t(i, vy_s[k], vx_w[l], j) * vd_n[k] * vd_e[l] +
input_t(i, vy_n[k], vx_e[l], j) * vd_s[k] * vd_w[l] +
input_t(i, vy_s[k], vx_e[l], j) * vd_n[k] * vd_w[l];
output_t(i, k, l, j) = out_t;
}
}
}
}
}
}
template <typename T>
static void TrilinearInterpolation(
const Tensor& input, Tensor* output, const float ratio_d,
const float ratio_h, const float ratio_w, const int in_d, const int in_h,
const int in_w, const int n, const int c, const int out_d, const int out_h,
const int out_w, const bool align_corners, const bool align_mode,
const DataLayout& data_layout) {
auto input_t = EigenTensor<T, 5>::From(input);
auto output_t = EigenTensor<T, 5>::From(*output);
bool align_flag = (align_mode == 0 && !align_corners);
std::vector<int> vt_f, vt_b;
std::vector<float> vd_f, vd_b;
vt_f.reserve(out_d);
vt_b.reserve(out_d);
vd_f.reserve(out_d);
vd_b.reserve(out_d);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (int j = 0; j < out_d; j++) {
int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5)
: static_cast<int>(ratio_d * j);
t_f = (t_f > 0) ? t_f : 0;
int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1);
float idx_src_t = ratio_d * (j + 0.5) - 0.5;
idx_src_t = (idx_src_t > 0) ? idx_src_t : 0;
float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f;
float d_b = 1.f - d_f;
{
vt_f[j] = t_f;
vt_b[j] = t_b;
vd_f[j] = d_f;
vd_b[j] = d_b;
}
}
std::vector<int> vy_n, vy_s;
std::vector<float> vd_n, vd_s;
vy_n.reserve(out_h);
vy_s.reserve(out_h);
vd_n.reserve(out_h);
vd_s.reserve(out_h);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (int k = 0; k < out_h; k++) {
int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
: static_cast<int>(ratio_h * k);
y_n = (y_n > 0) ? y_n : 0;
int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);
float idx_src_y = ratio_h * (k + 0.5) - 0.5;
idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
float d_s = 1.f - d_n;
{
vy_n[k] = y_n;
vy_s[k] = y_s;
vd_n[k] = d_n;
vd_s[k] = d_s;
}
}
std::vector<int> vx_w, vx_e;
std::vector<float> vd_w, vd_e;
  vx_w.resize(out_w);
  vx_e.resize(out_w);
  vd_w.resize(out_w);
  vd_e.resize(out_w);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (int l = 0; l < out_w; l++) {
int x_w = (align_mode == 0 && !align_corners)
? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
: static_cast<int>(ratio_w * l);
x_w = (x_w > 0) ? x_w : 0;
int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);
float idx_src_x = ratio_w * (l + 0.5) - 0.5;
idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;
float d_e = 1.f - d_w;
{
vx_w[l] = x_w;
vx_e[l] = x_e;
vd_w[l] = d_w;
vd_e[l] = d_e;
}
}
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(5)
#endif
for (int b = 0; b < n; b++) { // loop for batches
for (int i = 0; i < c; i++) { // loop for channels
for (int j = 0; j < out_d; j++) { // loop for D, H, W
for (int k = 0; k < out_h; k++) {
for (int l = 0; l < out_w; l++) {
// trilinear interpolation
if (data_layout == DataLayout::kNCHW) {
T out_t = input_t(b, i, vt_f[j], vy_n[k], vx_w[l]) * vd_b[j] *
vd_s[k] * vd_e[l] +
input_t(b, i, vt_f[j], vy_n[k], vx_e[l]) * vd_b[j] *
vd_s[k] * vd_w[l] +
input_t(b, i, vt_f[j], vy_s[k], vx_w[l]) * vd_b[j] *
vd_n[k] * vd_e[l] +
input_t(b, i, vt_f[j], vy_s[k], vx_e[l]) * vd_b[j] *
vd_n[k] * vd_w[l] +
input_t(b, i, vt_b[j], vy_n[k], vx_w[l]) * vd_f[j] *
vd_s[k] * vd_e[l] +
input_t(b, i, vt_b[j], vy_n[k], vx_e[l]) * vd_f[j] *
vd_s[k] * vd_w[l] +
input_t(b, i, vt_b[j], vy_s[k], vx_w[l]) * vd_f[j] *
vd_n[k] * vd_e[l] +
input_t(b, i, vt_b[j], vy_s[k], vx_e[l]) * vd_f[j] *
vd_n[k] * vd_w[l];
output_t(b, i, j, k, l) = out_t;
} else {
T out_t = input_t(b, vt_f[j], vy_n[k], vx_w[l], i) * vd_b[j] *
vd_s[k] * vd_e[l] +
input_t(b, vt_f[j], vy_n[k], vx_e[l], i) * vd_b[j] *
vd_s[k] * vd_w[l] +
input_t(b, vt_f[j], vy_s[k], vx_w[l], i) * vd_b[j] *
vd_n[k] * vd_e[l] +
input_t(b, vt_f[j], vy_s[k], vx_e[l], i) * vd_b[j] *
vd_n[k] * vd_w[l] +
input_t(b, vt_b[j], vy_n[k], vx_w[l], i) * vd_f[j] *
vd_s[k] * vd_e[l] +
input_t(b, vt_b[j], vy_n[k], vx_e[l], i) * vd_f[j] *
vd_s[k] * vd_w[l] +
input_t(b, vt_b[j], vy_s[k], vx_w[l], i) * vd_f[j] *
vd_n[k] * vd_e[l] +
input_t(b, vt_b[j], vy_s[k], vx_e[l], i) * vd_f[j] *
vd_n[k] * vd_w[l];
output_t(b, j, k, l, i) = out_t;
}
}
}
}
}
}
}
template <typename T>
HOSTDEVICE inline T cubic_convolution1(T x, T A) {
return ((A + 2) * x - (A + 3)) * x * x + 1;
}
template <typename T>
HOSTDEVICE inline T cubic_convolution2(T x, T A) {
return ((A * x - 5 * A) * x + 8 * A) * x - 4 * A;
}
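// Note (added for clarity): cubic_convolution1/2 are the two pieces of Keys'
// cubic convolution kernel, W(x) = (A+2)|x|^3 - (A+3)|x|^2 + 1 for |x| <= 1 and
// W(x) = A|x|^3 - 5A|x|^2 + 8A|x| - 4A for 1 < |x| < 2, written in Horner form;
// get_cubic_upsample_coefficients below evaluates them with A = -0.75.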
template <typename T>
HOSTDEVICE inline void get_cubic_upsample_coefficients(T coeffs[4], T t) {
T A = -0.75;
T x1 = t;
coeffs[0] = cubic_convolution2<T>(x1 + 1.0, A);
coeffs[1] = cubic_convolution1<T>(x1, A);
// opposite coefficients
T x2 = 1.0 - t;
coeffs[2] = cubic_convolution1<T>(x2, A);
coeffs[3] = cubic_convolution2<T>(x2 + 1.0, A);
}
template <typename T>
static inline T cubic_interp(T x0, T x1, T x2, T x3, T t) {
T coeffs[4];
get_cubic_upsample_coefficients<T>(coeffs, t);
return x0 * coeffs[0] + x1 * coeffs[1] + x2 * coeffs[2] + x3 * coeffs[3];
}
template <typename T>
static void BicubicInterpolation(const Tensor& input, Tensor* output,
const float ratio_h, const float ratio_w,
const int in_h, const int in_w, const int n,
const int c, const int out_h, const int out_w,
const bool align_corners,
const DataLayout data_layout) {
auto input_t = EigenTensor<T, 4>::From(input);
auto output_t = EigenTensor<T, 4>::From(*output);
for (int k = 0; k < out_h; k++) { // loop for images
T y_n = align_corners ? static_cast<T>(ratio_h * k)
: static_cast<T>(ratio_h * (k + 0.5) - 0.5);
int input_y = floorf(y_n);
const T y_t = y_n - input_y;
for (int l = 0; l < out_w; l++) {
T x_n = align_corners ? static_cast<T>(ratio_w * l)
: static_cast<T>(ratio_w * (l + 0.5) - 0.5);
int input_x = floorf(x_n);
const T x_t = x_n - input_x;
for (int i = 0; i < n; i++) { // loop for batches
for (int j = 0; j < c; j++) { // loop for channels
T coefficients[4];
// interp 4 times in x direction
for (int ii = 0; ii < 4; ii++) {
int access_y = std::max(std::min(input_y - 1 + ii, in_h - 1),
static_cast<int>(0));
int access_x_0 =
std::max(std::min(input_x - 1, in_w - 1), static_cast<int>(0));
int access_x_1 =
std::max(std::min(input_x + 0, in_w - 1), static_cast<int>(0));
int access_x_2 =
std::max(std::min(input_x + 1, in_w - 1), static_cast<int>(0));
int access_x_3 =
std::max(std::min(input_x + 2, in_w - 1), static_cast<int>(0));
if (data_layout == DataLayout::kNCHW) {
coefficients[ii] =
cubic_interp<T>(input_t(i, j, access_y, access_x_0),
input_t(i, j, access_y, access_x_1),
input_t(i, j, access_y, access_x_2),
input_t(i, j, access_y, access_x_3), x_t);
} else {
coefficients[ii] =
cubic_interp<T>(input_t(i, access_y, access_x_0, j),
input_t(i, access_y, access_x_1, j),
input_t(i, access_y, access_x_2, j),
input_t(i, access_y, access_x_3, j), x_t);
}
}
// interp y direction
if (data_layout == DataLayout::kNCHW) {
output_t(i, j, k, l) =
cubic_interp<T>(coefficients[0], coefficients[1],
coefficients[2], coefficients[3], y_t);
} else {
output_t(i, k, l, j) =
cubic_interp<T>(coefficients[0], coefficients[1],
coefficients[2], coefficients[3], y_t);
}
}
}
}
}
}
template <typename T>
static void NearestNeighborInterpolateGrad(
const Tensor& output_grad, Tensor* input_grad, const float ratio_h,
const float ratio_w, const int n, const int c, const int out_h,
const int out_w, const bool align_corners, const DataLayout data_layout) {
auto input_grad_t = EigenTensor<T, 4>::From(*input_grad);
auto output_grad_t = EigenTensor<T, 4>::From(output_grad);
for (int k = 0; k < out_h; k++) { // loop for images
int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5)
: static_cast<int>(ratio_h * k);
for (int l = 0; l < out_w; l++) {
int in_l = (align_corners) ? static_cast<int>(ratio_w * l + 0.5)
: static_cast<int>(ratio_w * l);
for (int i = 0; i < n; i++) { // loop for batches
for (int j = 0; j < c; j++) { // loop for channels
if (data_layout == DataLayout::kNCHW) {
input_grad_t(i, j, in_k, in_l) += output_grad_t(i, j, k, l);
} else {
input_grad_t(i, in_k, in_l, j) += output_grad_t(i, k, l, j);
}
}
}
}
}
}
template <typename T>
static void BilinearInterpolationGrad(
const Tensor& output_grad, Tensor* input_grad, const float ratio_h,
const float ratio_w, const int in_h, const int in_w, const int n,
const int c, const int out_h, const int out_w, const bool align_corners,
const int align_mode, const DataLayout data_layout) {
auto input_grad_t = EigenTensor<T, 4>::From(*input_grad);
auto output_grad_t = EigenTensor<T, 4>::From(output_grad);
bool align_flag = (align_mode == 0 && !align_corners);
for (int k = 0; k < out_h; k++) { // loop for images
int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
: static_cast<int>(ratio_h * k);
y_n = (y_n > 0) ? y_n : 0;
int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);
float idx_src_y = ratio_h * (k + 0.5) - 0.5;
idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
float d_s = 1.f - d_n;
for (int l = 0; l < out_w; l++) {
int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
: static_cast<int>(ratio_w * l);
x_w = (x_w > 0) ? x_w : 0;
int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);
float idx_src_x = ratio_w * (l + 0.5) - 0.5;
idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;
float d_e = 1.f - d_w;
for (int i = 0; i < n; i++) { // loop for batches
for (int j = 0; j < c; j++) { // loop for channels
// bilinear interpolation grad
if (data_layout == DataLayout::kNCHW) {
const T grad = output_grad_t(i, j, k, l);
input_grad_t(i, j, y_n, x_w) += static_cast<T>(grad * d_s * d_e);
input_grad_t(i, j, y_s, x_w) += static_cast<T>(grad * d_n * d_e);
input_grad_t(i, j, y_n, x_e) += static_cast<T>(grad * d_s * d_w);
input_grad_t(i, j, y_s, x_e) += static_cast<T>(grad * d_n * d_w);
} else {
const T grad = output_grad_t(i, k, l, j);
input_grad_t(i, y_n, x_w, j) += static_cast<T>(grad * d_s * d_e);
input_grad_t(i, y_s, x_w, j) += static_cast<T>(grad * d_n * d_e);
input_grad_t(i, y_n, x_e, j) += static_cast<T>(grad * d_s * d_w);
input_grad_t(i, y_s, x_e, j) += static_cast<T>(grad * d_n * d_w);
}
}
}
}
}
}
template <typename T>
static void TrilinearInterpolationGrad(
const Tensor& output_grad, Tensor* input_grad, const float ratio_d,
const float ratio_h, const float ratio_w, const int in_d, const int in_h,
const int in_w, const int n, const int c, const int out_d, const int out_h,
const int out_w, const bool align_corners, const int align_mode,
const DataLayout data_layout) {
auto input_grad_t = EigenTensor<T, 5>::From(*input_grad);
auto output_grad_t = EigenTensor<T, 5>::From(output_grad);
bool align_flag = (align_mode == 0 && !align_corners);
for (int j = 0; j < out_d; j++) { // loop for D
int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5)
: static_cast<int>(ratio_d * j);
t_f = (t_f > 0) ? t_f : 0;
int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1);
float idx_src_t = ratio_d * (j + 0.5) - 0.5;
idx_src_t = (idx_src_t > 0) ? idx_src_t : 0;
float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f;
float d_b = 1.f - d_f;
for (int k = 0; k < out_h; k++) { // loop for H
int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
: static_cast<int>(ratio_h * k);
y_n = (y_n > 0) ? y_n : 0;
int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);
float idx_src_y = ratio_h * (k + 0.5) - 0.5;
idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
float d_s = 1.f - d_n;
for (int l = 0; l < out_w; l++) { // loop for W
int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
: static_cast<int>(ratio_w * l);
x_w = (x_w > 0) ? x_w : 0;
int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);
float idx_src_x = ratio_w * (l + 0.5) - 0.5;
idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;
float d_e = 1.f - d_w;
for (int b = 0; b < n; b++) { // loop for batches
for (int i = 0; i < c; i++) { // loop for channels
// trilinear interpolation grad
if (data_layout == DataLayout::kNCHW) {
const T grad = output_grad_t(b, i, j, k, l);
input_grad_t(b, i, t_f, y_n, x_w) +=
static_cast<T>(grad * d_b * d_s * d_e);
input_grad_t(b, i, t_f, y_n, x_e) +=
static_cast<T>(grad * d_b * d_s * d_w);
input_grad_t(b, i, t_f, y_s, x_w) +=
static_cast<T>(grad * d_b * d_n * d_e);
input_grad_t(b, i, t_f, y_s, x_e) +=
static_cast<T>(grad * d_b * d_n * d_w);
input_grad_t(b, i, t_b, y_n, x_w) +=
static_cast<T>(grad * d_f * d_s * d_e);
input_grad_t(b, i, t_b, y_n, x_e) +=
static_cast<T>(grad * d_f * d_s * d_w);
input_grad_t(b, i, t_b, y_s, x_w) +=
static_cast<T>(grad * d_f * d_n * d_e);
input_grad_t(b, i, t_b, y_s, x_e) +=
static_cast<T>(grad * d_f * d_n * d_w);
} else {
const T grad = output_grad_t(b, j, k, l, i);
input_grad_t(b, t_f, y_n, x_w, i) +=
static_cast<T>(grad * d_b * d_s * d_e);
input_grad_t(b, t_f, y_n, x_e, i) +=
static_cast<T>(grad * d_b * d_s * d_w);
input_grad_t(b, t_f, y_s, x_w, i) +=
static_cast<T>(grad * d_b * d_n * d_e);
input_grad_t(b, t_f, y_s, x_e, i) +=
static_cast<T>(grad * d_b * d_n * d_w);
input_grad_t(b, t_b, y_n, x_w, i) +=
static_cast<T>(grad * d_f * d_s * d_e);
input_grad_t(b, t_b, y_n, x_e, i) +=
static_cast<T>(grad * d_f * d_s * d_w);
input_grad_t(b, t_b, y_s, x_w, i) +=
static_cast<T>(grad * d_f * d_n * d_e);
input_grad_t(b, t_b, y_s, x_e, i) +=
static_cast<T>(grad * d_f * d_n * d_w);
}
}
}
}
}
}
}
template <typename T>
static void BicubicInterpolationGrad(const Tensor& output_grad,
Tensor* input_grad, const float ratio_h,
const float ratio_w, const int in_h,
const int in_w, const int n, const int c,
const int out_h, const int out_w,
const bool align_corners,
const DataLayout data_layout) {
auto input_grad_t = EigenTensor<T, 4>::From(*input_grad);
auto output_grad_t = EigenTensor<T, 4>::From(output_grad);
for (int k = 0; k < out_h; k++) { // loop for images
T y_n = align_corners ? static_cast<T>(ratio_h * k)
: static_cast<T>(ratio_h * (k + 0.5) - 0.5);
int input_y = floorf(y_n);
T y_t = y_n - input_y;
for (int l = 0; l < out_w; l++) {
T x_n = align_corners ? static_cast<T>(ratio_w * l)
: static_cast<T>(ratio_w * (l + 0.5) - 0.5);
int input_x = floorf(x_n);
T x_t = x_n - input_x;
T x_coeffs[4];
T y_coeffs[4];
get_cubic_upsample_coefficients<T>(x_coeffs, x_t);
get_cubic_upsample_coefficients<T>(y_coeffs, y_t);
for (int i = 0; i < n; i++) { // loop for batches
for (int j = 0; j < c; j++) { // loop for channels
// bicubic interpolation grad
for (int ii = 0; ii < 4; ii++) {
for (int jj = 0; jj < 4; jj++) {
int access_x = std::max(std::min(input_x - 1 + ii, in_w - 1),
static_cast<int>(0));
int access_y = std::max(std::min(input_y - 1 + jj, in_h - 1),
static_cast<int>(0));
if (data_layout == DataLayout::kNCHW) {
T grad = output_grad_t(i, j, k, l);
input_grad_t(i, j, access_y, access_x) +=
grad * y_coeffs[jj] * x_coeffs[ii];
} else {
T grad = output_grad_t(i, k, l, j);
input_grad_t(i, access_y, access_x, j) +=
grad * y_coeffs[jj] * x_coeffs[ii];
}
}
}
}
}
}
}
}
template <typename T>
static void Interpolate1DCPUFwd(const framework::ExecutionContext& ctx,
const Tensor& input, Tensor* output) {
const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
int n, c, in_d, in_h, in_w;
ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
auto interp_method = ctx.Attr<std::string>("interp_method");
bool align_corners = ctx.Attr<bool>("align_corners");
int align_mode = ctx.Attr<int>("align_mode");
int out_w = ctx.Attr<int>("out_w");
auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
if (list_new_size_tensor.size() > 0) {
// have size tensor
auto new_size = get_new_shape(list_new_size_tensor);
out_w = new_size[0];
} else {
float scale;
auto scale_tensor = ctx.Input<Tensor>("Scale");
if (scale_tensor != nullptr) {
auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
scale = scale_data[0];
} else {
scale = ctx.Attr<float>("scale");
}
if (scale > 0) {
out_w = static_cast<int>(in_w * scale);
}
auto out_size = ctx.Input<Tensor>("OutSize");
if (out_size != nullptr) {
auto out_size_data = get_new_data_from_tensor<int>(out_size);
out_w = out_size_data[0];
}
}
PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument(
"out_w in Attr(out_shape) of Op(interpolate) "
"should be greater than 0."));
framework::DDim dim_out;
if (data_layout == DataLayout::kNCHW) {
dim_out = {n, c, out_w};
} else {
dim_out = {n, out_w, c};
}
output->mutable_data<T>(dim_out, ctx.GetPlace());
if (in_w == out_w) {
framework::TensorCopy(input, ctx.GetPlace(), output);
return;
}
float ratio_w = 0.f;
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(in_w) / out_w;
}
if ("linear" == interp_method) {
LinearInterpolation<T>(input, output, ratio_w, in_w, n, c, out_w,
align_corners, align_mode, data_layout);
}
}
template <typename T>
static void Interpolate2DCPUFwd(const framework::ExecutionContext& ctx,
const Tensor& input, Tensor* output) {
const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
int n, c, in_d, in_h, in_w;
ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
auto interp_method = ctx.Attr<std::string>("interp_method");
bool align_corners = ctx.Attr<bool>("align_corners");
int align_mode = ctx.Attr<int>("align_mode");
int out_h = ctx.Attr<int>("out_h");
int out_w = ctx.Attr<int>("out_w");
auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
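  // Output size resolution order (a summary of the branches below): an explicit
  // SizeTensor input wins; otherwise the size is derived from the Scale input
  // (or the "scale" attribute), and an OutSize input, if present, overrides the
  // scale-derived value.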
if (list_new_size_tensor.size() > 0) {
// have size tensor
auto new_size = get_new_shape(list_new_size_tensor);
out_h = new_size[0];
out_w = new_size[1];
} else {
float scale;
auto scale_tensor = ctx.Input<Tensor>("Scale");
if (scale_tensor != nullptr) {
auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
scale = scale_data[0];
} else {
scale = ctx.Attr<float>("scale");
}
if (scale > 0) {
out_h = static_cast<int>(in_h * scale);
out_w = static_cast<int>(in_w * scale);
}
auto out_size = ctx.Input<Tensor>("OutSize");
if (out_size != nullptr) {
auto out_size_data = get_new_data_from_tensor<int>(out_size);
out_h = out_size_data[0];
out_w = out_size_data[1];
}
}
PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument(
"out_h in Attr(out_shape) of Op(interpolate) "
"should be greater than 0."));
PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument(
"out_w in Attr(out_shape) of Op(interpolate) "
"should be greater than 0."));
framework::DDim dim_out;
if (data_layout == DataLayout::kNCHW) {
dim_out = {n, c, out_h, out_w};
} else {
dim_out = {n, out_h, out_w, c};
}
output->mutable_data<T>(dim_out, ctx.GetPlace());
if (in_h == out_h && in_w == out_w) {
framework::TensorCopy(input, ctx.GetPlace(), output);
return;
}
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_h > 1) {
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(in_h) / out_h;
}
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(in_w) / out_w;
}
if ("bilinear" == interp_method) {
BilinearInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c,
out_h, out_w, align_corners, align_mode,
data_layout);
} else if ("nearest" == interp_method) {
NearestNeighborInterpolate<T>(input, output, ratio_h, ratio_w, n, c, out_h,
out_w, align_corners, data_layout);
} else if ("bicubic" == interp_method) {
BicubicInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c,
out_h, out_w, align_corners, data_layout);
}
}
template <typename T>
static void Interpolate3DCPUFwd(const framework::ExecutionContext& ctx,
const Tensor& input, Tensor* output) {
const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
int n, c, in_d, in_h, in_w;
ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
auto interp_method = ctx.Attr<std::string>("interp_method");
bool align_corners = ctx.Attr<bool>("align_corners");
int align_mode = ctx.Attr<int>("align_mode");
int out_d = ctx.Attr<int>("out_d");
int out_h = ctx.Attr<int>("out_h");
int out_w = ctx.Attr<int>("out_w");
auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
if (list_new_size_tensor.size() > 0) {
// have size tensor
auto new_size = get_new_shape(list_new_size_tensor);
out_d = new_size[0];
out_h = new_size[1];
out_w = new_size[2];
} else {
float scale;
auto scale_tensor = ctx.Input<Tensor>("Scale");
if (scale_tensor != nullptr) {
auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
scale = scale_data[0];
} else {
scale = ctx.Attr<float>("scale");
}
if (scale > 0) {
out_d = static_cast<int>(in_d * scale);
out_h = static_cast<int>(in_h * scale);
out_w = static_cast<int>(in_w * scale);
}
auto out_size = ctx.Input<Tensor>("OutSize");
if (out_size != nullptr) {
auto out_size_data = get_new_data_from_tensor<int>(out_size);
out_d = out_size_data[0];
out_h = out_size_data[1];
out_w = out_size_data[2];
}
}
PADDLE_ENFORCE_GT(out_d, 0, platform::errors::InvalidArgument(
"out_d in Attr(out_shape) of Op(interpolate) "
"should be greater than 0."));
PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument(
"out_h in Attr(out_shape) of Op(interpolate) "
"should be greater than 0."));
PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument(
"out_w in Attr(out_shape) of Op(interpolate) "
"should be greater than 0."));
framework::DDim dim_out;
if (data_layout == DataLayout::kNCHW) {
dim_out = {n, c, out_d, out_h, out_w};
} else {
dim_out = {n, out_d, out_h, out_w, c};
}
output->mutable_data<T>(dim_out, ctx.GetPlace());
if (in_d == out_d && in_h == out_h && in_w == out_w) {
framework::TensorCopy(input, ctx.GetPlace(), output);
return;
}
float ratio_d = 0.f;
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_d > 1) {
ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1)
: static_cast<float>(in_d) / out_d;
}
if (out_h > 1) {
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(in_h) / out_h;
}
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(in_w) / out_w;
}
if ("trilinear" == interp_method) {
TrilinearInterpolation<T>(input, output, ratio_d, ratio_h, ratio_w, in_d,
in_h, in_w, n, c, out_d, out_h, out_w,
align_corners, align_mode, data_layout);
}
}
template <typename T>
static void Interpolate1DCPUBwd(const framework::ExecutionContext& ctx,
Tensor* input_grad, const Tensor& output_grad) {
auto* input = ctx.Input<Tensor>("X");
const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
int n, c, in_d, in_h, in_w;
ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
auto interp_method = ctx.Attr<std::string>("interp_method");
bool align_corners = ctx.Attr<bool>("align_corners");
int align_mode = ctx.Attr<int>("align_mode");
int out_w = ctx.Attr<int>("out_w");
float scale;
auto scale_tensor = ctx.Input<Tensor>("Scale");
if (scale_tensor != nullptr) {
auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
scale = scale_data[0];
} else {
scale = ctx.Attr<float>("scale");
}
if (scale > 0) {
out_w = static_cast<int>(in_w * scale);
}
auto out_size = ctx.Input<Tensor>("OutSize");
if (out_size != nullptr) {
auto out_size_data = get_new_data_from_tensor<int>(out_size);
out_w = out_size_data[0];
}
auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
if (list_new_size_tensor.size() > 0) {
// have size tensor
auto new_size = get_new_shape(list_new_size_tensor);
out_w = new_size[0];
}
framework::DDim dim_grad;
if (data_layout == DataLayout::kNCHW) {
dim_grad = {n, c, in_w};
} else {
dim_grad = {n, in_w, c};
}
input_grad->mutable_data<T>(dim_grad, ctx.GetPlace());
auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>();
math::SetConstant<platform::CPUDeviceContext, T> zero;
zero(device_ctx, input_grad, static_cast<T>(0.0));
if (in_w == out_w) {
framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad);
return;
}
float ratio_w = 0.f;
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(in_w) / out_w;
}
if ("linear" == interp_method) {
LinearInterpolationGrad<T>(output_grad, input_grad, ratio_w, in_w, n, c,
out_w, align_corners, align_mode, data_layout);
}
}
template <typename T>
static void Interpolate2DCPUBwd(const framework::ExecutionContext& ctx,
Tensor* input_grad, const Tensor& output_grad) {
auto* input = ctx.Input<Tensor>("X");
const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
int n, c, in_d, in_h, in_w;
ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
auto interp_method = ctx.Attr<std::string>("interp_method");
bool align_corners = ctx.Attr<bool>("align_corners");
int align_mode = ctx.Attr<int>("align_mode");
int out_h = ctx.Attr<int>("out_h");
int out_w = ctx.Attr<int>("out_w");
float scale;
auto scale_tensor = ctx.Input<Tensor>("Scale");
if (scale_tensor != nullptr) {
auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
scale = scale_data[0];
} else {
scale = ctx.Attr<float>("scale");
}
if (scale > 0) {
out_h = static_cast<int>(in_h * scale);
out_w = static_cast<int>(in_w * scale);
}
auto out_size = ctx.Input<Tensor>("OutSize");
if (out_size != nullptr) {
auto out_size_data = get_new_data_from_tensor<int>(out_size);
out_h = out_size_data[0];
out_w = out_size_data[1];
}
auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
if (list_new_size_tensor.size() > 0) {
// have size tensor
auto new_size = get_new_shape(list_new_size_tensor);
out_h = new_size[0];
out_w = new_size[1];
}
framework::DDim dim_grad;
if (data_layout == DataLayout::kNCHW) {
dim_grad = {n, c, in_h, in_w};
} else {
dim_grad = {n, in_h, in_w, c};
}
input_grad->mutable_data<T>(dim_grad, ctx.GetPlace());
auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>();
math::SetConstant<platform::CPUDeviceContext, T> zero;
zero(device_ctx, input_grad, static_cast<T>(0.0));
if (in_h == out_h && in_w == out_w) {
framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad);
return;
}
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_h > 1) {
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(in_h) / out_h;
}
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(in_w) / out_w;
}
if ("bilinear" == interp_method) {
BilinearInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w,
in_h, in_w, n, c, out_h, out_w, align_corners,
align_mode, data_layout);
} else if ("nearest" == interp_method) {
NearestNeighborInterpolateGrad<T>(output_grad, input_grad, ratio_h, ratio_w,
n, c, out_h, out_w, align_corners,
data_layout);
} else if ("bicubic" == interp_method) {
BicubicInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w, in_h,
in_w, n, c, out_h, out_w, align_corners,
data_layout);
}
}
template <typename T>
static void Interpolate3DCPUBwd(const framework::ExecutionContext& ctx,
                                Tensor* input_grad, const Tensor& output_grad) {
auto* input = ctx.Input<Tensor>("X");
const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
int n, c, in_d, in_h, in_w;
ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
auto interp_method = ctx.Attr<std::string>("interp_method");
bool align_corners = ctx.Attr<bool>("align_corners");
int align_mode = ctx.Attr<int>("align_mode");
int out_d = ctx.Attr<int>("out_d");
int out_h = ctx.Attr<int>("out_h");
int out_w = ctx.Attr<int>("out_w");
float scale;
auto scale_tensor = ctx.Input<Tensor>("Scale");
if (scale_tensor != nullptr) {
auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
scale = scale_data[0];
} else {
scale = ctx.Attr<float>("scale");
}
if (scale > 0) {
out_d = static_cast<int>(in_d * scale);
out_h = static_cast<int>(in_h * scale);
out_w = static_cast<int>(in_w * scale);
}
auto out_size = ctx.Input<Tensor>("OutSize");
if (out_size != nullptr) {
auto out_size_data = get_new_data_from_tensor<int>(out_size);
out_d = out_size_data[0];
out_h = out_size_data[1];
out_w = out_size_data[2];
}
auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
if (list_new_size_tensor.size() > 0) {
// have size tensor
auto new_size = get_new_shape(list_new_size_tensor);
out_d = new_size[0];
out_h = new_size[1];
out_w = new_size[2];
}
framework::DDim dim_grad;
if (data_layout == DataLayout::kNCHW) {
dim_grad = {n, c, in_d, in_h, in_w};
} else {
dim_grad = {n, in_d, in_h, in_w, c};
}
input_grad->mutable_data<T>(dim_grad, ctx.GetPlace());
auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>();
math::SetConstant<platform::CPUDeviceContext, T> zero;
zero(device_ctx, input_grad, static_cast<T>(0.0));
if (in_d == out_d && in_h == out_h && in_w == out_w) {
framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad);
return;
}
float ratio_d = 0.f;
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_d > 1) {
ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1)
: static_cast<float>(in_d) / out_d;
}
if (out_h > 1) {
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(in_h) / out_h;
}
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(in_w) / out_w;
}
if ("trilinear" == interp_method) {
TrilinearInterpolationGrad<T>(
output_grad, input_grad, ratio_d, ratio_h, ratio_w, in_d, in_h, in_w, n,
c, out_d, out_h, out_w, align_corners, align_mode, data_layout);
}
}
template <typename T>
class InterpolateKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* input = ctx.Input<Tensor>("X");
auto* output = ctx.Output<Tensor>("Out");
auto input_dims = input->dims();
if (input_dims.size() == 3) { // 1D interpolation
Interpolate1DCPUFwd<T>(ctx, *input, output);
} else if (input_dims.size() == 4) { // 2D interpolation
Interpolate2DCPUFwd<T>(ctx, *input, output);
} else if (input_dims.size() == 5) { // 3D interpolation
Interpolate3DCPUFwd<T>(ctx, *input, output);
}
}
};
template <typename T>
class InterpolateGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto output_grad_dims = output_grad->dims();
if (output_grad_dims.size() == 3) { // 1D interpolation grad
Interpolate1DCPUBwd<T>(ctx, input_grad, *output_grad);
} else if (output_grad_dims.size() == 4) { // 2D interpolation grad
Interpolate2DCPUBwd<T>(ctx, input_grad, *output_grad);
} else if (output_grad_dims.size() == 5) { // 3D interpolation grad
Interpolate3DCPUBwd<T>(ctx, input_grad, *output_grad);
}
}
};
} // namespace operators
} // namespace paddle
|
eavlInfoTopologyGatherMapOp.h | // Copyright 2010-2014 UT-Battelle, LLC. See LICENSE.txt for more information.
#ifndef EAVL_INFO_TOPOLOGY_GATHER_MAP_OP_H
#define EAVL_INFO_TOPOLOGY_GATHER_MAP_OP_H
#include "eavlCUDA.h"
#include "eavlCellSet.h"
#include "eavlCellSetExplicit.h"
#include "eavlCellSetAllStructured.h"
#include "eavlDataSet.h"
#include "eavlArray.h"
#include "eavlOpDispatch.h"
#include "eavlOperation.h"
#include "eavlTopology.h"
#include "eavlException.h"
#include <time.h>
#ifdef HAVE_OPENMP
#include <omp.h>
#endif
#ifndef DOXYGEN
template <class CONN>
struct eavlInfoTopologyGatherMapOp_CPU
{
static inline eavlArray::Location location() { return eavlArray::HOST; }
template <class F, class IN, class OUT, class INDEX>
static void call(int nitems, CONN &conn,
const IN inputs, OUT outputs,
INDEX indices, F &functor)
{
int *sparseindices = get<0>(indices).array;
#pragma omp parallel for
for (int denseindex = 0; denseindex < nitems; ++denseindex)
{
int sparseindex = sparseindices[get<0>(indices).indexer.index(denseindex)];
int shapeType = conn.GetShapeType(sparseindex);
collect(denseindex, outputs) = functor(shapeType, collect(sparseindex, inputs));
}
}
};
#if defined __CUDACC__
template <class CONN, class F, class IN, class OUT, class INDEX>
__global__ void
eavlInfoTopologyGatherMapOp_kernel(int nitems, CONN conn,
const IN inputs, OUT outputs,
INDEX indices, F functor)
{
int *sparseindices = get<0>(indices).array;
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
for (int denseindex = threadID; denseindex < nitems; denseindex += numThreads)
{
int sparseindex = sparseindices[get<0>(indices).indexer.index(denseindex)];
int shapeType = conn.GetShapeType(sparseindex);
collect(denseindex, outputs) = functor(shapeType, collect(sparseindex, inputs));
}
}
template <class CONN>
struct eavlInfoTopologyGatherMapOp_GPU
{
static inline eavlArray::Location location() { return eavlArray::DEVICE; }
template <class F, class IN, class OUT, class INDEX>
static void call(int nitems, CONN &conn,
const IN inputs, OUT outputs,
INDEX indices, F &functor)
{
int numThreads = 256;
dim3 threads(numThreads, 1, 1);
dim3 blocks (32, 1, 1);
eavlInfoTopologyGatherMapOp_kernel<<< blocks, threads >>>(nitems, conn,
inputs, outputs,
indices, functor);
CUDA_CHECK_ERROR();
}
};
#endif
#endif
// ****************************************************************************
// Class: eavlInfoTopologyGatherMapOp
//
// Purpose:
/// Map from one element in a mesh to the same element, with
/// topological information passed along to the functor.
/// In this gather version of the operation, the inputs (on the destination
/// topology) are sparsely indexed and the outputs are compacted, i.e.
/// the outputs are densely indexed 0 to n-1.
//
// Programmer: Jeremy Meredith
// Creation: August 1, 2013
//
// Modifications:
// ****************************************************************************
template <class I, class O, class INDEX, class F>
class eavlInfoTopologyGatherMapOp : public eavlOperation
{
protected:
eavlCellSet *cells;
eavlTopology topology;
I inputs;
O outputs;
INDEX indices;
F functor;
public:
eavlInfoTopologyGatherMapOp(eavlCellSet *c, eavlTopology t,
I i, O o, INDEX ind, F f)
: cells(c), topology(t), inputs(i), outputs(o), indices(ind), functor(f)
{
}
virtual void GoCPU()
{
eavlCellSetExplicit *elExp = dynamic_cast<eavlCellSetExplicit*>(cells);
eavlCellSetAllStructured *elStr = dynamic_cast<eavlCellSetAllStructured*>(cells);
int n = outputs.first.length();
if (elExp)
{
eavlExplicitConnectivity &conn = elExp->GetConnectivity(topology);
eavlOpDispatch<eavlInfoTopologyGatherMapOp_CPU<eavlExplicitConnectivity> >(n, conn, inputs, outputs, indices, functor);
}
else if (elStr)
{
eavlRegularConnectivity conn = eavlRegularConnectivity(elStr->GetRegularStructure(),topology);
eavlOpDispatch<eavlInfoTopologyGatherMapOp_CPU<eavlRegularConnectivity> >(n, conn, inputs, outputs, indices, functor);
}
}
virtual void GoGPU()
{
#ifdef HAVE_CUDA
eavlCellSetExplicit *elExp = dynamic_cast<eavlCellSetExplicit*>(cells);
eavlCellSetAllStructured *elStr = dynamic_cast<eavlCellSetAllStructured*>(cells);
int n = outputs.first.length();
if (elExp)
{
eavlExplicitConnectivity &conn = elExp->GetConnectivity(topology);
conn.shapetype.NeedOnDevice();
conn.connectivity.NeedOnDevice();
conn.mapCellToIndex.NeedOnDevice();
eavlOpDispatch<eavlInfoTopologyGatherMapOp_GPU<eavlExplicitConnectivity> >(n, conn, inputs, outputs, indices, functor);
conn.shapetype.NeedOnHost();
conn.connectivity.NeedOnHost();
conn.mapCellToIndex.NeedOnHost();
}
else if (elStr)
{
eavlRegularConnectivity conn = eavlRegularConnectivity(elStr->GetRegularStructure(),topology);
eavlOpDispatch<eavlInfoTopologyGatherMapOp_GPU<eavlRegularConnectivity> >(n, conn, inputs, outputs, indices, functor);
}
#else
THROW(eavlException,"Executing GPU code without compiling under CUDA compiler.");
#endif
}
};
// helper function for type deduction
template <class I, class O, class INDEX, class F>
eavlInfoTopologyGatherMapOp<I,O,INDEX,F> *new_eavlInfoTopologyGatherMapOp(eavlCellSet *c, eavlTopology t,
I i, O o, INDEX indices, F f)
{
return new eavlInfoTopologyGatherMapOp<I,O,INDEX,F>(c,t,i,o,indices,f);
}
#endif
|
target.c | #include <stdio.h>
#include "assert.h"
#include <unistd.h>
#pragma omp declare target
void vmul(int*a, int*b, int*c, int N){
#pragma omp parallel for
for(int i=0;i<N;i++) {
c[i]=a[i]*b[i];
}
}
#pragma omp end declare target
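/* Minimal host-side usage sketch (an illustration, not part of the original
 * file): the declare-target region above makes vmul callable from a target
 * region, e.g.
 *
 *   int a[N], b[N], c[N];              // N and the array fill code are placeholders
 *   #pragma omp target map(to: a[0:N], b[0:N]) map(from: c[0:N])
 *   vmul(a, b, c, N);
 *
 * after which c[i] == a[i] * b[i] can be checked on the host with assert().
 */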
|
spmv_N_thread_static.c | /*
*********************************************
* 314 Principles of Programming Languages *
* Fall 2016 *
*********************************************
*
* Read a real (non-complex) sparse matrix from a Matrix Market (v. 2.0) file
* and a vector from a txt file, perform matrix multiplication and store the
* result to output.txt. This is the parallel and static version of sparse matrix vector
* multiplication.
*
*
*
* NOTES:
*
* 1) Matrix Market files are always 1-based, i.e. the index of the first
* element of a matrix is (1,1), not (0,0) as in C. ADJUST THESE
 * OFFSETS ACCORDINGLY when reading and writing
* to files.
*
* 2) ANSI C requires one to use the "l" format modifier when reading
* double precision floating point numbers in scanf() and
* its variants. For example, use "%lf", "%lg", or "%le"
* when reading doubles, otherwise errors will occur.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "mmio.h"
#include <omp.h>
#include <sys/time.h>
#include "utils.h"
//sorting according to the index
void quicksort(double* a, double* vindex, int* rindex, int* cindex, int n)
{
int i, j, m;
double p, t, s;
if (n < 2)
return;
p = vindex[n / 2];
for (i = 0, j = n - 1;; i++, j--) {
while (vindex[i]<p)
i++;
while (p<vindex[j])
j--;
if (i >= j)
break;
t = a[i];
a[i] = a[j];
a[j] = t;
s = vindex[i];
vindex[i] = vindex[j];
vindex[j] = s;
m = rindex[i];
rindex[i] = rindex[j];
rindex[j] = m;
m = cindex[i];
cindex[i] = cindex[j];
cindex[j] = m;
}
quicksort(a, vindex, rindex, cindex, i);
quicksort(a + i, vindex + i, rindex + i, cindex + i, n - i);
}
int main(int argc, char *argv[])
{
int ret_code;
MM_typecode matcode;
FILE *f;
	int M, N, nz; //M is the number of rows, N is the number of columns and nz is the number of nonzero entries
int tmp, i, j, vecdim, *rIndex, *cIndex, *rsIndex, *reIndex;
double *val, *res, *vec, *vIndex;
if (argc < 4)
{
fprintf(stderr, "Usage: %s [martix-market-filename] [input-vector-filename] [thread-num]\n", argv[0]);
exit(1);
}
printf("\nOpening input matrix file: %s\n", argv[1]);
if ((f = fopen(argv[1], "r")) == NULL)
{
printf("Fail to open the input matrix file!\n");
exit(1);
}
if (mm_read_banner(f, &matcode) != 0)
{
printf("Could not process Matrix Market banner.\n");
exit(1);
}
/* This is how one can screen matrix types if their application */
/* only supports a subset of the Matrix Market data types. */
if (mm_is_complex(matcode) && mm_is_matrix(matcode) &&
mm_is_sparse(matcode))
{
printf("Sorry, this application does not support ");
printf("Market Market type: [%s]\n", mm_typecode_to_str(matcode));
exit(1);
}
/* find out size of sparse matrix .... */
if ((ret_code = mm_read_mtx_crd_size(f, &M, &N, &nz)) != 0)
exit(1);
	/* reserve memory for matrices */
rIndex = (int *)malloc(nz * sizeof(int));
cIndex = (int *)malloc(nz * sizeof(int));
val = (double *)malloc(nz * sizeof(double));
/* NOTE: when reading in doubles, ANSI C requires the use of the "l" */
/* specifier as in "%lg", "%lf", "%le", otherwise errors will occur */
/* (ANSI C X3.159-1989, Sec. 4.9.6.2, p. 136 lines 13-15) */
for (i = 0; i<nz; i++)
{
fscanf(f, "%d %d %lg\n", &rIndex[i], &cIndex[i], &val[i]);
rIndex[i]--; /* adjust from 1-based to 0-based */
cIndex[i]--;
}
if (f != stdin) fclose(f);
printf("Opening input vector file: %s\n", argv[2]);
//open and load the vector input
if ((f = fopen(argv[2], "r")) == NULL)
{
printf("Fail to open the input vector file!\n");
exit(1);
}
fscanf(f, "%d\n", &vecdim);
if (vecdim != M)
{
printf("dimension mismatch!\n");
exit(1);
}
vec = (double*)malloc(vecdim * sizeof(double));
for (i = 0; i<vecdim; i++)
{
fscanf(f, "%lg\n", &vec[i]);
}
if (f != stdin) fclose(f);
//the original calculation result
double* res_seq = (double*)malloc(M*sizeof(double));
memset(res_seq, 0, M*sizeof(double));
getmul(val, vec, rIndex, cIndex, nz, res_seq);
vIndex = (double*)malloc(nz*sizeof(double));
memset(vIndex, 0, nz*sizeof(double));
for (i = 0; i < nz; i++)
{
vIndex[i] = (double)rIndex[i] * N + cIndex[i];
if (vIndex[i] < 0)
{
printf("Error!\n");
exit(1);
}
}
quicksort(val, vIndex, rIndex, cIndex, nz);
	//We use rsIndex/reIndex to keep the start/end position of each row. The initial values are
	//-1 or -2 for all entries. rsIndex[i] indicates the start position of the i-th row. Hence
	//the position index of the i-th row is from rsIndex[i] to reIndex[i]
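	//Example: after sorting, if the nonzeros of row 2 occupy positions 5..8 of
	//val/cIndex, then rsIndex[2]=5 and reIndex[2]=8; a row with no nonzeros keeps
	//rsIndex=-1 and reIndex=-2, so the per-row loop below simply does no work for it.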
rsIndex = (int*)malloc(M*sizeof(int)); //start/end position of each row
memset(rsIndex, -1, M*sizeof(int));
reIndex = (int*)malloc(M*sizeof(int));
memset(reIndex, -2, M*sizeof(int));
for (i = 0; i<nz; i++)
{
int tmp = (int)(vIndex[i] / N);
if (rsIndex[tmp] == -1)
{
rsIndex[tmp] = i;
reIndex[tmp] = i;
}
else
reIndex[tmp] = i;
}
int thread_num = atoi(argv[3]);
omp_set_num_threads(thread_num);
printf("\n Start computation ... \n");
struct timeval start, end;
gettimeofday(&start, NULL);
/************************/
/* now calculate the multiplication */
/************************/
res = (double*)malloc(M*sizeof(double));
memset(res, 0, M*sizeof(double));
// Your OpenMP pragma should be inserted for one or both loops below.
// You need to determine which loop is safe to be parallelized.
// You will also need to use correct parallelization parameters. Please use a
	// static schedule for this parallelization strategy.
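	// (The outer loop over rows is the one that is safe to parallelize: each
	// iteration i writes only res[i], so iterations are independent. The inner
	// loop accumulates into a single res[i] and would need a reduction.)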
#pragma omp parallel num_threads(thread_num)
{
#pragma omp for private(j, i, tmp) schedule(static)
for (i=0; i<M; i++)
{
for (j = rsIndex[i]; j <= reIndex[i]; j++)
{
tmp = cIndex[j];
res[i] += val[j] * vec[tmp];
}
}
}
gettimeofday(&end, NULL);
printf(" End of computation ... \n\n");
long elapsed_time = ((end.tv_sec * 1000000 + end.tv_usec)
- (start.tv_sec * 1000000 + start.tv_usec));
if (!checkerror(res, res_seq, M))
{
printf("Calculation Error!\n");
exit(1);
}
else {
printf(" Test Result Passed ... \n");
}
printf(" Static Parallelization Total time: %ld micro-seconds\n\n", elapsed_time);
// save the result
if ((f = fopen("output.txt", "w")) == NULL)
{
printf("Fail to open the output file!\n");
exit(1);
}
for (i = 0; i<vecdim; i++)
{
fprintf(f, "%lg\n", res[i]);
}
fclose(f);
free(res_seq);
free(vIndex);
free(res);
free(vec);
free(rIndex);
free(cIndex);
free(val);
free(rsIndex);
free(reIndex);
return 0;
}
|
GB_unaryop__abs_fp64_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_fp64_bool
// op(A') function: GB_tran__abs_fp64_bool
// C type: double
// A type: bool
// cast: double cij = (double) aij
// unaryop: cij = fabs (aij)
#define GB_ATYPE \
bool
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = fabs (x) ;
// casting
#define GB_CASTING(z, aij) \
double z = (double) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_FP64 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_fp64_bool
(
double *Cx, // Cx and Ax may be aliased
bool *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_fp64_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
progress_counter.h | #ifndef TEX_PROGRESSCOUNTER_HEADER
#define TEX_PROGRESSCOUNTER_HEADER
#include <atomic>
#include <fstream>
#include <iostream>
#include <sstream>
#include "util/timer.h"
#include <cmath>
enum ProgressCounterStyle{
ETA,
SIMPLE
};
static const std::string clear = "\r" + std::string(80,' ') + "\r";
/*
 * Displays processing progress.
 */
class ProgressCounter{
private:
std::ofstream tty;
    util::WallTimer timer; // elapsed time
std::string task; //
std::size_t max;
std::atomic_size_t count;
public:
ProgressCounter(std::string const & _task, std::size_t max);
template<ProgressCounterStyle T> void progress(void);
void inc(void);
};
inline ProgressCounter::ProgressCounter(std::string const & _task, std::size_t _max): tty("/dev/tty", std::ios_base::out), timer(),task(_task), max(_max), count(0) {}
template<ProgressCounterStyle T>
void ProgressCounter::progress(void){
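    // Only print when the integer percentage can actually change (count is a
    // multiple of max/100), so roughly 100 terminal updates are issued at most.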
if ((max > 100 && count % (max / 100) == 0) || max <= 100) {
float percent = static_cast<float>(count) / max;
int ipercent = std::floor(percent * 100.0f + 0.5f);
std::stringstream ss;
ss << clear << task << " " << ipercent << "%...";
if (T == ETA && ipercent > 3){
std::size_t const elapsed = timer.get_elapsed();
std::size_t eta = (elapsed / percent - elapsed) / 1000;
ss << " eta ~ " << eta << " s";
}
#pragma omp critical(progress_counter_progress)
tty << ss.rdbuf() << std::flush;
}
}
inline void ProgressCounter::inc(void){
std::size_t tmp;
tmp = ++count;
if(tmp == max) {
std::stringstream ss;
        ss << clear << task << " 100%... done. (took "
           << timer.get_elapsed_sec() << " s)";
#pragma omp critical(progress_counter_inc)
std::cout << ss.rdbuf() << std::endl;
}
}
#endif /* TEX_PROGRESSCOUNTER_HEADER */ |
ParallelAlgorithms.c | /*
============================================================================
Author : Roberto Diaz Morales
============================================================================
Copyright (c) 2016 Roberto Díaz Morales
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files
(the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
============================================================================
*/
/**
* @brief Functions to perform some parallel linear algebra tasks.
*
* Parallel procedures to solve linear systems, cholesky factorization,
* matrix products or triangular matrix inversion.
*
* @file ParallelAlgorithms.c
* @author Roberto Diaz Morales
* @date 23 Aug 2016
* @see ParallelAlgorithms.h
*/
#include "ParallelAlgorithms.h"
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
/**
* @cond
*/
extern void dgemm_(char *transa, char *transb, int *m, int *n, int *k, double
*alpha, double *a, int *lda, double *b, int *ldb, double *beta, double *c,
int *ldc );
extern void dpotrs_(char *uplo, int *n, int *nrhs, double *A, int *lda,
double *B, int *ldb, int *info);
extern void dtrtri_(char *uplo, char *diag, int *n, double *a, int *lda,
int *info);
extern void dpotrf_(char *uplo, int *n, double *A, int *lda, int *info);
extern void dsyrk_(char *uplo, char *trans, int *n, int *k,
double *alpha, double *a, int *lda, double *beta,
double *c, int *ldc);
extern void dtrmm_(char *side, char *uplo, char *transA, char *diag,
int *m, int *n, double *alpha, double *A, int *ldA,
double *B, int *ldB);
/** @brief Auxiliary per-thread memory used to hold temporary results in the parallel algebra operations. */
double **auxmemory1;
/** @brief Auxiliary per-thread memory used to hold temporary results in the parallel algebra operations. */
double **auxmemory2;
/** @brief Auxiliary per-thread memory used to hold temporary results in the parallel algebra operations. */
double **auxmemory3;
/**
 * @brief Allocates the auxiliary memory used by the algebra operations.
 *
 *
 * The parallel linear algebra functions of this module require some memory for every thread to
 * store temporary results. This function must be called before any other function.
 *
 * @param Threads The number of threads to parallelize the linear algebra functions.
 * @param size The size of the dimensions of the matrices that will be handled. If a matrix has a rows and b columns, then size=max(a,b).
* @see updateMemory()
*/
void initMemory(int Threads, int size){
auxmemory1=(double **) calloc(Threads,sizeof(double*));
auxmemory2=(double **) calloc(Threads,sizeof(double*));
auxmemory3=(double **) calloc(Threads,sizeof(double*));
int i;
for(i=0;i<Threads;i++){
auxmemory1[i]=(double *) calloc(pow(ceil(1.0*(size)),2),sizeof(double));
auxmemory2[i]=(double *) calloc(pow(ceil(1.0*(size)),2),sizeof(double));
auxmemory3[i]=(double *) calloc(pow(ceil(1.0*(size)),2),sizeof(double));
}
}
/**
 * @brief Frees the auxiliary memory used by the algebra operations.
 *
 *
 * The parallel linear algebra functions of this module require some memory for every thread to
 * store temporary results. This function must be called after all other functions.
*
* @param Threads The number of threads to parallelize the linear algebra functions.
* @see updateMemory()
*/
void freeMemory(int Threads){
int i;
for(i=0;i<Threads;i++){
free(auxmemory1[i]);
free(auxmemory2[i]);
free(auxmemory3[i]);
}
free(auxmemory1);
free(auxmemory2);
free(auxmemory3);
}
/**
 * @brief Updates (grows) the auxiliary memory used by the algebra operations.
 *
 * The parallel linear algebra functions of this module require some memory for every thread to
 * store temporary results. This function must be called if the memory was allocated with initMemory and we are going to work with bigger matrices.
 *
 * @param Threads The number of threads to parallelize the linear algebra functions.
 * @param size The size of the dimensions of the matrices that will be handled. If a matrix has a rows and b columns, then size=max(a,b).
* @see initMemory()
*/
void updateMemory(int Threads, int size){
int i;
for(i=0;i<Threads;i++){
auxmemory1[i]=realloc(auxmemory1[i],pow(ceil(1.0*(size)),2)*sizeof(double));
auxmemory2[i]=realloc(auxmemory2[i],pow(ceil(1.0*(size)),2)*sizeof(double));
auxmemory3[i]=realloc(auxmemory3[i],pow(ceil(1.0*(size)),2)*sizeof(double));
}
}
/**
 * @brief This function saves a piece of a matrix in the auxiliary memory.
 *
 * This is an auxiliary function of the linear algebra functions. It is used to save a submatrix of
 * a matrix given as a parameter in the auxiliary memory.
 *
 * @param matrix The original matrix.
 * @param size1 The number of rows of the original matrix.
 * @param size2 The number of columns of the original matrix.
 * @param O1 The row where the submatrix starts.
 * @param O2 The column where the submatrix starts.
 * @param A The pointer where the submatrix will be stored.
 * @param size3 Number of rows of the submatrix to store.
 * @param size4 Number of columns of the submatrix to store.
* @param nCores Number of threads to perform this task.
*/
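/* Note: matrices are stored in column-major order (as expected by the BLAS/LAPACK
 * routines used in this file), so each column of the requested submatrix is a
 * contiguous block that can be copied with a single memcpy below. */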
void getSubMatrix(double *matrix,int size1,int size2,int O1,int O2,double *A, int size3,int size4,int nCores){
int j;
for(j=0;j<size4;j++){
memcpy(&A[j*size3],&matrix[(j+O2)*size1+O1],size3*sizeof(double));
}
}
/**
 * @brief This function loads a piece of a matrix from the auxiliary memory and stores it as a submatrix of another matrix.
 *
 * This is an auxiliary function of the linear algebra functions. It is used to load a matrix from
 * the auxiliary memory and store it as a submatrix of another matrix.
 *
 * @param matrix The original matrix where the submatrix will be placed.
 * @param size1 The number of rows of the original matrix.
 * @param size2 The number of columns of the original matrix.
 * @param O1 The row of the original matrix where the submatrix starts.
 * @param O2 The column of the original matrix where the submatrix starts.
 * @param A The pointer where the submatrix is stored.
 * @param size3 Number of rows of the submatrix that is stored.
 * @param size4 Number of columns of the submatrix that is stored.
* @param nCores Number of threads to perform this task.
*/
void putSubMatrix(double *matrix,int size1,int size2,int O1,int O2,double *A, int size3,int size4,int nCores){
int j;
for(j=0;j<size4;j++){
memcpy(&matrix[(j+O2)*size1+O1],&A[j*size3],size3*sizeof(double));
}
}
/**
 * @brief Main function that performs a parallel Cholesky factorization.
 *
 * This function performs a parallel Cholesky factorization on a square submatrix
 * of a matrix received as an argument. It uses OpenMP to parallelize the task across
 * different threads, and each of them performs a subtask using the function Chol.
 * @param matrix The matrix on which to perform the Cholesky factorization.
* @param r The number of rows of matrix
* @param c The number of columns of matrix.
* @param ro The row where the submatrix starts.
* @param co The column where the submatrix starts.
* @param n The order of the square submatrix
* @param nCores The number of threads to perform the task.
 * @param deep It is a recursive function; this parameter tells the recursion depth to use.
* @see Chol()
*/
void ParallelChol(double *matrix,int r,int c, int ro, int co, int n,int nCores, int deep){
double *memaux = (double *)calloc(2*pow(ceil(0.5*n),2),sizeof(double));
int blockSize = pow(ceil(0.5*n),2)/nCores;
int i;
#pragma omp parallel default(shared) private(i)
{
#pragma omp for schedule(static)
for (i=0;i<nCores;i++){
Chol(matrix,r,c,ro,co,n,nCores,i,deep,0,memaux,blockSize);
}
}
free(memaux);
}
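/*
 * Usage sketch (an informal example, not part of the library): factorizing an
 * n x n symmetric positive definite matrix A, stored column-major, with 4 threads.
 * The names A and n, and the chosen recursion depth, are placeholders.
 *
 *   int threads = 4;
 *   initMemory(threads, n);                      // per-thread scratch buffers
 *   omp_set_num_threads(threads);
 *   ParallelChol(A, n, n, 0, 0, n, threads, 2);  // A is overwritten: the lower triangle
 *                                                // holds L, the strict upper part is zeroed
 *   freeMemory(threads);
 */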
/**
 * @brief Auxiliary function to perform a parallel Cholesky factorization.
 *
 * This function is called by ParallelChol, which creates different threads and calls this function
 * on each one using its thread index and the total number of threads as parameters.
 *
 * This is a recursive function in which every thread eventually works out the part of the matrix it needs
 * and the linear algebra operations that it has to do.
 *
 * @param matrix The matrix on which to perform the Cholesky factorization.
* @param r The number of rows of matrix
* @param c The number of columns of matrix.
* @param ro The row where the submatrix starts.
* @param co The column where the submatrix starts.
* @param n The order of the square submatrix
* @param nCores The number of threads to perform the task.
* @param numTh Thread identifier.
 * @param posIni Variable used to determine which task this thread performs.
 * @param memaux The auxiliary memory.
 * @param blockSize The size of the per-thread block of auxiliary memory used to hold temporary results.
 * @param deep It is a recursive function; this parameter tells the recursion depth to use.
* @see ParallelChol()
* @see initMemory()
* @see updateMemory()
*/
void Chol(double *matrix,int r,int c, int ro, int co, int n,int nCores,int numTh, int deep,int posIni,double *memaux, int blockSize){
    if(deep<=1 || n < 8){
        if(numTh==posIni && n>0){
double *m=auxmemory1[numTh];
getSubMatrix(matrix,r,c,ro,co,m, n,n,1);
int info;
char s='L';
dpotrf_(&s,&n, m, &n,&info);
            if(info != 0 && posIni==2){
                printf("Error in dpotrf %d ------------------------------\n",info);
exit(0);
}
int i,j;
for(i=1;i<n;i++){
for(j=0;j<i;j++){
m[i*n+j]=0.0;
}
}
putSubMatrix(matrix,r,c,ro,co,m, n,n,1);
}
}else{
int size1=ceil(0.5*n);
int size2=n-size1;
Chol(matrix,r,c,ro,co,size1,nCores,numTh,deep-1,posIni,memaux,blockSize);
#pragma omp barrier
MoveMatrix(matrix,r,ro,c,co,&memaux[posIni*blockSize],size1,0,size1,0,size1,size1, nCores,posIni,numTh);
#pragma omp barrier
TriangleInversion(&memaux[posIni*blockSize],size1, size1, 0, 0, size1, nCores,posIni,numTh,&memaux[size1*size1],blockSize);
#pragma omp barrier
MoveMatrix(matrix,r,ro+size1,c,co,&memaux[posIni*blockSize+size1*size1],size2,0,size1,0,size2,size1, nCores,posIni,numTh);
#pragma omp barrier
NLTProduct(&memaux[posIni*blockSize],size1,0,size1,0,&memaux[posIni*blockSize+size1*size1],size2,0,size1,0,size1,size2,1.0,matrix,r,ro+size1,c, co, nCores,posIni,numTh);
#pragma omp barrier
AATProduct(matrix,r,ro+size1,c,co,size2,size1,-1.0,1.0,matrix,r,ro+size1,c, co+size1, nCores,posIni,numTh);
#pragma omp barrier
if(numTh==posIni){
double *Zeroes = (double *) malloc(size1*size2*sizeof(double));
putSubMatrix(matrix,r,c,ro,co+size1,Zeroes, size1,size2,1);
free(Zeroes);
}
#pragma omp barrier
Chol(matrix,r,c,ro+size1,co+size1,size2,nCores,numTh,deep-1,posIni,memaux,blockSize);
#pragma omp barrier
}
}
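/*
 * Illustrative sketch (not part of the original API): a serial version of one
 * level of the blocked recursion that Chol() performs, using the same LAPACK/BLAS
 * routines this file already calls (dpotrf_, dtrtri_, dtrmm_, dsyrk_). The helper
 * name example_blocked_chol_step and the plain column-major layout (leading
 * dimension lda, no getSubMatrix copies) are assumptions made for clarity only.
 *
 *   A = | A11  A21^T |      L11 = chol(A11)
 *       | A21  A22   |  =>  L21 = A21 * inv(L11)^T
 *                           A22 <- A22 - L21*L21^T, then factor A22 (recursion).
 */
static void example_blocked_chol_step(double *A, int n, int lda)
{
    int s1 = (n + 1) / 2, s2 = n - s1, info, j;
    char lo = 'L', no = 'N', tr = 'T', right = 'R';
    double one = 1.0, neg = -1.0;
    /* L11 = chol(A11) in the lower triangle of the top-left block */
    dpotrf_(&lo, &s1, A, &lda, &info);
    /* copy L11 into scratch and invert it, as TriangleInversion() does */
    double *Linv = (double *)calloc((size_t)s1 * s1, sizeof(double));
    for (j = 0; j < s1; j++)
        memcpy(&Linv[j * s1 + j], &A[j * lda + j], (s1 - j) * sizeof(double));
    dtrtri_(&lo, &no, &s1, Linv, &s1, &info);
    /* L21 = A21 * inv(L11)^T overwrites the lower-left block (cf. NLTProduct) */
    dtrmm_(&right, &lo, &tr, &no, &s2, &s1, &one, Linv, &s1, &A[s1], &lda);
    /* A22 <- A22 - L21*L21^T (cf. AATProduct); a recursive call would finish A22 */
    dsyrk_(&lo, &no, &s2, &s1, &neg, &A[s1], &lda, &one, &A[s1 * lda + s1], &lda);
    free(Linv);
}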
/**
* @brief Main function to solve a linear system in parallel.
*
 * This function solves a linear system defined by submatrices of matrix1 and matrix2.
 * It uses OpenMP to create different threads, and each of them performs a subtask
* using the function LinearSystem.
* If submatrix1 is the submatrix of matrix1 and submatrix2 is the submatrix of matrix2 then
* it performs (submatrix1)^-1*submatrix2.
*
* @param matrix1 The first matrix.
* @param r1 The number of rows of matrix1.
* @param c1 The number of columns of matrix1.
* @param ro1 The row where the submatrix starts.
* @param co1 The column where the submatrix starts.
* @param matrix2 The second matrix.
* @param r2 The number of rows of matrix2.
* @param c2 The number of columns of matrix2.
* @param ro2 The row where the second submatrix starts.
* @param co2 The column where the second submatrix starts.
* @param n The number of rows of the submatrix1
* @param m The number of columns of the submatrix2.
 * @param result The matrix where the result should be stored.
* @param rr The number of rows of matrix result.
* @param cr The number of columns of matrix result.
* @param ror The row where the result submatrix starts.
* @param cor The column where the result submatrix starts.
* @param nCores The number of threads to launch to solve the task.
*/
void ParallelLinearSystem(double *matrix1,int r1,int c1, int ro1, int co1,double *matrix2,int r2,int c2, int ro2, int co2,int n, int m,double *result,int rr,int cr, int ror, int cor, int nCores){
if(n>nCores){
double *memaux = (double *)calloc(2*pow(ceil(0.5*n),2),sizeof(double));
int blockSize = pow(ceil(0.5*n),2)/nCores;
int i;
#pragma omp parallel default(shared) private(i)
{
#pragma omp for schedule(static)
for (i=0;i<nCores;i++){
LinearSystem(matrix1,r1,c1,ro1,co1,matrix2,r2,c2,ro2,co2,n,m,result,rr,cr,ror,cor,nCores,i,0,memaux,blockSize);
}
}
free(memaux);
}else{
int info;
char s='L';
int cols=1;
dpotrf_(&s,&n, matrix1, &n,&info);
memcpy(result,matrix2,n*sizeof(double));
dpotrs_(&s,&n,&cols, matrix1, &n, result,&n,&info);
}
}
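/*
 * Illustrative sketch (hypothetical helper, not part of the original API): the
 * serial branch above boils down to the standard dpotrf_/dpotrs_ pattern shown
 * here for a single right-hand side, assuming A is SPD and stored column-major.
 */
static int example_spd_solve(double *A, const double *b, double *x, int n)
{
    int info, nrhs = 1;
    char lo = 'L';
    memcpy(x, b, (size_t)n * sizeof(double));   /* dpotrs_ overwrites the RHS with the solution */
    dpotrf_(&lo, &n, A, &n, &info);             /* A is overwritten by its Cholesky factor */
    if (info != 0) return info;                 /* not positive definite (or bad argument) */
    dpotrs_(&lo, &n, &nrhs, A, &n, x, &n, &info);
    return info;
}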
/**
 * @brief This function is called by ParallelLinearSystem, which creates different threads and calls this function
 * on each one, passing the thread index and the total number of threads as parameters.
 *
 * This is a recursive function in which every thread eventually finds out the part of the matrix
 * it needs and the linear algebra operations it has to perform.
*
* @param matrix1 The first matrix.
* @param r1 The number of rows of matrix1.
* @param c1 The number of columns of matrix1.
* @param ro1 The row where the submatrix starts.
* @param co1 The column where the submatrix starts.
* @param matrix2 The second matrix.
* @param r2 The number of rows of matrix2.
* @param c2 The number of columns of matrix2.
* @param ro2 The row where the second submatrix starts.
* @param co2 The column where the second submatrix starts.
* @param n The number of rows of the submatrix1
* @param m The number of columns of the submatrix2.
 * @param result The matrix where the result should be stored.
* @param rr The number of rows of matrix result.
* @param cr The number of columns of matrix result.
* @param ror The row where the result submatrix starts.
* @param cor The column where the result submatrix starts.
* @param nCores The number of threads to launch to solve the task.
* @param numTh Thread identifier.
* @param posIni Variable to know what task to do by this thread.
 * @param memaux The auxiliary memory.
* @param blockSize The size of its memory to save temporal results.
* @see ParallelLinearSystem()
*/
void LinearSystem(double *matrix1,int r1,int c1, int ro1, int co1,double *matrix2,int r2,int c2, int ro2, int co2,int n, int m,double *result,int rr,int cr, int ror, int cor, int nCores, int numTh,int posIni,double *memaux, int blockSize){
int deep=2;
Chol(matrix1,r1,c1,ro1,co1,n,nCores,numTh,deep,posIni,memaux,blockSize);
#pragma omp barrier
int info;
char s='L';
int ncols = 1;
int j,k;
for(j=0;j<n;j++){
for(k=0;k<j;k++){
matrix1[j*n+k]=0.0;
}
}
if(numTh==0){
memcpy(&result[0],&matrix2[0],n*sizeof(double));
dpotrs_(&s,&n,&ncols, &matrix1[0], &n, &result[0],&n,&info);
}
}
/**
 * @brief This function computes the inverse of a lower triangular submatrix in parallel.
 *
 * This function computes a lower triangular matrix inverse in parallel. It is used by the
 * ParallelLinearSystem function to solve the linear system using the Cholesky factorization.
*
* It makes use of the functions DiagInversion, InversionNLProducts and InversionLNProducts
* to tell every thread the operations that they must do.
*
*
* @param matrix The matrix.
* @param r The number of rows of matrix.
* @param c The number of columns of matrix.
* @param ro The row where the submatrix starts.
* @param co The column where the submatrix starts.
* @param n The order of the submatrix.
* @param nCores The number of threads to perform the matrix inversion.
* @param posIni Variable to know what task to do by this thread.
* @param numTh Thread identifier.
 * @param memaux The auxiliary memory.
* @param blockSize The size of its memory to save temporal results.
* @see DiagInversion()
* @see InversionNLProducts()
* @see InversionLNProducts()
*/
void TriangleInversion(double *matrix,int r, int c, int ro, int co, int n,int nCores,int posIni, int numTh,double *memaux,int blockSize){
#pragma omp barrier
DiagInversion(matrix,r,c,ro,co,n,nCores,posIni,numTh);
#pragma omp barrier
int deep=log(nCores)/log(2);
int o;
#pragma omp barrier
for (o=deep;o>=1;o--){
InversionNLProducts(matrix,r,c,ro,co,n,nCores,posIni,o,numTh,memaux,blockSize);
#pragma omp barrier
InversionLNProducts(matrix,r,c,ro,co,n,nCores,posIni,o,numTh,memaux,blockSize);
#pragma omp barrier
}
}
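/*
 * Note (added for clarity): TriangleInversion relies on the block identity for a
 * lower triangular matrix
 *
 *   inv( | L11   0  | )  =  |  inv(L11)                 0       |
 *        | L21  L22 |       | -inv(L22)*L21*inv(L11)  inv(L22)  |
 *
 * DiagInversion inverts the diagonal blocks with dtrtri_, InversionNLProducts
 * forms -L21*inv(L11) (NLProduct with K1 = -1.0), and InversionLNProducts
 * multiplies that by inv(L22) on the left (LNProduct), repeating over the
 * recursion levels walked by the loop above.
 */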
/**
 * @brief This function is an auxiliary function of TriangleInversion.
 *
 * This function is an auxiliary function of TriangleInversion and is in charge of the small matrix
 * inversions of the submatrices that hold a portion of the diagonal.
*
* @param matrix The matrix.
* @param r The number of rows of matrix.
* @param c The number of columns of matrix.
* @param ro The row where the submatrix starts.
* @param co The column where the submatrix starts.
* @param n The order of the submatrix.
* @param nCores The number of threads to perform the matrix inversion.
* @param posIni Variable to know what task to do by this thread.
* @param numTh Thread identifier.
* @see TriangleInversion()
*/
void DiagInversion(double *matrix,int r, int c, int ro, int co, int n,int nCores,int posIni,int numTh){
if(nCores <= 1){
if(n>0){
double *m=auxmemory1[numTh];
getSubMatrix(matrix,r,c,ro,co,m, n,n,1);
int info;
char s1='L';
char s2='N';
dtrtri_(&s1,&s2,&n, m, &n,&info);
if(info != 0){printf("Error en dtrtri %d ------------------------------\n",info);}
putSubMatrix(matrix,r,c,ro,co,m, n,n,1);
}
}else{
int size1=ceil(0.5*n);
int size2=n-size1;
if(numTh<posIni+nCores/2)
DiagInversion(matrix,r,c,ro,co,size1,nCores/2,posIni,numTh);
else
DiagInversion(matrix,r,c,ro+size1,co+size1, size2,nCores/2,posIni+nCores/2,numTh);
}
}
/**
 * @brief This function is an auxiliary function of TriangleInversion.
 *
 * This function is an auxiliary function of TriangleInversion and is in charge of products
 * of matrices and lower triangular matrices (previously stored in the auxiliary memory) that
 * are portions of the original matrix.
*
* @param matrix The matrix.
* @param r The number of rows of matrix.
* @param c The number of columns of matrix.
* @param ro The row where the submatrix starts.
* @param co The column where the submatrix starts.
* @param n The order of the submatrix.
* @param nCores The number of threads to perform the matrix inversion.
* @param posIni Variable to know what task to do by this thread.
 * @param deep The depth of the recursion.
* @param numTh Thread identifier.
 * @param memaux The auxiliary memory for this task.
* @param blockSize The size of its memory to save temporal results.
* @see TriangleInversion()
*/
void InversionNLProducts(double *matrix,int r, int c, int ro, int co, int n,int nCores,int posIni,int deep,int numTh, double *memaux, int blockSize){
int size1=ceil(0.5*n);
int size2=n-size1;
if(deep==1){
double *C1=&memaux[posIni*blockSize];
NLProduct(matrix,r,ro,c,co,matrix,r,ro+size1,c,co,size1,size2,-1.0,C1,size2,0,size1,0, nCores,posIni,numTh);
}else{
if(numTh<posIni+nCores/2)
InversionNLProducts(matrix,r,c,ro,co,size1,nCores/2,posIni,deep-1,numTh,memaux,blockSize);
else
InversionNLProducts(matrix,r,c,ro+size1,co+size1,size2,nCores/2,posIni+nCores/2,deep-1,numTh,memaux,blockSize);
}
}
/**
 * @brief This function is an auxiliary function of TriangleInversion.
 *
 * This function is an auxiliary function of TriangleInversion and is in charge of products
 * of lower triangular matrices (previously stored in the auxiliary memory) and matrices that
 * are portions of the original matrix.
*
* @param matrix The matrix.
* @param r The number of rows of matrix.
* @param c The number of columns of matrix.
* @param ro The row where the submatrix starts.
* @param co The column where the submatrix starts.
* @param n The order of the submatrix.
* @param nCores The number of threads to perform the matrix inversion.
* @param posIni Variable to know what task to do by this thread.
 * @param deep The depth of the recursion.
* @param numTh Thread identifier.
 * @param memaux The auxiliary memory for this task.
* @param blockSize The size of its memory to save temporal results.
* @see TriangleInversion()
*/
void InversionLNProducts(double *matrix,int r, int c, int ro, int co, int n,int nCores,int posIni,int deep,int numTh, double *memaux, int blockSize){
int size1=ceil(0.5*n);
int size2=n-size1;
if(deep==1){
double *C1=&memaux[posIni*blockSize];
LNProduct(matrix,r,ro+size1,c,co+size1,C1,size2,0,size1,0,size2,size1,1.0,matrix,r,ro+size1,c,co, nCores,posIni,numTh);
}else{
if(numTh<posIni+nCores/2)
InversionLNProducts(matrix,r,c,ro,co,size1,nCores/2,posIni,deep-1,numTh,memaux,blockSize);
else
InversionLNProducts(matrix,r,c,ro+size1,co+size1,size2,nCores/2,posIni+nCores/2,deep-1,numTh,memaux,blockSize);
}
}
/**
 * @brief An auxiliary function used to perform parallel matrix products.
 *
 * This is an auxiliary function that takes subm1 (a submatrix of matrix m1) and subm2 (a submatrix of matrix m2) and performs the following operation on subresult (a submatrix
 * of matrix result): subresult = K1*subm1*subm2 + K2*subresult
*
* @param m1 The first matrix.
* @param r1 The number of rows of the first matrix.
* @param c1 The number of columns of the first matrix.
* @param ro1 The row where the submatrix of m1 starts.
* @param co1 The column where the submatrix of m1 starts.
* @param m2 The second matrix.
* @param r2 The number of rows of matrix.
* @param c2 The number of columns of matrix.
* @param ro2 The row where the submatrix starts.
* @param co2 The column where the submatrix starts.
* @param n1 The number of rows of the subm1.
* @param n2 The number of columns of the subm1.
* @param n3 The number of columns of the subm2.
* @param K1 The first constant of the formula.
* @param K2 The second constant of the formula.
 * @param result The matrix in which to store the result.
* @param rr The number of rows of result.
* @param cr The number of columns of result.
* @param ror The row where the submatrix of result starts.
* @param cor The column where the submatrix of result starts.
* @param nCores The total number of threads.
* @param posIni Variable to know what task to do by this thread.
* @param numTh The thread identifier.
 * @param orientation Auxiliary variable indicating the next task to perform.
*/
void NNProduct(double *m1,int r1,int ro1,int c1, int co1,double *m2,int r2,int ro2,int c2, int co2,int n1,int n2,int n3,double K1,double K2,double *result,int rr,int ror,int cr, int cor, int nCores,int posIni,int numTh,int orientation){
if(nCores <= 1){
if(n1 >0 & n3>0){
double *mresultT=auxmemory2[numTh];
getSubMatrix(result,rr,cr,ror,cor,mresultT, n1,n3,nCores);
if(n2 > 0){
double *m1T=auxmemory1[numTh];
double *m2T=auxmemory3[numTh];
getSubMatrix(m1,r1,c1,ro1,co1,m1T, n1,n2,nCores);
getSubMatrix(m2,r2,c2,ro2,co2,m2T, n2,n3,nCores);
char transN = 'N';
char transY = 'N';
dgemm_(&transN, &transY, &(n1), &(n3), &(n2),&(K1), m1T, &(n1), m2T, &(n2), &(K2), mresultT, &(n1));
//cblas_dgemm (CblasColMajor, CblasNoTrans, CblasNoTrans, n1,n3,n2,K1, m1T, n1, m2T, n2, K2, mresultT, n1);
}else{
int i;
                for(i=0;i<n1*n3;i++) mresultT[i]=K2*mresultT[i];
}
putSubMatrix(result,rr,cr,ror,cor,mresultT, n1,n3,nCores);
//free(mresultT);
}
}else{
int rows1A=ceil(0.5*n1);
int rows1B=n1-rows1A;
int cols1A=ceil(0.5*n2);
int cols1B=n2-cols1A;
int rows2A=ceil(0.5*n2);
int rows2B=n2-rows2A;
int cols2A=ceil(0.5*n3);
int cols2B=n3-cols2A;
if(numTh<posIni+nCores/2){
NNProduct(m1,r1,ro1,c1,co1,m2,r2,ro2,c2,co2,rows1A,cols1A,cols2A,K1,K2,result,rr,ror,cr,cor, nCores/2,posIni,numTh,orientation);
NNProduct(m1,r1,ro1,c1,co1+cols1A,m2,r2,ro2+rows2A,c2,co2,rows1A,cols1B,cols2A,K1,1.0,result,rr,ror,cr,cor, nCores/2,posIni,numTh,orientation);
if(orientation==1){
NNProduct(m1,r1,ro1+rows1A,c1,co1,m2,r2,ro2,c2,co2,rows1B,cols1A,cols2A,K1,K2,result,rr,ror+rows1A,cr,cor,nCores/2,posIni,numTh,orientation);
NNProduct(m1,r1,ro1+rows1A,c1,co1+cols1A,m2,r2,ro2+rows2A,c2,co2,rows1B,cols1B,cols2A,K1,1.0,result,rr,ror+rows1A,cr,cor, nCores/2,posIni,numTh,orientation);
}else{
NNProduct(m1,r1,ro1,c1,co1,m2,r2,ro2,c2,co2+cols2A,rows1A,cols1A,cols2B,K1,K2,result,rr,ror,cr,cor+cols2A, nCores/2,posIni,numTh,orientation);
NNProduct(m1,r1,ro1,c1,co1+cols1A,m2,r2,ro2+rows2A,c2,co2+cols2A,rows1A,cols1B,cols2B,K1,1.0,result,rr,ror,cr,cor+cols2A, nCores/2,posIni,numTh,orientation);
}
}else{
if(orientation==2){
NNProduct(m1,r1,ro1+rows1A,c1,co1,m2,r2,ro2,c2,co2,rows1B,cols1A,cols2A,K1,K2,result,rr,ror+rows1A,cr,cor,nCores/2,posIni+nCores/2,numTh,orientation);
NNProduct(m1,r1,ro1+rows1A,c1,co1+cols1A,m2,r2,ro2+rows2A,c2,co2,rows1B,cols1B,cols2A,K1,1.0,result,rr,ror+rows1A,cr,cor, nCores/2,posIni+nCores/2,numTh,orientation);
}else{
NNProduct(m1,r1,ro1,c1,co1,m2,r2,ro2,c2,co2+cols2A,rows1A,cols1A,cols2B,K1,K2,result,rr,ror,cr,cor+cols2A, nCores/2,posIni+nCores/2,numTh,orientation);
NNProduct(m1,r1,ro1,c1,co1+cols1A,m2,r2,ro2+rows2A,c2,co2+cols2A,rows1A,cols1B,cols2B,K1,1.0,result,rr,ror,cr,cor+cols2A, nCores/2,posIni+nCores/2,numTh,orientation);
}
NNProduct(m1,r1,ro1+rows1A,c1,co1,m2,r2,ro2,c2,co2+cols2A,rows1B,cols1A,cols2B,K1,K2,result,rr,ror+rows1A,cr,cor+cols2A, nCores/2,posIni+nCores/2,numTh,orientation);
NNProduct(m1,r1,ro1+rows1A,c1,co1+cols1A,m2,r2,ro2+rows2A,c2,co2+cols2A,rows1B,cols1B,cols2B,K1,1.0,result,rr,ror+rows1A,cr,cor+cols2A, nCores/2,posIni+nCores/2,numTh,orientation);
}
}
}
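/*
 * Minimal column-major dgemm_ usage example (illustrative only, hypothetical
 * helper name): the leaf case above issues exactly this kind of call on the
 * extracted sub-blocks. With column-major storage, {1,2,3,4} is [1 3; 2 4].
 */
static void example_dgemm_2x2(void)
{
    double A[4] = {1, 2, 3, 4};   /* [1 3; 2 4] */
    double B[4] = {1, 0, 0, 1};   /* identity */
    double C[4] = {5, 5, 5, 5};   /* overwritten because beta = 0 */
    int n = 2;
    double one = 1.0, zero = 0.0;
    char noTrans = 'N';
    /* C = 1.0*A*B + 0.0*C, all matrices n-by-n with leading dimension n */
    dgemm_(&noTrans, &noTrans, &n, &n, &n, &one, A, &n, B, &n, &zero, C, &n);
    /* C now holds a copy of A */
}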
/**
* @brief It moves a submatrix from matrix m1 to matrix m2.
*
 * This function takes a submatrix from matrix m1 and places it in a submatrix of matrix m2.
* @param m1 The matrix origin.
* @param r1 The number of rows of the origin matrix.
* @param c1 The number of columns of the origin matrix.
* @param ro1 The row of the origin matrix where the submatrix starts.
* @param co1 The column of the origin where the submatrix starts.
* @param m2 The destination matrix.
* @param r2 The number of rows of the destination matrix.
* @param c2 The number of columns of the destination matrix.
* @param ro2 The row of the destination matrix where the submatrix starts.
* @param co2 The column of the destination matrix where the submatrix starts.
* @param n1 The number of rows of the submatrix.
* @param n2 The number of columns of the submatrix.
* @param numTh The thread identifier.
* @param nCores The total number of threads.
* @param posIni Variable to know what task to do by this thread.
*/
void MoveMatrix(double *m1,int r1,int ro1,int c1, int co1,double *m2,int r2,int ro2,int c2, int co2, int n1, int n2, int nCores,int posIni,int numTh){
if(nCores <= 1){
if(n1 >0 & n2>0){
double *mmv=auxmemory2[numTh];
getSubMatrix(m1,r1,c1,ro1,co1,mmv, n1,n2,nCores);
putSubMatrix(m2,r2,c2,ro2,co2,mmv, n1,n2,nCores);
}
}else{
int rows1A=ceil(0.5*n1);
int rows1B=n1-rows1A;
int cols1A=ceil(0.5*n2);
int cols1B=n2-cols1A;
if(numTh<posIni+nCores/2){
MoveMatrix(m1,r1,ro1,c1,co1,m2,r2,ro2,c2,co2,rows1A,cols1A,nCores/2,posIni,numTh);
MoveMatrix(m1,r1,ro1+rows1A,c1,co1,m2,r2,ro2+rows1A,c2,co2,rows1B,cols1A,nCores/2,posIni,numTh);
}else{
MoveMatrix(m1,r1,ro1,c1,co1+cols1A,m2,r2,ro2,c2,co2+cols1A,rows1A,cols1B,nCores/2,posIni+nCores/2,numTh);
MoveMatrix(m1,r1,ro1+rows1A,c1,co1+cols1A,m2,r2,ro2+rows1A,c2,co2+cols1A,rows1B,cols1B,nCores/2,posIni+nCores/2,numTh);
}
}
}
/**
 * @brief An auxiliary function used to perform parallel matrix products.
 *
 * This is an auxiliary function that takes subm1 (a submatrix of the transpose of matrix m1) and subm2 (a submatrix of matrix m2)
 * and performs the following operation on subresult (a submatrix of matrix result): subresult = K1*subm1*subm2 + K2*subresult
*
* @param m1 The first matrix.
* @param r1 The number of rows of the first matrix.
* @param c1 The number of columns of the first matrix.
* @param ro1 The row where the submatrix of m1 starts.
* @param co1 The column where the submatrix of m1 starts.
* @param m2 The second matrix.
* @param r2 The number of rows of matrix.
* @param c2 The number of columns of matrix.
* @param ro2 The row where the submatrix starts.
* @param co2 The column where the submatrix starts.
* @param n1 The number of rows of the subm1.
* @param n2 The number of columns of the subm1.
* @param n3 The number of columns of the subm2.
* @param K1 The first constant of the formula.
* @param K2 The second constant of the formula.
 * @param result The matrix in which to store the result.
* @param rr The number of rows of result.
* @param cr The number of columns of result.
* @param ror The row where the submatrix of result starts.
* @param cor The column where the submatrix of result starts.
* @param nCores The total number of threads.
* @param posIni Variable to know what task to do by this thread.
* @param numTh The thread identifier.
*/
void TNNProduct(double *m1,int r1,int ro1,int c1, int co1,double *m2,int r2,int ro2,int c2, int co2,int n1,int n2,int n3,double K1,double K2,double *result,int rr,int ror,int cr, int cor, int nCores,int posIni,int numTh){
if(nCores <= 1){
if(n2 >0 & n3>0){
double *mresultT=auxmemory2[numTh];
getSubMatrix(result,rr,cr,ror,cor,mresultT, n2,n3,nCores);
if(n1 >0){
double *m1T=auxmemory1[numTh];
double *m2T=auxmemory3[numTh];
getSubMatrix(m1,r1,c1,ro1,co1,m1T, n1,n2,nCores);
getSubMatrix(m2,r2,c2,ro2,co2,m2T, n1,n3,nCores);
            char transN = 'N';
            char transY = 'T';
            dgemm_(&transY, &transN, &(n2), &(n3), &(n1),&(K1), m1T, &(n1), m2T, &(n1), &(K2), mresultT, &(n2));
//cblas_dgemm (CblasColMajor, CblasTrans, CblasNoTrans, n2,n3,n1,K1, m1T, n1, m2T, n1, K2, mresultT, n2);
}else{
int i;
for(i=0;i<n2*n3;i++) mresultT[i]=K2*mresultT[i];
}
putSubMatrix(result,rr,cr,ror,cor,mresultT, n2,n3,nCores);
}
}else{
int rows1A=ceil(0.5*n1);
int rows1B=n1-rows1A;
int cols1A=ceil(0.5*n2);
int cols1B=n2-cols1A;
int rows2A=ceil(0.5*n1);
int rows2B=n1-rows1A;
int cols2A=ceil(0.5*n3);
int cols2B=n3-cols2A;
if(numTh<posIni+nCores/2){
TNNProduct(m1,r1,ro1,c1,co1,m2,r2,ro2,c2,co2,rows1A,cols1A,cols2A,K1,K2,result,rr,ror,cr,cor, nCores/2,posIni,numTh);
TNNProduct(m1,r1,ro1+rows1A,c1,co1,m2,r2,ro2+rows2A,c2,co2,rows1B,cols1A,cols2A,K1,1.0,result,rr,ror,cr,cor, nCores/2,posIni,numTh);
TNNProduct(m1,r1,ro1,c1,co1+cols1A,m2,r2,ro2,c2,co2,rows1A,cols1B,cols2A,K1,K2,result,rr,ror+cols1A,cr,cor,nCores/2,posIni,numTh);
TNNProduct(m1,r1,ro1+rows1A,c1,co1+cols1A,m2,r2,ro2+rows2A,c2,co2,rows1B,cols1B,cols2A,K1,1.0,result,rr,ror+cols1A,cr,cor, nCores/2,posIni,numTh);
}else{
TNNProduct(m1,r1,ro1,c1,co1,m2,r2,ro2,c2,co2+cols2A,rows1A,cols1A,cols2B,K1,K2,result,rr,ror,cr,cor+cols2A, nCores/2,posIni+nCores/2,numTh);
TNNProduct(m1,r1,ro1+rows1A,c1,co1,m2,r2,ro2+rows2A,c2,co2+cols2A,rows1B,cols1A,cols2B,K1,1.0,result,rr,ror,cr,cor+cols2A, nCores/2,posIni+nCores/2,numTh);
TNNProduct(m1,r1,ro1,c1,co1+cols1A,m2,r2,ro2,c2,co2+cols2A,rows1A,cols1B,cols2B,K1,K2,result,rr,ror+cols1A,cr,cor+cols2A, nCores/2,posIni+nCores/2,numTh);
TNNProduct(m1,r1,ro1+rows1A,c1,co1+cols1A,m2,r2,ro2+rows2A,c2,co2+cols2A,rows1B,cols1B,cols2B,K1,1.0,result,rr,ror+cols1A,cr,cor+cols2A, nCores/2,posIni+nCores/2,numTh);
}
}
}
/**
 * @brief An auxiliary function used to perform parallel matrix products.
 *
 * This is an auxiliary function that takes subm1 (a submatrix of matrix m1) and subm2 (a submatrix of the transpose of matrix m2)
 * and performs the following operation on subresult (a submatrix of matrix result): subresult = K1*subm1*subm2 + K2*subresult
*
* @param m1 The first matrix.
* @param r1 The number of rows of the first matrix.
* @param c1 The number of columns of the first matrix.
* @param ro1 The row where the submatrix of m1 starts.
* @param co1 The column where the submatrix of m1 starts.
* @param m2 The second matrix.
* @param r2 The number of rows of matrix.
* @param c2 The number of columns of matrix.
* @param ro2 The row where the submatrix starts.
* @param co2 The column where the submatrix starts.
* @param n1 The number of rows of the subm1.
* @param n2 The number of columns of the subm1.
* @param n3 The number of columns of the subm2.
* @param K1 The first constant of the formula.
* @param K2 The second constant of the formula.
 * @param result The matrix in which to store the result.
* @param rr The number of rows of result.
* @param cr The number of columns of result.
* @param ror The row where the submatrix of result starts.
* @param cor The column where the submatrix of result starts.
* @param nCores The total number of threads.
* @param posIni Variable to know what task to do by this thread.
* @param numTh The thread identifier.
*/
void NNTProduct(double *m1,int r1,int ro1,int c1, int co1,double *m2,int r2,int ro2,int c2, int co2,int n1,int n2,int n3,double K1,double K2,double *result,int rr,int ror,int cr, int cor, int nCores,int posIni,int numTh){
if(nCores <= 1){
if(n1 >0 & n3>0){
double *mresultT=auxmemory2[numTh];
getSubMatrix(result,rr,cr,ror,cor,mresultT, n1,n3,nCores);
if(n2 > 0){
double *m1T=auxmemory1[numTh];
double *m2T=auxmemory3[numTh];
getSubMatrix(m1,r1,c1,ro1,co1,m1T, n1,n2,nCores);
getSubMatrix(m2,r2,c2,ro2,co2,m2T, n3,n2,nCores);
char transN = 'N';
char transY = 'T';
dgemm_(&transN, &transY, &(n1), &(n3), &(n2),&(K1), m1T, &(n1), m2T, &(n3), &(K2), mresultT, &(n1));
//cblas_dgemm (CblasColMajor, CblasNoTrans, CblasTrans, n1,n3,n2,K1, m1T, n1, m2T, n3, K2, mresultT, n1);
}else{
int i;
for(i=0;i<n1*n3;i++) mresultT[i]=K2*mresultT[i];
}
putSubMatrix(result,rr,cr,ror,cor,mresultT, n1,n3,nCores);
}
}else{
int rows1A=ceil(0.5*n1);
int rows1B=n1-rows1A;
int cols1A=ceil(0.5*n2);
int cols1B=n2-cols1A;
int rows2A=ceil(0.5*n3);
int rows2B=n3-rows2A;
int cols2A=ceil(0.5*n2);
int cols2B=n2-cols2A;
if(numTh<posIni+nCores/2){
NNTProduct(m1,r1,ro1,c1,co1,m2,r2,ro2,c2,co2,rows1A,cols1A,rows2A,K1,K2,result,rr,ror,cr,cor, nCores/2,posIni,numTh);
NNTProduct(m1,r1,ro1,c1,co1+cols1A,m2,r2,ro2,c2,co2+cols2A,rows1A,cols1B,rows2A,K1,1.0,result,rr,ror,cr,cor, nCores/2,posIni,numTh);
NNTProduct(m1,r1,ro1,c1,co1,m2,r2,ro2+rows2A,c2,co2,rows1A,cols1A,rows2B,K1,K2,result,rr,ror,cr,cor+rows2A, nCores/2,posIni,numTh);
NNTProduct(m1,r1,ro1,c1,co1+cols1A,m2,r2,ro2+rows2A,c2,co2+cols2A,rows1A,cols1B,rows2B,K1,1.0,result,rr,ror,cr,cor+rows2A, nCores/2,posIni,numTh);
}else{
NNTProduct(m1,r1,ro1+rows1A,c1,co1,m2,r2,ro2+rows2A,c2,co2,rows1B,cols1A,rows2B,K1,K2,result,rr,ror+rows1A,cr,cor+rows2A, nCores/2,posIni+nCores/2,numTh);
NNTProduct(m1,r1,ro1+rows1A,c1,co1+cols1A,m2,r2,ro2+rows2A,c2,co2+cols2A,rows1B,cols1B,rows2B,K1,1.0,result,rr,ror+rows1A,cr,cor+rows2A, nCores/2,posIni+nCores/2,numTh);
NNTProduct(m1,r1,ro1+rows1A,c1,co1,m2,r2,ro2,c2,co2,rows1B,cols1A,rows2A,K1,K2,result,rr,ror+rows1A,cr,cor,nCores/2,posIni+nCores/2,numTh);
NNTProduct(m1,r1,ro1+rows1A,c1,co1+cols1A,m2,r2,ro2,c2,co2+cols2A,rows1B,cols1B,rows2A,K1,1.0,result,rr,ror+rows1A,cr,cor, nCores/2,posIni+nCores/2,numTh);
}
}
}
/**
 * @brief An auxiliary function used to perform parallel matrix products.
 *
 * This is an auxiliary function that takes subm1 (a submatrix of matrix m1) and performs the following operation
 * on subresult (a submatrix of matrix result): subresult = K1*subm1*subm1^T + K2*subresult
*
* @param m1 The first matrix.
* @param r1 The number of rows of the first matrix.
* @param c1 The number of columns of the first matrix.
* @param ro1 The row where the submatrix of m1 starts.
* @param co1 The column where the submatrix of m1 starts.
* @param n1 The number of rows of the subm1.
* @param n2 The number of columns of the subm1.
* @param K1 The first constant of the formula.
* @param K2 The second constant of the formula.
 * @param result The matrix in which to store the result.
* @param rr The number of rows of result.
* @param cr The number of columns of result.
* @param ror The row where the submatrix of result starts.
* @param cor The column where the submatrix of result starts.
* @param nCores The total number of threads.
* @param posIni Variable to know what task to do by this thread.
* @param numTh The thread identifier.
*/
void AATProduct(double *m1,int r1,int ro1,int c1, int co1,int n1,int n2,double K1,double K2,double *result,int rr,int ror,int cr, int cor, int nCores,int posIni,int numTh){
if(nCores <= 1){
if(n1 >0 & n2>0){
double *m1T = (double *) malloc(n1*n2*sizeof(double));
double *mresultT = (double *) malloc(n1*n1*sizeof(double));
getSubMatrix(m1,r1,c1,ro1,co1,m1T, n1,n2,nCores);
getSubMatrix(result,rr,cr,ror,cor,mresultT, n1,n1,nCores);
char transN = 'L';
char transY = 'N';
dsyrk_(&transN, &transY, &(n1), &(n2), &(K1), m1T, &(n1), &(K2), mresultT, &(n1));
//cblas_dsyrk (CblasColMajor, CblasLower, CblasNoTrans, n1,n2,K1, m1T, n1, K2, mresultT, n1);
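            /* dsyrk_ only writes the 'L' triangle; mirror it so the full symmetric block is stored */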
int i,j;
for(i=0;i<n1;i++){
for(j=i;j<n1;j++){
mresultT[j*n1+i]=mresultT[i*n1+j];
}
}
putSubMatrix(result,rr,cr,ror,cor,mresultT, n1,n1,nCores);
free(m1T);
free(mresultT);
}
}else{
int rows1A=ceil(0.5*n1);
int rows1B=n1-rows1A;
int cols1A=ceil(0.5*n2);
int cols1B=n2-cols1A;
if(numTh<posIni+nCores/2){
AATProduct(m1,r1,ro1,c1,co1,rows1A,cols1A,K1,K2,result,rr,ror,cr,cor, nCores/2,posIni,numTh);
AATProduct(m1,r1,ro1,c1,co1+cols1A,rows1A,cols1B,K1,1.0,result,rr,ror,cr,cor, nCores/2,posIni,numTh);
}else{
AATProduct(m1,r1,ro1+rows1A,c1,co1,rows1B,cols1A,K1,K2,result,rr,ror+rows1A,cr,cor+rows1A, nCores/2,posIni+nCores/2,numTh);
AATProduct(m1,r1,ro1+rows1A,c1,co1+cols1A,rows1B,cols1B,K1,1.0,result,rr,ror+rows1A,cr,cor+rows1A, nCores/2,posIni+nCores/2,numTh);
}
NNTProduct(m1,r1,ro1+rows1A,c1,co1,m1,r1,ro1,c1,co1,rows1B,cols1A,rows1A,K1,K2,result,rr,ror+rows1A,cr,cor,nCores,posIni,numTh);
NNTProduct(m1,r1,ro1+rows1A,c1,co1+cols1A,m1,r1,ro1,c1,co1+cols1A,rows1B,cols1B,rows1A,K1,1.0,result,rr,ror+rows1A,cr,cor, nCores,posIni,numTh);
}
}
/**
* @brief This function performs a product of a lower triangular submatrix of matrix m1
* and a submatrix of m2 in parallel.
*
 * Given subm1, a lower triangular submatrix of matrix m1, and subm2, a submatrix of m2,
 * this function computes K1*(subm1*subm2) and saves the result in a different matrix.
*
* @param m1 The first matrix.
* @param r1 The number of rows of the first matrix.
* @param c1 The number of columns of the first matrix.
* @param ro1 The row where the submatrix of m1 starts.
* @param co1 The column where the submatrix of m1 starts.
* @param m2 The second matrix.
* @param r2 The number of rows of matrix.
* @param c2 The number of columns of matrix.
* @param ro2 The row where the submatrix starts.
* @param co2 The column where the submatrix starts.
* @param n1 The number of rows of the submatrix1.
* @param n2 The number of columns of the submatrix2.
* @param K1 The constant of the formula.
 * @param result The matrix in which to store the result.
* @param rr The number of rows of result.
* @param cr The number of columns of result.
* @param ror The row where the submatrix of result starts.
* @param cor The column where the submatrix of result starts.
* @param nCores The total number of threads.
* @param posIni Variable to know what task to do by this thread.
* @param numTh The thread identifier.
*/
void LNProduct(double *m1,int r1,int ro1,int c1, int co1,double *m2,int r2,int ro2,int c2, int co2,int n1,int n2,double K1,double *result,int rr,int ror,int cr, int cor, int nCores,int posIni,int numTh){
if(nCores <= 1){
if(n1 >0 & n2>0){
double *m1T=auxmemory2[numTh];
double *mresultT=auxmemory1[numTh];
getSubMatrix(m1,r1,c1,ro1,co1,m1T, n1,n1,nCores);
getSubMatrix(m2,r2,c2,ro2,co2,mresultT, n1,n2,nCores);
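            /* dtrmm_ multiplies in place, so the m2 block is loaded into the result buffer first */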
char side = 'L';
char uplo = 'L';
char transa = 'N';
char diag = 'N';
dtrmm_(&side, &uplo, &transa, &diag, &(n1), &(n2), &(K1), m1T, &(n1), mresultT, &(n1));
//cblas_dtrmm (CblasColMajor, CblasLeft,CblasLower,CblasNoTrans,CblasNonUnit,n1,n2,K1, m1T, n1, mresultT, n1);
putSubMatrix(result,rr,cr,ror,cor,mresultT, n1,n2,nCores);
}
}else{
int rows1A=ceil(0.5*n1);
int rows1B=n1-rows1A;
int cols1A=ceil(0.5*n1);
int cols1B=n1-cols1A;
int rows2A=ceil(0.5*n1);
int rows2B=n1-cols1A;
int cols2A=ceil(0.5*n2);
int cols2B=n2-cols2A;
if(numTh<posIni+nCores/2){
LNProduct(m1,r1,ro1,c1,co1,m2,r2,ro2,c2,co2,rows1A,cols2A,K1,result,rr,ror,cr,cor, nCores/2,posIni,numTh);
LNProduct(m1,r1,ro1+rows1A,c1,co1+cols1A,m2,r2,ro2+rows2A,c2,co2,rows1B,cols2A,K1,result,rr,ror+rows1A,cr,cor, nCores/2,posIni,numTh);
NNProduct(m1,r1,ro1+rows1A,c1,co1,m2,r2,ro2,c2,co2,rows1B,cols1A,cols2A,K1,1.0,result,rr,ror+rows1A,cr,cor,nCores/2,posIni,numTh,1);
}else{
LNProduct(m1,r1,ro1,c1,co1,m2,r2,ro2,c2,co2+cols2A,rows1A,cols2B,K1,result,rr,ror,cr,cor+cols2A, nCores/2,posIni+nCores/2,numTh);
LNProduct(m1,r1,ro1+rows1A,c1,co1+cols1A,m2,r2,ro2+rows2A,c2,co2+cols2A,rows1B,cols2B,K1,result,rr,ror+rows1A,cr,cor+cols2A, nCores/2,posIni+nCores/2,numTh);
NNProduct(m1,r1,ro1+rows1A,c1,co1,m2,r2,ro2,c2,co2+cols2A,rows1B,cols1A,cols2B,K1,1,result,rr,ror+rows1A,cr,cor+cols2A, nCores/2,posIni+nCores/2,numTh,1);
}
}
}
/**
* @brief This function performs a product of a lower triangular submatrix of the transpose of matrix m1
* and a submatrix of m2 in parallel.
*
 * Given subm1, a lower triangular submatrix of the transpose of matrix m1, and subm2, a submatrix of m2,
 * this function computes K1*(subm1*subm2) and saves the result in a different matrix.
*
* @param m1 The first matrix.
* @param r1 The number of rows of the first matrix.
* @param c1 The number of columns of the first matrix.
* @param ro1 The row where the submatrix of m1 starts.
* @param co1 The column where the submatrix of m1 starts.
* @param m2 The second matrix.
* @param r2 The number of rows of matrix.
* @param c2 The number of columns of matrix.
* @param ro2 The row where the submatrix starts.
* @param co2 The column where the submatrix starts.
* @param n1 The number of rows of the submatrix1.
* @param n2 The number of columns of the submatrix2.
* @param K1 The constant of the formula.
 * @param result The matrix in which to store the result.
* @param rr The number of rows of result.
* @param cr The number of columns of result.
* @param ror The row where the submatrix of result starts.
* @param cor The column where the submatrix of result starts.
* @param nCores The total number of threads.
* @param posIni Variable to know what task to do by this thread.
* @param numTh The thread identifier.
*/
void LTNProduct(double *m1,int r1,int ro1,int c1, int co1,double *m2,int r2,int ro2,int c2, int co2,int n1,int n2,double K1,double *result,int rr,int ror,int cr, int cor, int nCores,int posIni,int numTh){
if(nCores <= 1){
if(n1 >0 & n2>0){
int in;
double *m1T=auxmemory1[numTh];
double *m2T=auxmemory2[numTh];
getSubMatrix(m1,r1,c1,ro1,co1,m1T, n1,n1,1);
getSubMatrix(result,rr,cr,ror,cor,m2T, n1,n2,1);
char side = 'L';
char uplo = 'L';
char transa = 'T';
char diag = 'N';
dtrmm_(&side, &uplo, &transa, &diag, &(n1), &(n2), &(K1), m1T, &(n1), m2T, &(n1));
//cblas_dtrmm (CblasColMajor, CblasLeft,CblasLower,CblasTrans,CblasNonUnit,n1,n2,K1, m1T, n1, m2T, n1);
putSubMatrix(result,rr,cr,ror,cor,m2T, n1,n2,1);
}
}else{
int rows1A=ceil(0.5*n1);
int rows1B=n1-rows1A;
int cols1A=ceil(0.5*n1);
int cols1B=n1-cols1A;
int rows2A=ceil(0.5*n1);
int rows2B=n1-cols1A;
int cols2A=ceil(0.5*n2);
int cols2B=n2-cols2A;
if(numTh<posIni+nCores/2){
LTNProduct(m1,r1,ro1+rows1A,c1,co1+cols1A,m2,r2,ro2+rows2A,c2,co2,rows1B,cols2A,K1,result,rr,ror+cols1A,cr,cor, nCores/2,posIni,numTh);
LTNProduct(m1,r1,ro1,c1,co1,m2,r2,ro2,c2,co2,rows1A,cols2A,K1,result,rr,ror,cr,cor, nCores/2,posIni,numTh);
TNNProduct(m1,r1,ro1+rows1A,c1,co1,m2,r2,ro2+rows2A,c2,co2,rows1B,cols1A,cols2A,K1,1.0,result,rr,ror,cr,cor,nCores/2,posIni,numTh);
}else{
LTNProduct(m1,r1,ro1+rows1A,c1,co1+cols1A,m2,r2,ro2+rows2A,c2,co2+cols2A,rows1B,cols2B,K1,result,rr,ror+cols1A,cr,cor+cols2A, nCores/2,posIni+nCores/2,numTh);
LTNProduct(m1,r1,ro1,c1,co1,m2,r2,ro2,c2,co2+cols2A,rows1A,cols2B,K1,result,rr,ror,cr,cor+cols2A, nCores/2,posIni+nCores/2,numTh);
TNNProduct(m1,r1,ro1+rows1A,c1,co1,m2,r2,ro2+rows2A,c2,co2+cols2A,rows1B,cols1A,cols2B,K1,1.0,result,rr,ror,cr,cor+cols2A,nCores/2,posIni+nCores/2,numTh);
}
}
}
/**
* @brief This function performs a product of a submatrix of matrix m1 and a triangular submatrix
* of m2 in parallel.
*
 * Given subm1, a submatrix of m1, and subm2, a triangular submatrix of m2,
 * this function computes K1*(subm1*subm2) and saves the result in a different matrix.
*
* @param m1 The first matrix.
* @param r1 The number of rows of the first matrix.
* @param c1 The number of columns of the first matrix.
* @param ro1 The row where the submatrix of m1 starts.
* @param co1 The column where the submatrix of m1 starts.
* @param m2 The second matrix.
* @param r2 The number of rows of matrix.
* @param c2 The number of columns of matrix.
* @param ro2 The row where the submatrix starts.
* @param co2 The column where the submatrix starts.
* @param n1 The number of rows of the submatrix1.
* @param n2 The number of columns of the submatrix1 (and rows and columns of submatrix2).
* @param K1 The constant of the formula.
 * @param result The matrix in which to store the result.
* @param rr The number of rows of result.
* @param cr The number of columns of result.
* @param ror The row where the submatrix of result starts.
* @param cor The column where the submatrix of result starts.
* @param nCores The total number of threads.
* @param posIni Variable to know what task to do by this thread.
* @param numTh The thread identifier.
*/
void NLProduct(double *m1,int r1,int ro1,int c1, int co1,double *m2,int r2,int ro2,int c2, int co2,int n1,int n2,double K1,double *result,int rr,int ror,int cr, int cor, int nCores,int posIni,int numTh){
if(nCores <= 1){
if(n1 >0 & n2>0){
double *m1T=auxmemory1[numTh];
double *mresultT=auxmemory2[numTh];
getSubMatrix(m1,r1,c1,ro1,co1,m1T, n1,n1,nCores);
getSubMatrix(m2,r2,c2,ro2,co2,mresultT, n2,n1,nCores);
char side = 'R';
char uplo = 'L';
char transa = 'N';
char diag = 'N';
dtrmm_(&side, &uplo, &transa, &diag, &(n2), &(n1), &(K1), m1T, &(n1), mresultT, &(n2));
//cblas_dtrmm (CblasColMajor, CblasRight,CblasLower,CblasNoTrans,CblasNonUnit,n2,n1,K1, m1T, n1, mresultT, n2);
putSubMatrix(result,rr,cr,ror,cor,mresultT, n2,n1,nCores);
}
}else{
int rows1A=ceil(0.5*n1);
int rows1B=n1-rows1A;
int cols1A=ceil(0.5*n1);
int cols1B=n1-cols1A;
int rows2A=ceil(0.5*n2);
int rows2B=n2-rows2A;
int cols2A=ceil(0.5*n1);
int cols2B=n1-cols2A;
if(numTh<posIni+nCores/2){
NLProduct(m1,r1,ro1,c1,co1,m2,r2,ro2,c2,co2,rows1A,rows2A,K1,result,rr,ror,cr,cor, nCores/2,posIni,numTh);
NNProduct(m2,r2,ro2,c2,co2+cols2A,m1,r1,ro1+rows1A,c1,co1,rows2A,cols2B,cols1A,K1,1,result,rr,ror,cr,cor, nCores/2,posIni,numTh,2);
NLProduct(m1,r1,ro1+rows1A,c1,co1+cols1A,m2,r2,ro2,c2,co2+cols2A,rows1B,rows2A,K1,result,rr,ror,cr,cor+cols1A, nCores/2,posIni,numTh);
}else{
NLProduct(m1,r1,ro1,c1,co1,m2,r2,ro2+rows2A,c2,co2,rows1A,rows2B,K1,result,rr,ror+rows2A,cr,cor,nCores/2,posIni+nCores/2,numTh);
NNProduct(m2,r2,ro2+rows2A,c2,co2+cols2A,m1,r1,ro1+rows1A,c1,co1,rows2B,cols2B,cols1A,K1,1,result,rr,ror+rows2A,cr,cor, nCores/2,posIni+nCores/2,numTh,2);
NLProduct(m1,r1,ro1+rows1A,c1,co1+cols1A,m2,r2,ro2+rows2A,c2,co2+cols2A,rows1B,rows2B,K1,result,rr,ror+rows2A,cr,cor+cols1A, nCores/2,posIni+nCores/2,numTh);
}
}
}
/**
* @brief This function performs a product of a submatrix of matrix m1 and a triangular submatrix
* of the transpose of m2 in parallel.
*
 * Given subm1, a submatrix of m1, and subm2, a triangular submatrix of the transpose of m2,
 * this function computes K1*(subm1*subm2) and saves the result in a different matrix.
*
* @param m1 The first matrix.
* @param r1 The number of rows of the first matrix.
* @param c1 The number of columns of the first matrix.
* @param ro1 The row where the submatrix of m1 starts.
* @param co1 The column where the submatrix of m1 starts.
* @param m2 The second matrix.
* @param r2 The number of rows of matrix.
* @param c2 The number of columns of matrix.
* @param ro2 The row where the submatrix starts.
* @param co2 The column where the submatrix starts.
* @param n1 The number of rows of the submatrix1.
* @param n2 The number of columns of the submatrix1 (and rows and columns of submatrix2).
* @param K1 The constant of the formula.
 * @param result The matrix in which to store the result.
* @param rr The number of rows of result.
* @param cr The number of columns of result.
* @param ror The row where the submatrix of result starts.
* @param cor The column where the submatrix of result starts.
* @param nCores The total number of threads.
* @param posIni Variable to know what task to do by this thread.
* @param numTh The thread identifier.
*/
void NLTProduct(double *m1,int r1,int ro1,int c1, int co1,double *m2,int r2,int ro2,int c2, int co2,int n1,int n2,double K1,double *result,int rr,int ror,int cr, int cor, int nCores,int posIni,int numTh){
if(nCores <= 1){
if(n1 >0 & n2>0){
double *m1T=auxmemory1[numTh];
double *m2T=auxmemory2[numTh];
getSubMatrix(m1,r1,c1,ro1,co1,m1T, n1,n1,nCores);
getSubMatrix(m2,r2,c2,ro2,co2,m2T, n2,n1,nCores);
char side = 'R';
char uplo = 'L';
char transa = 'T';
char diag = 'N';
dtrmm_(&side, &uplo, &transa, &diag, &(n2), &(n1), &(K1), m1T, &(n1), m2T, &(n2));
//cblas_dtrmm (CblasColMajor, CblasRight,CblasLower,CblasTrans,CblasNonUnit,n2,n1,K1, m1T, n1, m2T, n2);
putSubMatrix(result,rr,cr,ror,cor,m2T, n2,n1,nCores);
}
}else{
int rows1A=ceil(0.5*n1);
int rows1B=n1-rows1A;
int cols1A=ceil(0.5*n1);
int cols1B=n1-cols1A;
int rows2A=ceil(0.5*n2);
int rows2B=n2-rows2A;
int cols2A=ceil(0.5*n1);
int cols2B=n1-cols2A;
if(numTh<posIni+nCores/2){
NLTProduct(m1,r1,ro1,c1,co1,m2,r2,ro2,c2,co2,rows1A,rows2A,K1,result,rr,ror,cr,cor,nCores/2,posIni,numTh);
NLTProduct(m1,r1,ro1+rows1A,c1,co1+cols1A,m2,r2,ro2,c2,co2+cols2A,rows1B,rows2A,K1,result,rr,ror,cr,cor+rows1A,nCores/2,posIni,numTh);
NNTProduct(m2,r2,ro2,c2,co2,m1,r1,ro1+rows1A,c1,co1,rows2A,cols2A,rows1B,K1,1.0,result,rr,ror,cr,cor+rows1A,nCores/2,posIni,numTh);
}else{
NLTProduct(m1,r1,ro1,c1,co1,m2,r2,ro2+rows2A,c2,co2,rows1A,rows2B,K1,result,rr,ror+rows2A,cr,cor, nCores/2,posIni+nCores/2,numTh);
NLTProduct(m1,r1,ro1+rows1A,c1,co1+cols1A,m2,r2,ro2+rows2A,c2,co2+cols2A,rows1B,rows2B,K1,result,rr,ror+rows2A,cr,cor+rows1A, nCores/2,posIni+nCores/2,numTh);
NNTProduct(m2,r2,ro2+rows2A,c2,co2,m1,r1,ro1+rows1A,c1,co1,rows2B,cols2A,rows1B,K1,1.0,result,rr,ror+rows2A,cr,cor+rows1A, nCores/2,posIni+nCores/2,numTh);
}
}
}
/**
* @brief This function performs a product of a vector and the transpose of a square matrix in parallel.
*
* This function performs a product of a vector and the transpose of a matrix in parallel.
*
* @param m1 The vector.
* @param size The length of the vector and the order of the square matrix.
* @param m2 The square matrix.
 * @param result The matrix in which to store the result.
 * @param numThreads Number of threads to perform this task.
*/
void ParallelVectorMatrixT(double *m1,int size,double *m2,double *result, int numThreads){
int i;
char transN = 'N';
char transY = 'T';
int aux=1;
double aux2=0;
double factor=1.0;
#pragma omp parallel default(shared) private(i)
{
#pragma omp for schedule(static)
for (i=0;i<numThreads;i++){
int InitCol=round(i*size/numThreads);
int FinalCol=round((i+1)*size/numThreads)-1;
int lenghtCol=FinalCol-InitCol+1;
if(lenghtCol>0){
dgemm_(&transN, &transN, &(lenghtCol), &(aux), &(size),&factor, &m2[InitCol], &(size), m1, &size, &aux2, &result[InitCol], &(size));
}
}
}
}
/**
 * @brief This function performs a product of a vector and a square matrix in parallel.
 *
 * This function performs a vector-matrix product in parallel.
 *
 * @param m1 The vector.
 * @param size The length of the vector and the order of the square matrix.
 * @param m2 The square matrix.
 * @param result The matrix in which to store the result.
 * @param numThreads Number of threads to perform this task.
*/
void ParallelVectorMatrix(double *m1,int size,double *m2,double *result, int numThreads){
int i;
char transN = 'N';
char transY = 'N';
int aux=1;
double aux2=0;
double factor=1.0;
#pragma omp parallel default(shared) private(i)
{
#pragma omp for schedule(static)
for (i=0;i<numThreads;i++){
int InitCol=round(i*size/numThreads);
int FinalCol=round((i+1)*size/numThreads)-1;
int lenghtCol=FinalCol-InitCol+1;
if(lenghtCol>0){
dgemm_(&transN, &transY, &aux, &(lenghtCol), &(size),&factor, m1, &aux, &m2[size*InitCol], &size, &aux2, &result[InitCol], &aux);
}
}
}
}
/**
* @endcond
*/
|
test.c | // RUN: %libomptarget-compile-run-and-check-aarch64-unknown-linux-gnu
// RUN: %libomptarget-compile-run-and-check-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-run-and-check-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-run-and-check-x86_64-pc-linux-gnu
#define IN_PARALLEL 0
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#define N 1000
/*
* Test if it is possible to:
* 1. target enter data to depend 'in' and 'out'
* 2. target exit data to depend 'in' and 'out'
* 3. Mix target-based tasks with host tasks.
*/
int main(){
int errors = 0;
bool isHost = true;
double sum = 0.0;
double* h_array = (double *) malloc(N * sizeof(double));
double* in_1 = (double *) malloc(N * sizeof(double));
double* in_2 = (double *) malloc(N * sizeof(double));
#if IN_PARALLEL
#pragma omp parallel
{
#pragma omp master
{
#endif
// host task
#pragma omp task depend(out: in_1) shared(in_1)
{
for (int i = 0; i < N; ++i) {
in_1[i] = 1;
}
}
// host task
#pragma omp task depend(out: in_2) shared(in_2)
{
for (int i = 0; i < N; ++i) {
in_2[i] = 2;
}
}
// target enter data
#pragma omp target enter data nowait map(alloc: h_array[0:N]) map(to: in_1[0:N]) map(to: in_2[0:N]) depend(out: h_array) depend(in: in_1) depend(in: in_2)
// target task to compute on the device
#pragma omp target nowait map(tofrom: isHost) depend(inout: h_array)
{
isHost = omp_is_initial_device();
for (int i = 0; i < N; ++i) {
h_array[i] = in_1[i]*in_2[i];
}
}
// target exit data
#pragma omp target exit data nowait map(from: h_array[0:N]) depend(inout: h_array)
// host task
#pragma omp task depend(in: h_array) shared(sum, h_array)
{
// checking results
for (int i = 0; i < N; ++i) {
sum += h_array[i];
}
}
#if IN_PARALLEL
} // master
} // parallel
#else
#pragma omp taskwait
#endif
errors = 2.0*N != sum;
if (!errors)
printf("Test passed\n");
else
printf("Test failed on %s: sum = %g\n", (isHost ? "host" : "device"), sum);
return errors;
}
|
pr66142.c | /* PR middle-end/66142 */
/* { dg-do compile } */
/* { dg-additional-options "-ffast-math -fopenmp-simd" } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */
struct A { float x, y; };
struct B { struct A t, w; };
static inline float
bar (const struct B *x)
{
struct A r;
float b, c, d;
r.x = x->t.x;
r.y = x->t.y;
b = r.x * x->w.x + r.y * x->w.y;
c = b + r.x * r.x + r.y * r.y;
if (c > 0.0f)
return c + 3.0f;
return 0.0f;
}
void
foo (float *a, float *b, float *c)
{
int i;
float z = 0.0f;
float u = *a;
#pragma omp simd
for (i = 0; i < 32; i++)
{
float x = b[i];
float y = c[i];
struct B r;
r.t.x = 1.0f;
r.t.y = u;
r.w.x = x;
r.w.y = y;
z += bar (&r);
}
*a = z;
}
/* { dg-final { scan-tree-dump-times "vectorized 1 loops in function" 1 "vect" { target vect_condition } } } */
|
Example_udr.2.c | /*
* @@name: udr.2.c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
* @@version: omp_4.0
*/
#include <stdio.h>
#include <limits.h>
struct point {
int x;
int y;
};
#pragma omp declare reduction(min : struct point : \
omp_out.x = omp_in.x > omp_out.x ? omp_out.x : omp_in.x, \
omp_out.y = omp_in.y > omp_out.y ? omp_out.y : omp_in.y ) \
initializer( omp_priv = { INT_MAX, INT_MAX } )
#pragma omp declare reduction(max : struct point : \
omp_out.x = omp_in.x < omp_out.x ? omp_out.x : omp_in.x, \
omp_out.y = omp_in.y < omp_out.y ? omp_out.y : omp_in.y ) \
initializer( omp_priv = { 0, 0 } )
void find_enclosing_rectangle ( int n, struct point points[] )
{
struct point minp = { INT_MAX, INT_MAX }, maxp = {0,0};
int i;
#pragma omp parallel for reduction(min:minp) reduction(max:maxp)
for ( i = 0; i < n; i++ ) {
if ( points[i].x < minp.x ) minp.x = points[i].x;
if ( points[i].y < minp.y ) minp.y = points[i].y;
if ( points[i].x > maxp.x ) maxp.x = points[i].x;
if ( points[i].y > maxp.y ) maxp.y = points[i].y;
}
printf("min = (%d, %d)\n", minp.x, minp.y);
printf("max = (%d, %d)\n", maxp.x, maxp.y);
}
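/* Illustrative call (not part of the original example):
 *   struct point pts[] = { {1,5}, {4,2}, {3,3} };
 *   find_enclosing_rectangle(3, pts);
 * prints min = (1, 2) and max = (4, 5), i.e. the corners of the bounding box. */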
|
mxEvaluateSurfValue.c | #ifdef _OPENMP
#include <omp.h>
#endif
#include "mex.h"
#include "blas.h"
#define NRHS 4
#define NLHS 2
// #define DEBUG
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
/* check input & output */
if (nrhs != NRHS) {
mexPrintf("Matlab:%s:InvalidNumberInput,\n", __FILE__);
mexPrintf("%d inputs required.\n", NRHS);
}
if (nlhs != NLHS) {
mexPrintf("Matlab:%s:InvalidNumberOutput,\n", __FILE__);
mexPrintf("%d inputs required.\n", NLHS);
}
double *FToE = mxGetPr(prhs[0]);
double *FToN1 = mxGetPr(prhs[1]);
double *FToN2 = mxGetPr(prhs[2]);
double *fphys = mxGetPr(prhs[3]);
const mwSize *dims = mxGetDimensions(prhs[1]);
const int Nfp = dims[0];
const int Ne = dims[1]; // num of edges
dims = mxGetDimensions(prhs[3]);
const int Np = dims[0]; // num of interp nodes
const int K = dims[1]; // num of elements
int Nfield;
if (mxGetNumberOfDimensions(prhs[3]) > 2) {
Nfield = dims[2];
} else {
Nfield = 1;
}
#ifdef DEBUG
mexPrintf("Nfp = %d, Ne = %d, Np = %d, K = %d\n", Nfp, Ne, Np, K);
#endif
const size_t ndimOut = 3;
const mwSize dimOut[3] = {Nfp, Ne, Nfield};
plhs[0] = mxCreateNumericArray(ndimOut, dimOut, mxDOUBLE_CLASS, mxREAL);
plhs[1] = mxCreateNumericArray(ndimOut, dimOut, mxDOUBLE_CLASS, mxREAL);
double *fM = mxGetPr(plhs[0]);
double *fP = mxGetPr(plhs[1]);
#ifdef _OPENMP
#pragma omp parallel for num_threads(DG_THREADS)
#endif
for (int fld = 0; fld < Nfield; fld++) {
double *fM_ = fM + Nfp * Ne * fld;
double *fP_ = fP + Nfp * Ne * fld;
double *fval = fphys + Np * K * fld;
for (int k = 0; k < Ne; k++) {
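      /* FToE/FToN1/FToN2 hold 1-based MATLAB indices; subtract 1 for C indexing */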
const int e1 = (int)FToE[2 * k] - 1;
const int e2 = (int)FToE[2 * k + 1] - 1;
for (int n = 0; n < Nfp; n++) {
const int sk = n + k * Nfp;
const int n1 = (int)FToN1[sk] + e1 * Np - 1;
const int n2 = (int)FToN2[sk] + e2 * Np - 1;
fM_[sk] = fval[n1];
fP_[sk] = fval[n2];
}
}
}
}
|
bitshuffle_core.c | /*
* Bitshuffle - Filter for improving compression of typed binary data.
*
* Author: Kiyoshi Masui <kiyo@physics.ubc.ca>
* Website: http://www.github.com/kiyo-masui/bitshuffle
* Created: 2014
*
* See LICENSE file for details about copyright and rights to use.
*
*/
#include "bitshuffle_core.h"
#include "bitshuffle_internals.h"
#include <stdio.h>
#include <string.h>
#if defined(__AVX2__) && defined (__SSE2__)
#define USEAVX2
#endif
#if defined(__SSE2__)
#define USESSE2
#endif
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#ifdef __aarch64__
#define USEARMNEON
#endif
#endif
// Conditional includes for SSE2 and AVX2.
#ifdef USEAVX2
#include <immintrin.h>
#elif defined USESSE2
#include <emmintrin.h>
#elif defined USEARMNEON
#include <arm_neon.h>
#endif
#if defined(_OPENMP) && defined(_MSC_VER)
typedef int64_t omp_size_t;
#else
typedef size_t omp_size_t;
#endif
// Macros.
#define CHECK_MULT_EIGHT(n) if (n % 8) return -80;
#define MAX(X,Y) ((X) > (Y) ? (X) : (Y))
/* ---- Functions indicating compile time instruction set. ---- */
int bshuf_using_NEON(void) {
#ifdef USEARMNEON
return 1;
#else
return 0;
#endif
}
int bshuf_using_SSE2(void) {
#ifdef USESSE2
return 1;
#else
return 0;
#endif
}
int bshuf_using_AVX2(void) {
#ifdef USEAVX2
return 1;
#else
return 0;
#endif
}
/* ---- Worker code not requiring special instruction sets. ----
*
* The following code does not use any x86 specific vectorized instructions
* and should compile on any machine
*
*/
/* Transpose 8x8 bit array packed into a single quadword *x*.
* *t* is workspace. */
#define TRANS_BIT_8X8(x, t) { \
t = (x ^ (x >> 7)) & 0x00AA00AA00AA00AALL; \
x = x ^ t ^ (t << 7); \
t = (x ^ (x >> 14)) & 0x0000CCCC0000CCCCLL; \
x = x ^ t ^ (t << 14); \
t = (x ^ (x >> 28)) & 0x00000000F0F0F0F0LL; \
x = x ^ t ^ (t << 28); \
}
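/* The three shift/mask rounds above are the classic 8x8 bit-matrix transpose:
 * they exchange 1x1, 2x2 and then 4x4 bit blocks across the diagonal, so bit
 * (i,j) of the packed 64-bit word ends up at position (j,i). */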
/* Transpose 8x8 bit array along the diagonal from upper right
to lower left */
#define TRANS_BIT_8X8_BE(x, t) { \
t = (x ^ (x >> 9)) & 0x0055005500550055LL; \
x = x ^ t ^ (t << 9); \
t = (x ^ (x >> 18)) & 0x0000333300003333LL; \
x = x ^ t ^ (t << 18); \
t = (x ^ (x >> 36)) & 0x000000000F0F0F0FLL; \
x = x ^ t ^ (t << 36); \
}
/* Transpose of an array of arbitrarily typed elements. */
#define TRANS_ELEM_TYPE(in, out, lda, ldb, type_t) { \
size_t ii, jj, kk; \
const type_t* in_type = (const type_t*) in; \
type_t* out_type = (type_t*) out; \
for(ii = 0; ii + 7 < lda; ii += 8) { \
for(jj = 0; jj < ldb; jj++) { \
for(kk = 0; kk < 8; kk++) { \
out_type[jj*lda + ii + kk] = \
in_type[ii*ldb + kk * ldb + jj]; \
} \
} \
} \
for(ii = lda - lda % 8; ii < lda; ii ++) { \
for(jj = 0; jj < ldb; jj++) { \
out_type[jj*lda + ii] = in_type[ii*ldb + jj]; \
} \
} \
}
/* Memory copy with bshuf call signature. For testing and profiling. */
int64_t bshuf_copy(const void* in, void* out, const size_t size,
const size_t elem_size) {
const char* in_b = (const char*) in;
char* out_b = (char*) out;
memcpy(out_b, in_b, size * elem_size);
return size * elem_size;
}
/* Transpose bytes within elements, starting partway through input. */
int64_t bshuf_trans_byte_elem_remainder(const void* in, void* out, const size_t size,
const size_t elem_size, const size_t start) {
size_t ii, jj, kk;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
CHECK_MULT_EIGHT(start);
if (size > start) {
// ii loop separated into 2 loops so the compiler can unroll
// the inner one.
for (ii = start; ii + 7 < size; ii += 8) {
for (jj = 0; jj < elem_size; jj++) {
for (kk = 0; kk < 8; kk++) {
out_b[jj * size + ii + kk]
= in_b[ii * elem_size + kk * elem_size + jj];
}
}
}
for (ii = size - size % 8; ii < size; ii ++) {
for (jj = 0; jj < elem_size; jj++) {
out_b[jj * size + ii] = in_b[ii * elem_size + jj];
}
}
}
return size * elem_size;
}
/* Transpose bytes within elements. */
int64_t bshuf_trans_byte_elem_scal(const void* in, void* out, const size_t size,
const size_t elem_size) {
return bshuf_trans_byte_elem_remainder(in, out, size, elem_size, 0);
}
/* Transpose bits within bytes. */
int64_t bshuf_trans_bit_byte_remainder(const void* in, void* out, const size_t size,
const size_t elem_size, const size_t start_byte) {
const uint64_t* in_b = (const uint64_t*) in;
uint8_t* out_b = (uint8_t*) out;
uint64_t x, t;
size_t ii, kk;
size_t nbyte = elem_size * size;
size_t nbyte_bitrow = nbyte / 8;
uint64_t e=1;
const int little_endian = *(uint8_t *) &e == 1;
const size_t bit_row_skip = little_endian ? nbyte_bitrow : -nbyte_bitrow;
const int64_t bit_row_offset = little_endian ? 0 : 7 * nbyte_bitrow;
CHECK_MULT_EIGHT(nbyte);
CHECK_MULT_EIGHT(start_byte);
for (ii = start_byte / 8; ii < nbyte_bitrow; ii ++) {
x = in_b[ii];
if (little_endian) {
TRANS_BIT_8X8(x, t);
} else {
TRANS_BIT_8X8_BE(x, t);
}
for (kk = 0; kk < 8; kk ++) {
out_b[bit_row_offset + kk * bit_row_skip + ii] = x;
x = x >> 8;
}
}
return size * elem_size;
}
/* Transpose bits within bytes. */
int64_t bshuf_trans_bit_byte_scal(const void* in, void* out, const size_t size,
const size_t elem_size) {
return bshuf_trans_bit_byte_remainder(in, out, size, elem_size, 0);
}
/* General transpose of an array, optimized for large element sizes. */
int64_t bshuf_trans_elem(const void* in, void* out, const size_t lda,
const size_t ldb, const size_t elem_size) {
size_t ii, jj;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
for(ii = 0; ii < lda; ii++) {
for(jj = 0; jj < ldb; jj++) {
memcpy(&out_b[(jj*lda + ii) * elem_size],
&in_b[(ii*ldb + jj) * elem_size], elem_size);
}
}
return lda * ldb * elem_size;
}
/* Transpose rows of shuffled bits (size / 8 bytes) within groups of 8. */
int64_t bshuf_trans_bitrow_eight(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t nbyte_bitrow = size / 8;
CHECK_MULT_EIGHT(size);
return bshuf_trans_elem(in, out, 8, elem_size, nbyte_bitrow);
}
/* Transpose bits within elements. */
int64_t bshuf_trans_bit_elem_scal(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
void *tmp_buf;
CHECK_MULT_EIGHT(size);
tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
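    /* Full bit transpose in three passes: bytes within elements, bits within
     * bytes, then regrouping the eight bit-rows. */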
count = bshuf_trans_byte_elem_scal(in, out, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_trans_bit_byte_scal(out, tmp_buf, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size);
free(tmp_buf);
return count;
}
/* For data organized into a row for each bit (8 * elem_size rows), transpose
* the bytes. */
int64_t bshuf_trans_byte_bitrow_scal(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t ii, jj, kk, nbyte_row;
const char *in_b;
char *out_b;
in_b = (const char*) in;
out_b = (char*) out;
nbyte_row = size / 8;
CHECK_MULT_EIGHT(size);
for (jj = 0; jj < elem_size; jj++) {
for (ii = 0; ii < nbyte_row; ii++) {
for (kk = 0; kk < 8; kk++) {
out_b[ii * 8 * elem_size + jj * 8 + kk] = \
in_b[(jj * 8 + kk) * nbyte_row + ii];
}
}
}
return size * elem_size;
}
/* Shuffle bits within the bytes of eight element blocks. */
int64_t bshuf_shuffle_bit_eightelem_scal(const void* in, void* out, \
const size_t size, const size_t elem_size) {
const char *in_b;
char *out_b;
uint64_t x, t;
size_t ii, jj, kk;
size_t nbyte, out_index;
uint64_t e=1;
const int little_endian = *(uint8_t *) &e == 1;
const size_t elem_skip = little_endian ? elem_size : -elem_size;
const uint64_t elem_offset = little_endian ? 0 : 7 * elem_size;
CHECK_MULT_EIGHT(size);
in_b = (const char*) in;
out_b = (char*) out;
nbyte = elem_size * size;
for (jj = 0; jj < 8 * elem_size; jj += 8) {
for (ii = 0; ii + 8 * elem_size - 1 < nbyte; ii += 8 * elem_size) {
x = *((uint64_t*) &in_b[ii + jj]);
if (little_endian) {
TRANS_BIT_8X8(x, t);
} else {
TRANS_BIT_8X8_BE(x, t);
}
for (kk = 0; kk < 8; kk++) {
out_index = ii + jj / 8 + elem_offset + kk * elem_skip;
*((uint8_t*) &out_b[out_index]) = x;
x = x >> 8;
}
}
}
return size * elem_size;
}
/* Untranspose bits within elements. */
int64_t bshuf_untrans_bit_elem_scal(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
void *tmp_buf;
CHECK_MULT_EIGHT(size);
tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
count = bshuf_trans_byte_bitrow_scal(in, tmp_buf, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_shuffle_bit_eightelem_scal(tmp_buf, out, size, elem_size);
free(tmp_buf);
return count;
}
/* ---- Worker code that uses Arm NEON ----
*
* The following code makes use of the Arm NEON instruction set.
* NEON technology is the implementation of the ARM Advanced Single
* Instruction Multiple Data (SIMD) extension.
* The NEON unit is the component of the processor that executes SIMD instructions.
* It is also called the NEON Media Processing Engine (MPE).
*
*/
#ifdef USEARMNEON
/* Transpose bytes within elements for 16 bit elements. */
int64_t bshuf_trans_byte_elem_NEON_16(const void* in, void* out, const size_t size) {
size_t ii;
const char *in_b = (const char*) in;
char *out_b = (char*) out;
int8x16_t a0, b0, a1, b1;
for (ii=0; ii + 15 < size; ii += 16) {
a0 = vld1q_s8(in_b + 2*ii + 0*16);
b0 = vld1q_s8(in_b + 2*ii + 1*16);
a1 = vzip1q_s8(a0, b0);
b1 = vzip2q_s8(a0, b0);
a0 = vzip1q_s8(a1, b1);
b0 = vzip2q_s8(a1, b1);
a1 = vzip1q_s8(a0, b0);
b1 = vzip2q_s8(a0, b0);
a0 = vzip1q_s8(a1, b1);
b0 = vzip2q_s8(a1, b1);
vst1q_s8(out_b + 0*size + ii, a0);
vst1q_s8(out_b + 1*size + ii, b0);
}
return bshuf_trans_byte_elem_remainder(in, out, size, 2,
size - size % 16);
}
/* Transpose bytes within elements for 32 bit elements. */
int64_t bshuf_trans_byte_elem_NEON_32(const void* in, void* out, const size_t size) {
size_t ii;
const char *in_b;
char *out_b;
in_b = (const char*) in;
out_b = (char*) out;
int8x16_t a0, b0, c0, d0, a1, b1, c1, d1;
int64x2_t a2, b2, c2, d2;
for (ii=0; ii + 15 < size; ii += 16) {
a0 = vld1q_s8(in_b + 4*ii + 0*16);
b0 = vld1q_s8(in_b + 4*ii + 1*16);
c0 = vld1q_s8(in_b + 4*ii + 2*16);
d0 = vld1q_s8(in_b + 4*ii + 3*16);
a1 = vzip1q_s8(a0, b0);
b1 = vzip2q_s8(a0, b0);
c1 = vzip1q_s8(c0, d0);
d1 = vzip2q_s8(c0, d0);
a0 = vzip1q_s8(a1, b1);
b0 = vzip2q_s8(a1, b1);
c0 = vzip1q_s8(c1, d1);
d0 = vzip2q_s8(c1, d1);
a1 = vzip1q_s8(a0, b0);
b1 = vzip2q_s8(a0, b0);
c1 = vzip1q_s8(c0, d0);
d1 = vzip2q_s8(c0, d0);
a2 = vzip1q_s64(vreinterpretq_s64_s8(a1), vreinterpretq_s64_s8(c1));
b2 = vzip2q_s64(vreinterpretq_s64_s8(a1), vreinterpretq_s64_s8(c1));
c2 = vzip1q_s64(vreinterpretq_s64_s8(b1), vreinterpretq_s64_s8(d1));
d2 = vzip2q_s64(vreinterpretq_s64_s8(b1), vreinterpretq_s64_s8(d1));
vst1q_s64((int64_t *) (out_b + 0*size + ii), a2);
vst1q_s64((int64_t *) (out_b + 1*size + ii), b2);
vst1q_s64((int64_t *) (out_b + 2*size + ii), c2);
vst1q_s64((int64_t *) (out_b + 3*size + ii), d2);
}
return bshuf_trans_byte_elem_remainder(in, out, size, 4,
size - size % 16);
}
/* Transpose bytes within elements for 64 bit elements. */
int64_t bshuf_trans_byte_elem_NEON_64(const void* in, void* out, const size_t size) {
size_t ii;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
int8x16_t a0, b0, c0, d0, e0, f0, g0, h0;
int8x16_t a1, b1, c1, d1, e1, f1, g1, h1;
for (ii=0; ii + 15 < size; ii += 16) {
a0 = vld1q_s8(in_b + 8*ii + 0*16);
b0 = vld1q_s8(in_b + 8*ii + 1*16);
c0 = vld1q_s8(in_b + 8*ii + 2*16);
d0 = vld1q_s8(in_b + 8*ii + 3*16);
e0 = vld1q_s8(in_b + 8*ii + 4*16);
f0 = vld1q_s8(in_b + 8*ii + 5*16);
g0 = vld1q_s8(in_b + 8*ii + 6*16);
h0 = vld1q_s8(in_b + 8*ii + 7*16);
a1 = vzip1q_s8 (a0, b0);
b1 = vzip2q_s8 (a0, b0);
c1 = vzip1q_s8 (c0, d0);
d1 = vzip2q_s8 (c0, d0);
e1 = vzip1q_s8 (e0, f0);
f1 = vzip2q_s8 (e0, f0);
g1 = vzip1q_s8 (g0, h0);
h1 = vzip2q_s8 (g0, h0);
a0 = vzip1q_s8 (a1, b1);
b0 = vzip2q_s8 (a1, b1);
c0 = vzip1q_s8 (c1, d1);
d0 = vzip2q_s8 (c1, d1);
e0 = vzip1q_s8 (e1, f1);
f0 = vzip2q_s8 (e1, f1);
g0 = vzip1q_s8 (g1, h1);
h0 = vzip2q_s8 (g1, h1);
a1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (a0), vreinterpretq_s32_s8 (c0));
b1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (a0), vreinterpretq_s32_s8 (c0));
c1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (b0), vreinterpretq_s32_s8 (d0));
d1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (b0), vreinterpretq_s32_s8 (d0));
e1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (e0), vreinterpretq_s32_s8 (g0));
f1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (e0), vreinterpretq_s32_s8 (g0));
g1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (f0), vreinterpretq_s32_s8 (h0));
h1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (f0), vreinterpretq_s32_s8 (h0));
a0 = (int8x16_t) vzip1q_s64 (vreinterpretq_s64_s8 (a1), vreinterpretq_s64_s8 (e1));
b0 = (int8x16_t) vzip2q_s64 (vreinterpretq_s64_s8 (a1), vreinterpretq_s64_s8 (e1));
c0 = (int8x16_t) vzip1q_s64 (vreinterpretq_s64_s8 (b1), vreinterpretq_s64_s8 (f1));
d0 = (int8x16_t) vzip2q_s64 (vreinterpretq_s64_s8 (b1), vreinterpretq_s64_s8 (f1));
e0 = (int8x16_t) vzip1q_s64 (vreinterpretq_s64_s8 (c1), vreinterpretq_s64_s8 (g1));
f0 = (int8x16_t) vzip2q_s64 (vreinterpretq_s64_s8 (c1), vreinterpretq_s64_s8 (g1));
g0 = (int8x16_t) vzip1q_s64 (vreinterpretq_s64_s8 (d1), vreinterpretq_s64_s8 (h1));
h0 = (int8x16_t) vzip2q_s64 (vreinterpretq_s64_s8 (d1), vreinterpretq_s64_s8 (h1));
vst1q_s8(out_b + 0*size + ii, a0);
vst1q_s8(out_b + 1*size + ii, b0);
vst1q_s8(out_b + 2*size + ii, c0);
vst1q_s8(out_b + 3*size + ii, d0);
vst1q_s8(out_b + 4*size + ii, e0);
vst1q_s8(out_b + 5*size + ii, f0);
vst1q_s8(out_b + 6*size + ii, g0);
vst1q_s8(out_b + 7*size + ii, h0);
}
return bshuf_trans_byte_elem_remainder(in, out, size, 8,
size - size % 16);
}
/* Transpose bytes within elements using best NEON algorithm available. */
int64_t bshuf_trans_byte_elem_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
// Trivial cases: power of 2 bytes.
switch (elem_size) {
case 1:
count = bshuf_copy(in, out, size, elem_size);
return count;
case 2:
count = bshuf_trans_byte_elem_NEON_16(in, out, size);
return count;
case 4:
count = bshuf_trans_byte_elem_NEON_32(in, out, size);
return count;
case 8:
count = bshuf_trans_byte_elem_NEON_64(in, out, size);
return count;
}
// Worst case: odd number of bytes. Turns out that this is faster for
// (odd * 2) byte elements as well (hence % 4).
if (elem_size % 4) {
count = bshuf_trans_byte_elem_scal(in, out, size, elem_size);
return count;
}
// Multiple of power of 2: transpose hierarchically.
{
size_t nchunk_elem;
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
if ((elem_size % 8) == 0) {
nchunk_elem = elem_size / 8;
TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int64_t);
count = bshuf_trans_byte_elem_NEON_64(out, tmp_buf,
size * nchunk_elem);
bshuf_trans_elem(tmp_buf, out, 8, nchunk_elem, size);
} else if ((elem_size % 4) == 0) {
nchunk_elem = elem_size / 4;
TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int32_t);
count = bshuf_trans_byte_elem_NEON_32(out, tmp_buf,
size * nchunk_elem);
bshuf_trans_elem(tmp_buf, out, 4, nchunk_elem, size);
} else {
// Not used since scalar algorithm is faster.
nchunk_elem = elem_size / 2;
TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int16_t);
count = bshuf_trans_byte_elem_NEON_16(out, tmp_buf,
size * nchunk_elem);
bshuf_trans_elem(tmp_buf, out, 2, nchunk_elem, size);
}
free(tmp_buf);
return count;
}
}
/* Creates a 16 bit mask made up of the most significant
 * bit of each byte of 'input' (a scalar stand-in for SSE2's
 * _mm_movemask_epi8).
 */
int32_t move_byte_mask_neon(uint8x16_t input) {
return ( ((input[0] & 0x80) >> 7) | (((input[1] & 0x80) >> 7) << 1) | (((input[2] & 0x80) >> 7) << 2) | (((input[3] & 0x80) >> 7) << 3)
| (((input[4] & 0x80) >> 7) << 4) | (((input[5] & 0x80) >> 7) << 5) | (((input[6] & 0x80) >> 7) << 6) | (((input[7] & 0x80) >> 7) << 7)
| (((input[8] & 0x80) >> 7) << 8) | (((input[9] & 0x80) >> 7) << 9) | (((input[10] & 0x80) >> 7) << 10) | (((input[11] & 0x80) >> 7) << 11)
| (((input[12] & 0x80) >> 7) << 12) | (((input[13] & 0x80) >> 7) << 13) | (((input[14] & 0x80) >> 7) << 14) | (((input[15] & 0x80) >> 7) << 15)
);
}
/* Transpose bits within bytes. */
int64_t bshuf_trans_bit_byte_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t ii, kk;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
uint16_t* out_ui16;
int64_t count;
size_t nbyte = elem_size * size;
CHECK_MULT_EIGHT(nbyte);
int16x8_t xmm;
int32_t bt;
for (ii = 0; ii + 15 < nbyte; ii += 16) {
xmm = vld1q_s16((int16_t *) (in_b + ii));
for (kk = 0; kk < 8; kk++) {
bt = move_byte_mask_neon((uint8x16_t) xmm);
xmm = vshlq_n_s16(xmm, 1);
out_ui16 = (uint16_t*) &out_b[((7 - kk) * nbyte + ii) / 8];
*out_ui16 = bt;
}
}
count = bshuf_trans_bit_byte_remainder(in, out, size, elem_size,
nbyte - nbyte % 16);
return count;
}
/* Transpose bits within elements. */
int64_t bshuf_trans_bit_elem_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
CHECK_MULT_EIGHT(size);
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
count = bshuf_trans_byte_elem_NEON(in, out, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_trans_bit_byte_NEON(out, tmp_buf, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size);
free(tmp_buf);
return count;
}
/* For data organized into a row for each bit (8 * elem_size rows), transpose
* the bytes. */
int64_t bshuf_trans_byte_bitrow_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t ii, jj;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
CHECK_MULT_EIGHT(size);
size_t nrows = 8 * elem_size;
size_t nbyte_row = size / 8;
int8x16_t a0, b0, c0, d0, e0, f0, g0, h0;
int8x16_t a1, b1, c1, d1, e1, f1, g1, h1;
int64x1_t *as, *bs, *cs, *ds, *es, *fs, *gs, *hs;
for (ii = 0; ii + 7 < nrows; ii += 8) {
for (jj = 0; jj + 15 < nbyte_row; jj += 16) {
a0 = vld1q_s8(in_b + (ii + 0)*nbyte_row + jj);
b0 = vld1q_s8(in_b + (ii + 1)*nbyte_row + jj);
c0 = vld1q_s8(in_b + (ii + 2)*nbyte_row + jj);
d0 = vld1q_s8(in_b + (ii + 3)*nbyte_row + jj);
e0 = vld1q_s8(in_b + (ii + 4)*nbyte_row + jj);
f0 = vld1q_s8(in_b + (ii + 5)*nbyte_row + jj);
g0 = vld1q_s8(in_b + (ii + 6)*nbyte_row + jj);
h0 = vld1q_s8(in_b + (ii + 7)*nbyte_row + jj);
a1 = vzip1q_s8(a0, b0);
b1 = vzip1q_s8(c0, d0);
c1 = vzip1q_s8(e0, f0);
d1 = vzip1q_s8(g0, h0);
e1 = vzip2q_s8(a0, b0);
f1 = vzip2q_s8(c0, d0);
g1 = vzip2q_s8(e0, f0);
h1 = vzip2q_s8(g0, h0);
a0 = (int8x16_t) vzip1q_s16 (vreinterpretq_s16_s8 (a1), vreinterpretq_s16_s8 (b1));
b0 = (int8x16_t) vzip1q_s16 (vreinterpretq_s16_s8 (c1), vreinterpretq_s16_s8 (d1));
c0 = (int8x16_t) vzip2q_s16 (vreinterpretq_s16_s8 (a1), vreinterpretq_s16_s8 (b1));
d0 = (int8x16_t) vzip2q_s16 (vreinterpretq_s16_s8 (c1), vreinterpretq_s16_s8 (d1));
e0 = (int8x16_t) vzip1q_s16 (vreinterpretq_s16_s8 (e1), vreinterpretq_s16_s8 (f1));
f0 = (int8x16_t) vzip1q_s16 (vreinterpretq_s16_s8 (g1), vreinterpretq_s16_s8 (h1));
g0 = (int8x16_t) vzip2q_s16 (vreinterpretq_s16_s8 (e1), vreinterpretq_s16_s8 (f1));
h0 = (int8x16_t) vzip2q_s16 (vreinterpretq_s16_s8 (g1), vreinterpretq_s16_s8 (h1));
a1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (a0), vreinterpretq_s32_s8 (b0));
b1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (a0), vreinterpretq_s32_s8 (b0));
c1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (c0), vreinterpretq_s32_s8 (d0));
d1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (c0), vreinterpretq_s32_s8 (d0));
e1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (e0), vreinterpretq_s32_s8 (f0));
f1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (e0), vreinterpretq_s32_s8 (f0));
g1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (g0), vreinterpretq_s32_s8 (h0));
h1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (g0), vreinterpretq_s32_s8 (h0));
as = (int64x1_t *) &a1;
bs = (int64x1_t *) &b1;
cs = (int64x1_t *) &c1;
ds = (int64x1_t *) &d1;
es = (int64x1_t *) &e1;
fs = (int64x1_t *) &f1;
gs = (int64x1_t *) &g1;
hs = (int64x1_t *) &h1;
vst1_s64((int64_t *)(out_b + (jj + 0) * nrows + ii), *as);
vst1_s64((int64_t *)(out_b + (jj + 1) * nrows + ii), *(as + 1));
vst1_s64((int64_t *)(out_b + (jj + 2) * nrows + ii), *bs);
vst1_s64((int64_t *)(out_b + (jj + 3) * nrows + ii), *(bs + 1));
vst1_s64((int64_t *)(out_b + (jj + 4) * nrows + ii), *cs);
vst1_s64((int64_t *)(out_b + (jj + 5) * nrows + ii), *(cs + 1));
vst1_s64((int64_t *)(out_b + (jj + 6) * nrows + ii), *ds);
vst1_s64((int64_t *)(out_b + (jj + 7) * nrows + ii), *(ds + 1));
vst1_s64((int64_t *)(out_b + (jj + 8) * nrows + ii), *es);
vst1_s64((int64_t *)(out_b + (jj + 9) * nrows + ii), *(es + 1));
vst1_s64((int64_t *)(out_b + (jj + 10) * nrows + ii), *fs);
vst1_s64((int64_t *)(out_b + (jj + 11) * nrows + ii), *(fs + 1));
vst1_s64((int64_t *)(out_b + (jj + 12) * nrows + ii), *gs);
vst1_s64((int64_t *)(out_b + (jj + 13) * nrows + ii), *(gs + 1));
vst1_s64((int64_t *)(out_b + (jj + 14) * nrows + ii), *hs);
vst1_s64((int64_t *)(out_b + (jj + 15) * nrows + ii), *(hs + 1));
}
for (jj = nbyte_row - nbyte_row % 16; jj < nbyte_row; jj ++) {
out_b[jj * nrows + ii + 0] = in_b[(ii + 0)*nbyte_row + jj];
out_b[jj * nrows + ii + 1] = in_b[(ii + 1)*nbyte_row + jj];
out_b[jj * nrows + ii + 2] = in_b[(ii + 2)*nbyte_row + jj];
out_b[jj * nrows + ii + 3] = in_b[(ii + 3)*nbyte_row + jj];
out_b[jj * nrows + ii + 4] = in_b[(ii + 4)*nbyte_row + jj];
out_b[jj * nrows + ii + 5] = in_b[(ii + 5)*nbyte_row + jj];
out_b[jj * nrows + ii + 6] = in_b[(ii + 6)*nbyte_row + jj];
out_b[jj * nrows + ii + 7] = in_b[(ii + 7)*nbyte_row + jj];
}
}
return size * elem_size;
}
/* Shuffle bits within the bytes of eight element blocks. */
int64_t bshuf_shuffle_bit_eightelem_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
CHECK_MULT_EIGHT(size);
// With a bit of care, this could be written such that it is
// in_buf = out_buf safe.
const char* in_b = (const char*) in;
uint16_t* out_ui16 = (uint16_t*) out;
size_t ii, jj, kk;
size_t nbyte = elem_size * size;
int16x8_t xmm;
int32_t bt;
if (elem_size % 2) {
bshuf_shuffle_bit_eightelem_scal(in, out, size, elem_size);
} else {
for (ii = 0; ii + 8 * elem_size - 1 < nbyte;
ii += 8 * elem_size) {
for (jj = 0; jj + 15 < 8 * elem_size; jj += 16) {
xmm = vld1q_s16((int16_t *) &in_b[ii + jj]);
for (kk = 0; kk < 8; kk++) {
bt = move_byte_mask_neon((uint8x16_t) xmm);
xmm = vshlq_n_s16(xmm, 1);
size_t ind = (ii + jj / 8 + (7 - kk) * elem_size);
out_ui16[ind / 2] = bt;
}
}
}
}
return size * elem_size;
}
/* Untranspose bits within elements. */
int64_t bshuf_untrans_bit_elem_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
CHECK_MULT_EIGHT(size);
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
count = bshuf_trans_byte_bitrow_NEON(in, tmp_buf, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_shuffle_bit_eightelem_NEON(tmp_buf, out, size, elem_size);
free(tmp_buf);
return count;
}
#else // #ifdef USEARMNEON
int64_t bshuf_untrans_bit_elem_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -13;
}
int64_t bshuf_trans_bit_elem_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -13;
}
int64_t bshuf_trans_byte_bitrow_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -13;
}
int64_t bshuf_trans_bit_byte_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -13;
}
int64_t bshuf_trans_byte_elem_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -13;
}
int64_t bshuf_trans_byte_elem_NEON_64(const void* in, void* out, const size_t size) {
return -13;
}
int64_t bshuf_trans_byte_elem_NEON_32(const void* in, void* out, const size_t size) {
return -13;
}
int64_t bshuf_trans_byte_elem_NEON_16(const void* in, void* out, const size_t size) {
return -13;
}
int64_t bshuf_shuffle_bit_eightelem_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -13;
}
#endif
/* ---- Worker code that uses SSE2 ----
*
* The following code makes use of the SSE2 instruction set and specialized
* 16 byte registers. The SSE2 instructions are present on modern x86
* processors. The first Intel processor microarchitecture supporting SSE2 was
* Pentium 4 (2000).
*
*/
#ifdef USESSE2
/* Transpose bytes within elements for 16 bit elements. */
int64_t bshuf_trans_byte_elem_SSE_16(const void* in, void* out, const size_t size) {
size_t ii;
const char *in_b = (const char*) in;
char *out_b = (char*) out;
__m128i a0, b0, a1, b1;
for (ii=0; ii + 15 < size; ii += 16) {
a0 = _mm_loadu_si128((__m128i *) &in_b[2*ii + 0*16]);
b0 = _mm_loadu_si128((__m128i *) &in_b[2*ii + 1*16]);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
a0 = _mm_unpacklo_epi8(a1, b1);
b0 = _mm_unpackhi_epi8(a1, b1);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
a0 = _mm_unpacklo_epi8(a1, b1);
b0 = _mm_unpackhi_epi8(a1, b1);
_mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0);
_mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0);
}
return bshuf_trans_byte_elem_remainder(in, out, size, 2,
size - size % 16);
}
/* Transpose bytes within elements for 32 bit elements. */
int64_t bshuf_trans_byte_elem_SSE_32(const void* in, void* out, const size_t size) {
size_t ii;
const char *in_b;
char *out_b;
in_b = (const char*) in;
out_b = (char*) out;
__m128i a0, b0, c0, d0, a1, b1, c1, d1;
for (ii=0; ii + 15 < size; ii += 16) {
a0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 0*16]);
b0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 1*16]);
c0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 2*16]);
d0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 3*16]);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
c1 = _mm_unpacklo_epi8(c0, d0);
d1 = _mm_unpackhi_epi8(c0, d0);
a0 = _mm_unpacklo_epi8(a1, b1);
b0 = _mm_unpackhi_epi8(a1, b1);
c0 = _mm_unpacklo_epi8(c1, d1);
d0 = _mm_unpackhi_epi8(c1, d1);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
c1 = _mm_unpacklo_epi8(c0, d0);
d1 = _mm_unpackhi_epi8(c0, d0);
a0 = _mm_unpacklo_epi64(a1, c1);
b0 = _mm_unpackhi_epi64(a1, c1);
c0 = _mm_unpacklo_epi64(b1, d1);
d0 = _mm_unpackhi_epi64(b1, d1);
_mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0);
_mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0);
_mm_storeu_si128((__m128i *) &out_b[2*size + ii], c0);
_mm_storeu_si128((__m128i *) &out_b[3*size + ii], d0);
}
return bshuf_trans_byte_elem_remainder(in, out, size, 4,
size - size % 16);
}
/* Transpose bytes within elements for 64 bit elements. */
int64_t bshuf_trans_byte_elem_SSE_64(const void* in, void* out, const size_t size) {
size_t ii;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
__m128i a0, b0, c0, d0, e0, f0, g0, h0;
__m128i a1, b1, c1, d1, e1, f1, g1, h1;
for (ii=0; ii + 15 < size; ii += 16) {
a0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 0*16]);
b0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 1*16]);
c0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 2*16]);
d0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 3*16]);
e0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 4*16]);
f0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 5*16]);
g0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 6*16]);
h0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 7*16]);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
c1 = _mm_unpacklo_epi8(c0, d0);
d1 = _mm_unpackhi_epi8(c0, d0);
e1 = _mm_unpacklo_epi8(e0, f0);
f1 = _mm_unpackhi_epi8(e0, f0);
g1 = _mm_unpacklo_epi8(g0, h0);
h1 = _mm_unpackhi_epi8(g0, h0);
a0 = _mm_unpacklo_epi8(a1, b1);
b0 = _mm_unpackhi_epi8(a1, b1);
c0 = _mm_unpacklo_epi8(c1, d1);
d0 = _mm_unpackhi_epi8(c1, d1);
e0 = _mm_unpacklo_epi8(e1, f1);
f0 = _mm_unpackhi_epi8(e1, f1);
g0 = _mm_unpacklo_epi8(g1, h1);
h0 = _mm_unpackhi_epi8(g1, h1);
a1 = _mm_unpacklo_epi32(a0, c0);
b1 = _mm_unpackhi_epi32(a0, c0);
c1 = _mm_unpacklo_epi32(b0, d0);
d1 = _mm_unpackhi_epi32(b0, d0);
e1 = _mm_unpacklo_epi32(e0, g0);
f1 = _mm_unpackhi_epi32(e0, g0);
g1 = _mm_unpacklo_epi32(f0, h0);
h1 = _mm_unpackhi_epi32(f0, h0);
a0 = _mm_unpacklo_epi64(a1, e1);
b0 = _mm_unpackhi_epi64(a1, e1);
c0 = _mm_unpacklo_epi64(b1, f1);
d0 = _mm_unpackhi_epi64(b1, f1);
e0 = _mm_unpacklo_epi64(c1, g1);
f0 = _mm_unpackhi_epi64(c1, g1);
g0 = _mm_unpacklo_epi64(d1, h1);
h0 = _mm_unpackhi_epi64(d1, h1);
_mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0);
_mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0);
_mm_storeu_si128((__m128i *) &out_b[2*size + ii], c0);
_mm_storeu_si128((__m128i *) &out_b[3*size + ii], d0);
_mm_storeu_si128((__m128i *) &out_b[4*size + ii], e0);
_mm_storeu_si128((__m128i *) &out_b[5*size + ii], f0);
_mm_storeu_si128((__m128i *) &out_b[6*size + ii], g0);
_mm_storeu_si128((__m128i *) &out_b[7*size + ii], h0);
}
return bshuf_trans_byte_elem_remainder(in, out, size, 8,
size - size % 16);
}
/* Transpose bytes within elements using best SSE algorithm available. */
int64_t bshuf_trans_byte_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
// Trivial cases: power of 2 bytes.
switch (elem_size) {
case 1:
count = bshuf_copy(in, out, size, elem_size);
return count;
case 2:
count = bshuf_trans_byte_elem_SSE_16(in, out, size);
return count;
case 4:
count = bshuf_trans_byte_elem_SSE_32(in, out, size);
return count;
case 8:
count = bshuf_trans_byte_elem_SSE_64(in, out, size);
return count;
}
// Worst case: odd number of bytes. Turns out that this is faster for
// (odd * 2) byte elements as well (hence % 4).
if (elem_size % 4) {
count = bshuf_trans_byte_elem_scal(in, out, size, elem_size);
return count;
}
// Multiple of power of 2: transpose hierarchically.
{
size_t nchunk_elem;
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
if ((elem_size % 8) == 0) {
nchunk_elem = elem_size / 8;
TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int64_t);
count = bshuf_trans_byte_elem_SSE_64(out, tmp_buf,
size * nchunk_elem);
bshuf_trans_elem(tmp_buf, out, 8, nchunk_elem, size);
} else if ((elem_size % 4) == 0) {
nchunk_elem = elem_size / 4;
TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int32_t);
count = bshuf_trans_byte_elem_SSE_32(out, tmp_buf,
size * nchunk_elem);
bshuf_trans_elem(tmp_buf, out, 4, nchunk_elem, size);
} else {
// Not used since scalar algorithm is faster.
nchunk_elem = elem_size / 2;
TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int16_t);
count = bshuf_trans_byte_elem_SSE_16(out, tmp_buf,
size * nchunk_elem);
bshuf_trans_elem(tmp_buf, out, 2, nchunk_elem, size);
}
free(tmp_buf);
return count;
}
}
/* Transpose bits within bytes. */
int64_t bshuf_trans_bit_byte_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t ii, kk;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
uint16_t* out_ui16;
int64_t count;
size_t nbyte = elem_size * size;
CHECK_MULT_EIGHT(nbyte);
__m128i xmm;
int32_t bt;
for (ii = 0; ii + 15 < nbyte; ii += 16) {
xmm = _mm_loadu_si128((__m128i *) &in_b[ii]);
for (kk = 0; kk < 8; kk++) {
bt = _mm_movemask_epi8(xmm);
xmm = _mm_slli_epi16(xmm, 1);
out_ui16 = (uint16_t*) &out_b[((7 - kk) * nbyte + ii) / 8];
*out_ui16 = bt;
}
}
count = bshuf_trans_bit_byte_remainder(in, out, size, elem_size,
nbyte - nbyte % 16);
return count;
}
/* Transpose bits within elements. */
int64_t bshuf_trans_bit_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
CHECK_MULT_EIGHT(size);
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
count = bshuf_trans_byte_elem_SSE(in, out, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_trans_bit_byte_SSE(out, tmp_buf, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size);
free(tmp_buf);
return count;
}
/* For data organized into a row for each bit (8 * elem_size rows), transpose
* the bytes. */
int64_t bshuf_trans_byte_bitrow_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t ii, jj;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
CHECK_MULT_EIGHT(size);
size_t nrows = 8 * elem_size;
size_t nbyte_row = size / 8;
__m128i a0, b0, c0, d0, e0, f0, g0, h0;
__m128i a1, b1, c1, d1, e1, f1, g1, h1;
__m128 *as, *bs, *cs, *ds, *es, *fs, *gs, *hs;
for (ii = 0; ii + 7 < nrows; ii += 8) {
for (jj = 0; jj + 15 < nbyte_row; jj += 16) {
a0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 0)*nbyte_row + jj]);
b0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 1)*nbyte_row + jj]);
c0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 2)*nbyte_row + jj]);
d0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 3)*nbyte_row + jj]);
e0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 4)*nbyte_row + jj]);
f0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 5)*nbyte_row + jj]);
g0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 6)*nbyte_row + jj]);
h0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 7)*nbyte_row + jj]);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpacklo_epi8(c0, d0);
c1 = _mm_unpacklo_epi8(e0, f0);
d1 = _mm_unpacklo_epi8(g0, h0);
e1 = _mm_unpackhi_epi8(a0, b0);
f1 = _mm_unpackhi_epi8(c0, d0);
g1 = _mm_unpackhi_epi8(e0, f0);
h1 = _mm_unpackhi_epi8(g0, h0);
a0 = _mm_unpacklo_epi16(a1, b1);
b0 = _mm_unpacklo_epi16(c1, d1);
c0 = _mm_unpackhi_epi16(a1, b1);
d0 = _mm_unpackhi_epi16(c1, d1);
e0 = _mm_unpacklo_epi16(e1, f1);
f0 = _mm_unpacklo_epi16(g1, h1);
g0 = _mm_unpackhi_epi16(e1, f1);
h0 = _mm_unpackhi_epi16(g1, h1);
a1 = _mm_unpacklo_epi32(a0, b0);
b1 = _mm_unpackhi_epi32(a0, b0);
c1 = _mm_unpacklo_epi32(c0, d0);
d1 = _mm_unpackhi_epi32(c0, d0);
e1 = _mm_unpacklo_epi32(e0, f0);
f1 = _mm_unpackhi_epi32(e0, f0);
g1 = _mm_unpacklo_epi32(g0, h0);
h1 = _mm_unpackhi_epi32(g0, h0);
// There is no storeh instruction for integers, so reinterpret the data
// as floats. (A storel for integers does exist: _mm_storel_epi64.)
as = (__m128 *) &a1;
bs = (__m128 *) &b1;
cs = (__m128 *) &c1;
ds = (__m128 *) &d1;
es = (__m128 *) &e1;
fs = (__m128 *) &f1;
gs = (__m128 *) &g1;
hs = (__m128 *) &h1;
_mm_storel_pi((__m64 *) &out_b[(jj + 0) * nrows + ii], *as);
_mm_storel_pi((__m64 *) &out_b[(jj + 2) * nrows + ii], *bs);
_mm_storel_pi((__m64 *) &out_b[(jj + 4) * nrows + ii], *cs);
_mm_storel_pi((__m64 *) &out_b[(jj + 6) * nrows + ii], *ds);
_mm_storel_pi((__m64 *) &out_b[(jj + 8) * nrows + ii], *es);
_mm_storel_pi((__m64 *) &out_b[(jj + 10) * nrows + ii], *fs);
_mm_storel_pi((__m64 *) &out_b[(jj + 12) * nrows + ii], *gs);
_mm_storel_pi((__m64 *) &out_b[(jj + 14) * nrows + ii], *hs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 1) * nrows + ii], *as);
_mm_storeh_pi((__m64 *) &out_b[(jj + 3) * nrows + ii], *bs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 5) * nrows + ii], *cs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 7) * nrows + ii], *ds);
_mm_storeh_pi((__m64 *) &out_b[(jj + 9) * nrows + ii], *es);
_mm_storeh_pi((__m64 *) &out_b[(jj + 11) * nrows + ii], *fs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 13) * nrows + ii], *gs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 15) * nrows + ii], *hs);
}
for (jj = nbyte_row - nbyte_row % 16; jj < nbyte_row; jj ++) {
out_b[jj * nrows + ii + 0] = in_b[(ii + 0)*nbyte_row + jj];
out_b[jj * nrows + ii + 1] = in_b[(ii + 1)*nbyte_row + jj];
out_b[jj * nrows + ii + 2] = in_b[(ii + 2)*nbyte_row + jj];
out_b[jj * nrows + ii + 3] = in_b[(ii + 3)*nbyte_row + jj];
out_b[jj * nrows + ii + 4] = in_b[(ii + 4)*nbyte_row + jj];
out_b[jj * nrows + ii + 5] = in_b[(ii + 5)*nbyte_row + jj];
out_b[jj * nrows + ii + 6] = in_b[(ii + 6)*nbyte_row + jj];
out_b[jj * nrows + ii + 7] = in_b[(ii + 7)*nbyte_row + jj];
}
}
return size * elem_size;
}
/* Shuffle bits within the bytes of eight element blocks. */
int64_t bshuf_shuffle_bit_eightelem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
CHECK_MULT_EIGHT(size);
// With a bit of care, this could be written such that it is
// in_buf = out_buf safe.
const char* in_b = (const char*) in;
uint16_t* out_ui16 = (uint16_t*) out;
size_t ii, jj, kk;
size_t nbyte = elem_size * size;
__m128i xmm;
int32_t bt;
if (elem_size % 2) {
bshuf_shuffle_bit_eightelem_scal(in, out, size, elem_size);
} else {
for (ii = 0; ii + 8 * elem_size - 1 < nbyte;
ii += 8 * elem_size) {
for (jj = 0; jj + 15 < 8 * elem_size; jj += 16) {
xmm = _mm_loadu_si128((__m128i *) &in_b[ii + jj]);
for (kk = 0; kk < 8; kk++) {
bt = _mm_movemask_epi8(xmm);
xmm = _mm_slli_epi16(xmm, 1);
size_t ind = (ii + jj / 8 + (7 - kk) * elem_size);
out_ui16[ind / 2] = bt;
}
}
}
}
return size * elem_size;
}
/* Untranspose bits within elements. */
int64_t bshuf_untrans_bit_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
CHECK_MULT_EIGHT(size);
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
count = bshuf_trans_byte_bitrow_SSE(in, tmp_buf, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_shuffle_bit_eightelem_SSE(tmp_buf, out, size, elem_size);
free(tmp_buf);
return count;
}
#else // #ifdef USESSE2
int64_t bshuf_untrans_bit_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_bit_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_byte_bitrow_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_bit_byte_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_byte_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_byte_elem_SSE_64(const void* in, void* out, const size_t size) {
return -11;
}
int64_t bshuf_trans_byte_elem_SSE_32(const void* in, void* out, const size_t size) {
return -11;
}
int64_t bshuf_trans_byte_elem_SSE_16(const void* in, void* out, const size_t size) {
return -11;
}
int64_t bshuf_shuffle_bit_eightelem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
#endif // #ifdef USESSE2
/* ---- Code that requires AVX2. Intel Haswell (2013) and later. ---- */
/* ---- Worker code that uses AVX2 ----
*
* The following code makes use of the AVX2 instruction set and specialized
* 32 byte registers. The AVX2 instructions are present on newer x86
* processors. The first Intel processor microarchitecture supporting AVX2 was
* Haswell (2013).
*
*/
#ifdef USEAVX2
/* Transpose bits within bytes. */
int64_t bshuf_trans_bit_byte_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t ii, kk;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
int32_t* out_i32;
size_t nbyte = elem_size * size;
int64_t count;
__m256i ymm;
int32_t bt;
for (ii = 0; ii + 31 < nbyte; ii += 32) {
ymm = _mm256_loadu_si256((__m256i *) &in_b[ii]);
for (kk = 0; kk < 8; kk++) {
bt = _mm256_movemask_epi8(ymm);
ymm = _mm256_slli_epi16(ymm, 1);
out_i32 = (int32_t*) &out_b[((7 - kk) * nbyte + ii) / 8];
*out_i32 = bt;
}
}
count = bshuf_trans_bit_byte_remainder(in, out, size, elem_size,
nbyte - nbyte % 32);
return count;
}
/* Transpose bits within elements. */
int64_t bshuf_trans_bit_elem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
CHECK_MULT_EIGHT(size);
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
count = bshuf_trans_byte_elem_SSE(in, out, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_trans_bit_byte_AVX(out, tmp_buf, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size);
free(tmp_buf);
return count;
}
/* For data organized into a row for each bit (8 * elem_size rows), transpose
* the bytes. */
int64_t bshuf_trans_byte_bitrow_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t hh, ii, jj, kk, mm;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
CHECK_MULT_EIGHT(size);
size_t nrows = 8 * elem_size;
size_t nbyte_row = size / 8;
if (elem_size % 4) return bshuf_trans_byte_bitrow_SSE(in, out, size,
elem_size);
__m256i ymm_0[8];
__m256i ymm_1[8];
__m256i ymm_storeage[8][4];
for (jj = 0; jj + 31 < nbyte_row; jj += 32) {
for (ii = 0; ii + 3 < elem_size; ii += 4) {
for (hh = 0; hh < 4; hh ++) {
for (kk = 0; kk < 8; kk ++){
ymm_0[kk] = _mm256_loadu_si256((__m256i *) &in_b[
(ii * 8 + hh * 8 + kk) * nbyte_row + jj]);
}
for (kk = 0; kk < 4; kk ++){
ymm_1[kk] = _mm256_unpacklo_epi8(ymm_0[kk * 2],
ymm_0[kk * 2 + 1]);
ymm_1[kk + 4] = _mm256_unpackhi_epi8(ymm_0[kk * 2],
ymm_0[kk * 2 + 1]);
}
for (kk = 0; kk < 2; kk ++){
for (mm = 0; mm < 2; mm ++){
ymm_0[kk * 4 + mm] = _mm256_unpacklo_epi16(
ymm_1[kk * 4 + mm * 2],
ymm_1[kk * 4 + mm * 2 + 1]);
ymm_0[kk * 4 + mm + 2] = _mm256_unpackhi_epi16(
ymm_1[kk * 4 + mm * 2],
ymm_1[kk * 4 + mm * 2 + 1]);
}
}
for (kk = 0; kk < 4; kk ++){
ymm_1[kk * 2] = _mm256_unpacklo_epi32(ymm_0[kk * 2],
ymm_0[kk * 2 + 1]);
ymm_1[kk * 2 + 1] = _mm256_unpackhi_epi32(ymm_0[kk * 2],
ymm_0[kk * 2 + 1]);
}
for (kk = 0; kk < 8; kk ++){
ymm_storeage[kk][hh] = ymm_1[kk];
}
}
for (mm = 0; mm < 8; mm ++) {
for (kk = 0; kk < 4; kk ++){
ymm_0[kk] = ymm_storeage[mm][kk];
}
ymm_1[0] = _mm256_unpacklo_epi64(ymm_0[0], ymm_0[1]);
ymm_1[1] = _mm256_unpacklo_epi64(ymm_0[2], ymm_0[3]);
ymm_1[2] = _mm256_unpackhi_epi64(ymm_0[0], ymm_0[1]);
ymm_1[3] = _mm256_unpackhi_epi64(ymm_0[2], ymm_0[3]);
ymm_0[0] = _mm256_permute2x128_si256(ymm_1[0], ymm_1[1], 32);
ymm_0[1] = _mm256_permute2x128_si256(ymm_1[2], ymm_1[3], 32);
ymm_0[2] = _mm256_permute2x128_si256(ymm_1[0], ymm_1[1], 49);
ymm_0[3] = _mm256_permute2x128_si256(ymm_1[2], ymm_1[3], 49);
_mm256_storeu_si256((__m256i *) &out_b[
(jj + mm * 2 + 0 * 16) * nrows + ii * 8], ymm_0[0]);
_mm256_storeu_si256((__m256i *) &out_b[
(jj + mm * 2 + 0 * 16 + 1) * nrows + ii * 8], ymm_0[1]);
_mm256_storeu_si256((__m256i *) &out_b[
(jj + mm * 2 + 1 * 16) * nrows + ii * 8], ymm_0[2]);
_mm256_storeu_si256((__m256i *) &out_b[
(jj + mm * 2 + 1 * 16 + 1) * nrows + ii * 8], ymm_0[3]);
}
}
}
for (ii = 0; ii < nrows; ii ++ ) {
for (jj = nbyte_row - nbyte_row % 32; jj < nbyte_row; jj ++) {
out_b[jj * nrows + ii] = in_b[ii * nbyte_row + jj];
}
}
return size * elem_size;
}
/* Shuffle bits within the bytes of eight element blocks. */
int64_t bshuf_shuffle_bit_eightelem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
CHECK_MULT_EIGHT(size);
// With a bit of care, this could be written such that it is
// in_buf = out_buf safe.
const char* in_b = (const char*) in;
char* out_b = (char*) out;
size_t ii, jj, kk;
size_t nbyte = elem_size * size;
__m256i ymm;
int32_t bt;
if (elem_size % 4) {
return bshuf_shuffle_bit_eightelem_SSE(in, out, size, elem_size);
} else {
for (jj = 0; jj + 31 < 8 * elem_size; jj += 32) {
for (ii = 0; ii + 8 * elem_size - 1 < nbyte;
ii += 8 * elem_size) {
ymm = _mm256_loadu_si256((__m256i *) &in_b[ii + jj]);
for (kk = 0; kk < 8; kk++) {
bt = _mm256_movemask_epi8(ymm);
ymm = _mm256_slli_epi16(ymm, 1);
size_t ind = (ii + jj / 8 + (7 - kk) * elem_size);
* (int32_t *) &out_b[ind] = bt;
}
}
}
}
return size * elem_size;
}
/* Untranspose bits within elements. */
int64_t bshuf_untrans_bit_elem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
CHECK_MULT_EIGHT(size);
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
count = bshuf_trans_byte_bitrow_AVX(in, tmp_buf, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_shuffle_bit_eightelem_AVX(tmp_buf, out, size, elem_size);
free(tmp_buf);
return count;
}
#else // #ifdef USEAVX2
int64_t bshuf_trans_bit_byte_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
int64_t bshuf_trans_bit_elem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
int64_t bshuf_trans_byte_bitrow_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
int64_t bshuf_shuffle_bit_eightelem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
int64_t bshuf_untrans_bit_elem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
#endif // #ifdef USEAVX2
/* ---- Drivers selecting best instruction set at compile time. ---- */
int64_t bshuf_trans_bit_elem(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
#ifdef USEAVX2
count = bshuf_trans_bit_elem_AVX(in, out, size, elem_size);
#elif defined(USESSE2)
count = bshuf_trans_bit_elem_SSE(in, out, size, elem_size);
#elif defined(USEARMNEON)
count = bshuf_trans_bit_elem_NEON(in, out, size, elem_size);
#else
count = bshuf_trans_bit_elem_scal(in, out, size, elem_size);
#endif
return count;
}
int64_t bshuf_untrans_bit_elem(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
#ifdef USEAVX2
count = bshuf_untrans_bit_elem_AVX(in, out, size, elem_size);
#elif defined(USESSE2)
count = bshuf_untrans_bit_elem_SSE(in, out, size, elem_size);
#elif defined(USEARMNEON)
count = bshuf_untrans_bit_elem_NEON(in, out, size, elem_size);
#else
count = bshuf_untrans_bit_elem_scal(in, out, size, elem_size);
#endif
return count;
}
/* ---- Wrappers for implementing blocking ---- */
/* Wrap a function for processing a single block to process an entire buffer in
* parallel. */
int64_t bshuf_blocked_wrap_fun(bshufBlockFunDef fun, const void* in, void* out, \
const size_t size, const size_t elem_size, size_t block_size) {
omp_size_t ii = 0;
int64_t err = 0;
int64_t count, cum_count=0;
size_t last_block_size;
size_t leftover_bytes;
size_t this_iter;
char *last_in;
char *last_out;
ioc_chain C;
ioc_init(&C, in, out);
if (block_size == 0) {
block_size = bshuf_default_block_size(elem_size);
}
if (block_size % BSHUF_BLOCKED_MULT) return -81;
#if defined(_OPENMP)
#pragma omp parallel for schedule(dynamic, 1) \
private(count) reduction(+ : cum_count)
#endif
for (ii = 0; ii < (omp_size_t)( size / block_size ); ii ++) {
count = fun(&C, block_size, elem_size);
if (count < 0) err = count;
cum_count += count;
}
last_block_size = size % block_size;
last_block_size = last_block_size - last_block_size % BSHUF_BLOCKED_MULT;
if (last_block_size) {
count = fun(&C, last_block_size, elem_size);
if (count < 0) err = count;
cum_count += count;
}
if (err < 0) return err;
leftover_bytes = size % BSHUF_BLOCKED_MULT * elem_size;
last_in = (char *) ioc_get_in(&C, &this_iter);
ioc_set_next_in(&C, &this_iter, (void *) (last_in + leftover_bytes));
last_out = (char *) ioc_get_out(&C, &this_iter);
ioc_set_next_out(&C, &this_iter, (void *) (last_out + leftover_bytes));
memcpy(last_out, last_in, leftover_bytes);
ioc_destroy(&C);
return cum_count + leftover_bytes;
}
/* Bitshuffle a single block. */
int64_t bshuf_bitshuffle_block(ioc_chain *C_ptr, \
const size_t size, const size_t elem_size) {
size_t this_iter;
const void *in;
void *out;
int64_t count;
in = ioc_get_in(C_ptr, &this_iter);
ioc_set_next_in(C_ptr, &this_iter,
(void*) ((char*) in + size * elem_size));
out = ioc_get_out(C_ptr, &this_iter);
ioc_set_next_out(C_ptr, &this_iter,
(void *) ((char *) out + size * elem_size));
count = bshuf_trans_bit_elem(in, out, size, elem_size);
return count;
}
/* Bitunshuffle a single block. */
int64_t bshuf_bitunshuffle_block(ioc_chain* C_ptr, \
const size_t size, const size_t elem_size) {
size_t this_iter;
const void *in;
void *out;
int64_t count;
in = ioc_get_in(C_ptr, &this_iter);
ioc_set_next_in(C_ptr, &this_iter,
(void*) ((char*) in + size * elem_size));
out = ioc_get_out(C_ptr, &this_iter);
ioc_set_next_out(C_ptr, &this_iter,
(void *) ((char *) out + size * elem_size));
count = bshuf_untrans_bit_elem(in, out, size, elem_size);
return count;
}
/* Write a 64 bit unsigned integer to a buffer in big endian order. */
void bshuf_write_uint64_BE(void* buf, uint64_t num) {
int ii;
uint8_t* b = (uint8_t*) buf;
uint64_t pow28 = 1 << 8;
for (ii = 7; ii >= 0; ii--) {
b[ii] = num % pow28;
num = num / pow28;
}
}
/* Read a 64 bit unsigned integer from a buffer in big endian order. */
uint64_t bshuf_read_uint64_BE(void* buf) {
int ii;
uint8_t* b = (uint8_t*) buf;
uint64_t num = 0, pow28 = 1 << 8, cp = 1;
for (ii = 7; ii >= 0; ii--) {
num += b[ii] * cp;
cp *= pow28;
}
return num;
}
/* Write a 32 bit unsigned integer to a buffer in big endian order. */
void bshuf_write_uint32_BE(void* buf, uint32_t num) {
int ii;
uint8_t* b = (uint8_t*) buf;
uint32_t pow28 = 1 << 8;
for (ii = 3; ii >= 0; ii--) {
b[ii] = num % pow28;
num = num / pow28;
}
}
/* Read a 32 bit unsigned integer from a buffer in big endian order. */
uint32_t bshuf_read_uint32_BE(const void* buf) {
int ii;
uint8_t* b = (uint8_t*) buf;
uint32_t num = 0, pow28 = 1 << 8, cp = 1;
for (ii = 3; ii >= 0; ii--) {
num += b[ii] * cp;
cp *= pow28;
}
return num;
}
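/* Hedged usage sketch (not part of the original source): the 32 bit
 * big endian helpers above round trip a value regardless of host
 * endianness. The example function name is hypothetical and the block
 * is guarded out so it does not affect this translation unit. */
#if 0
static void example_uint32_BE(void) {
    uint8_t buf[4];
    bshuf_write_uint32_BE(buf, 0x01020304u);
    /* buf[0] == 0x01 ... buf[3] == 0x04 on any host. */
    uint32_t back = bshuf_read_uint32_BE(buf);
    (void) back; /* back == 0x01020304u */
}
#endif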
/* ---- Public functions ----
*
* See header file for description and usage.
*
*/
size_t bshuf_default_block_size(const size_t elem_size) {
// This function needs to be absolutely stable between versions.
// Otherwise encoded data will not be decodable.
size_t block_size = BSHUF_TARGET_BLOCK_SIZE_B / elem_size;
// Ensure it is a required multiple.
block_size = (block_size / BSHUF_BLOCKED_MULT) * BSHUF_BLOCKED_MULT;
return MAX(block_size, BSHUF_MIN_RECOMMEND_BLOCK);
}
int64_t bshuf_bitshuffle(const void* in, void* out, const size_t size,
const size_t elem_size, size_t block_size) {
return bshuf_blocked_wrap_fun(&bshuf_bitshuffle_block, in, out, size,
elem_size, block_size);
}
int64_t bshuf_bitunshuffle(const void* in, void* out, const size_t size,
const size_t elem_size, size_t block_size) {
return bshuf_blocked_wrap_fun(&bshuf_bitunshuffle_block, in, out, size,
elem_size, block_size);
}
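/* Hedged usage sketch (not part of the original source): a round trip
 * through the public API above, bitshuffling a buffer of 32 bit elements
 * and then bitunshuffling it back. Passing block_size = 0 lets the library
 * pick bshuf_default_block_size(). The function name is hypothetical and
 * the block is guarded out so it does not affect this translation unit. */
#if 0
#include <assert.h>
#include <string.h>
static void example_bitshuffle_roundtrip(void) {
    enum { N = 2048 };                         /* number of elements */
    uint32_t src[N], shuf[N], back[N];
    size_t ii;
    for (ii = 0; ii < N; ii++) src[ii] = (uint32_t) (ii * 2654435761u);
    int64_t c1 = bshuf_bitshuffle(src, shuf, N, sizeof(uint32_t), 0);
    int64_t c2 = bshuf_bitunshuffle(shuf, back, N, sizeof(uint32_t), 0);
    assert(c1 > 0 && c2 > 0);                  /* negative values signal errors */
    assert(memcmp(src, back, sizeof(src)) == 0);
}
#endif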
#undef TRANS_BIT_8X8
#undef TRANS_ELEM_TYPE
#undef MAX
#undef CHECK_MULT_EIGHT
#undef CHECK_ERR_FREE
#undef USESSE2
#undef USEAVX2
|
pr96424.c | /* PR tree-optimization/96424 */
/* { dg-do compile } */
/* { dg-options "-fopenmp -O0 -fexceptions -fnon-call-exceptions -fprofile-use -Wno-missing-profile" } */
void
foo (void)
{
int i, j;
#pragma omp for collapse (2)
for (i = 0; i < 10; ++i)
for (j = 0; j <= i; ++j)
;
}
void
bar (void)
{
int i, j;
#pragma omp for collapse (2)
for (i = 0; i < 10; ++i)
for (j = 0; j < i; ++j)
;
}
|
utils.c |
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "utils.h"
// #include "lapacke.h"
/* Auxiliary routine: printing a matrix */
// void print_matrix( char* desc, lapack_int m, lapack_int n, double* a, lapack_int lda )
// {
// lapack_int i, j;
// printf( "\n %s\n", desc );
// for( i = 0; i < m; i++ ) {
// for( j = 0; j < n; j++ ) printf( " %6.2f", a[i*lda+j] );
// printf( "\n" );
// }
// }
// /* Auxiliary routine: printing a vector of integers */
// void print_int_vector( char* desc, lapack_int n, lapack_int* a )
// {
// lapack_int j;
// printf( "\n %s\n", desc );
// for( j = 0; j < n; j++ ) printf( " %6i", a[j] );
// printf( "\n" );
// }
// void testlapacke()
// {
// // export LD_LIBRARY_PATH=/anaconda3/envs/owcsimpy-dev/lib/:$LD_LIBRARY_PATH
// int N = 5;
// int NRHS = 3;
// int LDA = N;
// int LDB = NRHS;
// lapack_int n = N, nrhs = NRHS, lda = LDA, ldb = LDB, info;
// lapack_int ipiv[5];
// double a[5*5] = {
// 6.80, -6.05, -0.45, 8.32, -9.67,
// -2.11, -3.30, 2.58, 2.71, -5.14,
// 5.66, 5.36, -2.70, 4.35, -7.26,
// 5.97, -4.44, 0.27, -7.17, 6.08,
// 8.23, 1.08, 9.04, 2.14, -6.87
// };
// double b[3*5] = {
// 4.02, -1.56, 9.81,
// 6.19, 4.00, -4.09,
// -8.22, -8.67, -4.57,
// -7.57, 1.75, -8.61,
// -3.03, 2.86, 8.99
// };
// double aNorm;
// double rcond;
// char ONE_NORM = '1';
// lapack_int NROWS = n;
// lapack_int NCOLS = n;
// lapack_int LEADING_DIMENSION_A = n;
// /* Print Entry Matrix */
// print_matrix( "Entry Matrix A", n, n, a, lda );
// /* Print Right Rand Side */
// print_matrix( "Right Rand Side", n, nrhs, b, ldb );
// printf( "\n" );
// /* Executable statements */
// printf( "LAPACKE_dgecon Example Program Results\n" );
// aNorm = LAPACKE_dlange(LAPACK_ROW_MAJOR, ONE_NORM, NROWS, NCOLS, a, LEADING_DIMENSION_A);
// info = LAPACKE_dgetrf(LAPACK_ROW_MAJOR, NROWS, NCOLS, a, LEADING_DIMENSION_A, ipiv);
// info = LAPACKE_dgecon(LAPACK_ROW_MAJOR, ONE_NORM, n, a, LEADING_DIMENSION_A, aNorm, &rcond); // aNorm should be 35.019999999999996
// double work[4*N];
// int iwork[N];
// //info = LAPACKE_dgecon_work(LAPACK_ROW_MAJOR, ONE_NORM, n, a, LEADING_DIMENSION_A, aNorm, &rcond, work, iwork); // aNorm should be 35.019999999999996
// //dgecon_( &ONE_NORM, &n, a, &LEADING_DIMENSION_A, &aNorm, &rcond, work, iwork, &info );
// /* Check for the exact singularity */
// if (info == 0)
// {
// printf("LAPACKE_dgecon completed SUCCESSFULLY...\n");
// }
// else if ( info < 0 )
// {
// printf( "Element %d of A had an illegal value\n", -info );
// exit( 1 );
// }
// else
// {
// printf( "Unrecognized value of INFO = %d\n", info );
// exit( 1 );
// }
// /* Print solution */
// printf("LAPACKE_dlange / One-norm of A = %lf\n", aNorm);
// printf("LAPACKE_dgecon / RCOND of A = %f\n", rcond);
// }
double calcDot_c(Point p1, Point p2)
{
return p1.x*p2.x+p1.y*p2.y+p1.z*p2.z;
}
Point calcAdd_c(Point p1, Point p2)
{
Point res;
res.x = p1.x+p2.x;
res.y = p1.y+p2.y;
res.z = p1.z+p2.z;
return res;
}
Point calcMult_c(double a,Point p1)
{
Point res;
res.x = a*p1.x;
res.y = a*p1.y;
res.z = a*p1.z;
return res;
}
// FIXME: change the type into bool or char
double checkBlockage_c(Point ctrPoint1, Point ctrPoint2, Point ctrPointB,
Point normalVectB, Point vertsB[4], const double areaB)
{
const double verysmall = 1e-6;
int i;
// FIXME: static; only handles rectangles, update in a future release
Point verts[5];
Point triVerts[4];
// Point *verts = malloc(5*sizeof(Point)); // rectangle
// Point *triVerts = malloc(4*sizeof(Point)); // triangle
for (i=0;i<4;i++)
{
verts[i] = vertsB[i];
// printf("%f,%f,%f \n",vertsB[i].x,vertsB[i].y,vertsB[i].z);
}
verts[4] = vertsB[0];
// for (int i=0;i<5;i++)
// printf("%f,%f,%f \n",verts[i].x,verts[i].y,verts[i].z);
// double area = calcArea3DPoly_c(4,verts,normalVectB);
Point u,w;
u = calcAdd_c(ctrPoint2,calcMult_c(-1,ctrPoint1));
w = calcAdd_c(ctrPoint1,calcMult_c(-1,ctrPointB));
// u.x = ctrPoint2.x-ctrPoint1.x;
// u.y = ctrPoint2.y-ctrPoint1.y;
// u.z = ctrPoint2.z-ctrPoint1.z;
// w.x = ctrPoint1.x-ctrPointB.x;
// w.y = ctrPoint1.y-ctrPointB.y;
// w.z = ctrPoint1.z-ctrPointB.z;
double dot = calcDot_c(normalVectB,u);
// Sum of triangles' areas
double totalarea = 0;
// double area = 0; // Debug-purpose
if (fabs(dot) < verysmall) // Line segment and the planeB is parallel
{
// printf("dot:%f\n",dot);
// free(verts);
// free(triVerts);
return 0; // Not blocked
}
else
{
// Parametric value of the intersecting point
double ti = -1*calcDot_c(normalVectB,w)/calcDot_c(normalVectB,u);
// if(!(0 <= ti && ti <= 1) )
if(!(0 < ti && ti < 1) )
{
/*
This is when the intersecting point is not between plane1 and plane2.
In addition, a strict inequality is needed to handle the case when
the intersecting point comes from the same plane as the tail or head
points.
*/
// free(verts);
// free(triVerts);
return 0;
}
else
{
// The intersecting point
Point intersectingPoint = calcAdd_c(ctrPoint1,calcMult_c(ti,u));
// Calculate triangles' areas
for(i=0;i<4;i++) // loop for 4 triangles
{
// Assign triangles' vertices
triVerts[0] = intersectingPoint;
triVerts[1] = verts[i];
triVerts[2] = verts[i+1];
triVerts[3] = intersectingPoint;
totalarea += calcArea3DPoly_c(3,triVerts,normalVectB);
// Debug
// area = calcArea3DPoly_c(3,triVerts,normalVectB);
// totalarea += area;
// printf("i:%d,area:%f\n",i,area);
}
// printf("totalarea:%f\n",i,totalarea);
// free(verts);
// free(triVerts);
if(fabs(totalarea-areaB) < verysmall)
return 1; // Blocked
else
return 0; // Not blocked
// return totalarea;
}
}
}
// see: http://geomalgorithms.com/a01-_area.html
double calcArea3DPoly_c( int n, Point* V, Point N )
{
float area = 0;
float an, ax, ay, az; // abs value of normal and its coords
int coord; // coord to ignore: 1=x, 2=y, 3=z
int i, j, k; // loop indices
if (n < 3) return 0; // a degenerate polygon
// select largest abs coordinate to ignore for projection
ax = (N.x>0 ? N.x : -N.x); // abs x-coord
ay = (N.y>0 ? N.y : -N.y); // abs y-coord
az = (N.z>0 ? N.z : -N.z); // abs z-coord
coord = 3; // ignore z-coord
if (ax > ay) {
if (ax > az) coord = 1; // ignore x-coord
}
else if (ay > az) coord = 2; // ignore y-coord
// compute area of the 2D projection
switch (coord) {
case 1:
for (i=1, j=2, k=0; i<n; i++, j++, k++)
area += (V[i].y * (V[j].z - V[k].z));
break;
case 2:
for (i=1, j=2, k=0; i<n; i++, j++, k++)
area += (V[i].z * (V[j].x - V[k].x));
break;
case 3:
for (i=1, j=2, k=0; i<n; i++, j++, k++)
area += (V[i].x * (V[j].y - V[k].y));
break;
}
switch (coord) { // wrap-around term
case 1:
area += (V[n].y * (V[1].z - V[n-1].z));
break;
case 2:
area += (V[n].z * (V[1].x - V[n-1].x));
break;
case 3:
area += (V[n].x * (V[1].y - V[n-1].y));
break;
}
// scale to get area before projection
an = sqrt( ax*ax + ay*ay + az*az); // length of normal vector
switch (coord) {
case 1:
area *= (an / (2 * N.x));
break;
case 2:
area *= (an / (2 * N.y));
break;
case 3:
area *= (an / (2 * N.z));
}
return fabs(area); // absolute value added here since the projected area is signed
}
double calcAngle_c( Point v1, Point v2 )
{
double length_v1 = v1.x*v1.x+v1.y*v1.y+v1.z*v1.z;
double length_v2 = v2.x*v2.x+v2.y*v2.y+v2.z*v2.z;
double dot = (v1.x*v2.x+v1.y*v2.y+v1.z*v2.z)/sqrt(length_v1*length_v2);
// Clipping to -1 to 1 for numerical stability
dot = ( dot < -1.0 ? -1.0 : ( dot > 1.0 ? 1.0 : dot ) );
double angle = acos(dot);
return angle;
}
// see:
// https://github.com/chtran/computer_vision/blob/master/proj3/code/vlfeat/vl/rodrigues.c
// https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula
void calcRodriguesMtx_c(const double angle,const double* k_pt, double* R_pt)
{
#define k(i) k_pt[(i)]
#define R(i,j) R_pt[(i)+3*(j)]
const double verysmall = 1e-6;
double kx = k_pt[0];
double ky = k_pt[1];
double kz = k_pt[2];
double length_k = sqrt(kx*kx +
ky*ky +
kz*kz ) ;
if (length_k < verysmall)
{
R(0,0) = 1.0 ; R(0,1) = 0.0 ; R(0,2) = 0.0 ;
R(1,0) = 0.0 ; R(1,1) = 1.0 ; R(1,2) = 0.0 ;
R(2,0) = 0.0 ; R(2,1) = 0.0 ; R(2,2) = 1.0 ;
}
else
{
double x = kx / length_k ;
double y = ky / length_k ;
double z = kz / length_k ;
double xx = x*x ;
double xy = x*y ;
double xz = x*z ;
double yy = y*y ;
double yz = y*z ;
double zz = z*z ;
const double yx = xy ;
// const double zx = xz ;
// const double zy = yz ;
double sth = sin(-angle) ;
double cth = cos(-angle) ;
double mcth = 1.0 - cth ;
R(0,0) = 1 - mcth * (yy+zz) ;
R(1,0) = sth*z + mcth * xy ;
R(2,0) = - sth*y + mcth * xz ;
R(0,1) = - sth*z + mcth * yx ;
R(1,1) = 1 - mcth * (zz+xx) ;
R(2,1) = sth*x + mcth * yz ;
R(0,2) = sth*y + mcth * xz ;
R(1,2) = - sth*x + mcth * yz ;
R(2,2) = 1 - mcth * (xx+yy) ;
}
return;
}
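/* Hedged usage sketch (not part of the original file): build a rotation
 * matrix about the z axis with calcRodriguesMtx_c and apply it to a
 * vector. R_pt is laid out with R(i,j) = R_pt[(i)+3*(j)], so applying it
 * to a column vector v is v_out[i] = sum_j R_pt[i + 3*j] * v[j]. Note the
 * implementation uses sin(-angle)/cos(-angle), so the rotation sense
 * follows that convention. Names here are hypothetical; the block is
 * guarded out so it does not affect this translation unit. */
#if 0
static void example_rodrigues(void)
{
    double axis[3] = {0.0, 0.0, 1.0};   /* rotation axis: z */
    double Rm[9];                       /* 3x3 matrix, column-major */
    double v[3] = {1.0, 0.0, 0.0};      /* vector to rotate: x axis */
    double v_rot[3];
    int i, j;
    calcRodriguesMtx_c(M_PI / 2.0, axis, Rm);
    for (i = 0; i < 3; i++)
    {
        v_rot[i] = 0.0;
        for (j = 0; j < 3; j++)
            v_rot[i] += Rm[i + 3 * j] * v[j];
    }
    /* With this sign convention the x axis maps to (0, -1, 0). */
    printf("rotated: %f %f %f\n", v_rot[0], v_rot[1], v_rot[2]);
}
#endif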
void initPoint(Point *pt, double x, double y, double z)
{
pt->x = x;
pt->y = y;
pt->z = z;
}
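/* Hedged usage sketch (not part of the original file): a segment from
 * (0,0,-1) to (0,0,1) passes through a unit square in the z = 0 plane
 * centered at the origin, so checkBlockage_c should report 1 (blocked).
 * The function name is hypothetical; the block is guarded out so it does
 * not affect this translation unit. */
#if 0
static void example_checkBlockage(void)
{
    Point p1, p2, ctrB, nB, vertsB[4];
    initPoint(&p1, 0.0, 0.0, -1.0);
    initPoint(&p2, 0.0, 0.0, 1.0);
    initPoint(&ctrB, 0.0, 0.0, 0.0);
    initPoint(&nB, 0.0, 0.0, 1.0);
    initPoint(&vertsB[0], -0.5, -0.5, 0.0);  /* rectangle vertices in order */
    initPoint(&vertsB[1],  0.5, -0.5, 0.0);
    initPoint(&vertsB[2],  0.5,  0.5, 0.0);
    initPoint(&vertsB[3], -0.5,  0.5, 0.0);
    double blocked = checkBlockage_c(p1, p2, ctrB, nB, vertsB, 1.0);
    printf("blocked: %f\n", blocked);        /* expected: 1.000000 */
}
#endif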
// JUNK
// void getPowerDelayMtxAll_c(int N,
// BareDetector collector,PointSource emitter,
// SimplePlane* planes, double* powMtx,double* delayMtx,
// double* powVect_t,double* delayVect_t,
// double* powVect_r,double* delayVect_r)
// {
// /*
// Refer to eq. (18).
// Assumptions:
// m =1
// FoV = pi/2
// Currently assume no blockage to test the speed.
// */
// #define isClose(a,b) fabs((a) - (b)) <= (1e-08 + 1e-05 * fabs(b))
// #define P(i,j) powMtx[(j)+N*(i)] // column-wise (row-major)
// #define D(i,j) delayMtx[(j)+N*(i)] // column-wise (row-major)
// const int speed_of_light = 299792458;
// int i;
// int k;
// #pragma omp parallel for
// for(i=0;i<N;i++)
// { // Iteration over the row (collector)
// // This is very important to define it inside the loop if you
// // want to use openmp
// char isVisible; // Need the smallest size. Hence using char
// double vartheta,psi;
// char isVisible_t; // Need the smallest size. Hence using char
// double vartheta_t,psi_t;
// char isVisible_r; // Need the smallest size. Hence using char
// double vartheta_r,psi_r;
// // For t_f
// isVisible_t = 1;
// vartheta_t = calcAngle_c(emitter.normalVect,calcAdd_c(planes[i].ctrPoint,calcMult_c(-1,emitter.ctrPoint)));
// psi_t = calcAngle_c(planes[i].normalVect,calcAdd_c(emitter.ctrPoint,calcMult_c(-1,planes[i].ctrPoint)));
// // Visible only when 0 <= vartheta <= pi/2 and 0<= psi <= pi/2
// if (!(vartheta_t>=0 && vartheta_t < M_PI/2) || !(psi_t>=0 && psi_t < M_PI/2))
// {
// isVisible_t = 0;
// }
// if (isClose(vartheta_t,M_PI/2) || isClose(psi_t,M_PI/2))
// {
// isVisible_t = 0;
// }
// // distance
// Point dist = calcAdd_c(planes[i].ctrPoint,calcMult_c(-1,emitter.ctrPoint));
// double d_ki_square = calcDot_c(dist,dist);
// if(!isVisible_t)
// {
// powVect_t[i] = 0;
// delayVect_t[i] = 0;
// }
// else
// {
// powVect_t[i] = ((emitter.m+1)/(2*M_PI))*pow(cos(vartheta_t),emitter.m)*planes[i].area*cos(psi_t)/d_ki_square;
// delayVect_t[i] = sqrt(d_ki_square)/speed_of_light;
// }
// // End for t_f
// // For r_f
// isVisible_r = 1;
// vartheta_r = calcAngle_c(planes[i].normalVect,calcAdd_c(collector.ctrPoint,calcMult_c(-1,planes[i].ctrPoint)));
// psi_r = calcAngle_c(collector.normalVect,calcAdd_c(planes[i].ctrPoint,calcMult_c(-1,collector.ctrPoint)));
// // Visible only when 0 <= vartheta <= pi/2 and 0<= psi <= pi/2
// if (!(vartheta_r>=0 && vartheta_r < M_PI/2) || !(psi_r>=0 && psi_r < collector.FoV))
// {
// isVisible_r = 0;
// }
// if (isClose(vartheta_r,M_PI/2) || isClose(psi_r,collector.FoV))
// {
// isVisible_r = 0;
// }
// // distance
// dist = calcAdd_c(collector.ctrPoint,calcMult_c(-1,planes[i].ctrPoint));
// d_ki_square = calcDot_c(dist,dist);
// if(!isVisible_r)
// {
// powVect_r[i] = 0;
// delayVect_r[i] = 0;
// }
// else
// {
// powVect_r[i] = (1/M_PI)*cos(vartheta_r)*collector.area*cos(psi_r)/d_ki_square;
// delayVect_r[i] = sqrt(d_ki_square)/speed_of_light;
// }
// // End for r_f
// for(k=0;k<N;k++)
// { // Iteration over the column (emitter)
// if(i!=k)
// {
// // Calculate visibility based on angles
// isVisible = 1;
// vartheta = calcAngle_c(planes[k].normalVect,calcAdd_c(planes[i].ctrPoint,calcMult_c(-1,planes[k].ctrPoint)));
// psi = calcAngle_c(planes[i].normalVect,calcAdd_c(planes[k].ctrPoint,calcMult_c(-1,planes[i].ctrPoint)));
// // Visible only when 0 <= vartheta <= pi/2 and 0<= psi <= pi/2
// if (!(vartheta>=0 && vartheta < M_PI/2) || !(psi>=0 && psi < M_PI/2))
// {
// isVisible = 0;
// }
// if (isClose(vartheta,M_PI/2) || isClose(psi,M_PI/2))
// {
// isVisible = 0;
// }
// // distance
// dist = calcAdd_c(planes[i].ctrPoint,calcMult_c(-1,planes[k].ctrPoint));
// d_ki_square = calcDot_c(dist,dist);
// if(!isVisible)
// {
// P(i,k) = 0;
// D(i,k) = 0;
// }
// else
// {
// P(i,k) = (1/M_PI)*cos(vartheta)*planes[i].area*cos(psi)/d_ki_square;
// D(i,k) = sqrt(d_ki_square)/speed_of_light;
// }
// }
// else
// {
// P(i,k) = 0;
// D(i,k) = 0;
// }
// }
// }
// }
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 32;
tile_size[1] = 32;
tile_size[2] = 4;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,16);t1++) {
lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32));
ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(32*t2-Nz,4)),4*t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(16*t1+Ny+29,4)),floord(32*t2+Ny+28,4)),floord(32*t1-32*t2+Nz+Ny+27,4));t3++) {
for (t4=max(max(max(0,ceild(t1-3,4)),ceild(32*t2-Nz-60,64)),ceild(4*t3-Ny-60,64));t4<=min(min(min(min(floord(4*t3+Nx,64),floord(Nt+Nx-4,64)),floord(16*t1+Nx+29,64)),floord(32*t2+Nx+28,64)),floord(32*t1-32*t2+Nz+Nx+27,64));t4++) {
for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),4*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),4*t3+2),64*t4+62),32*t1-32*t2+Nz+29);t5++) {
for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
lbv=max(64*t4,t5+1);
ubv=min(64*t4+63,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (left commented out: freeing here caused performance degradation)
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
GB_binop__lt_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lt_int32)
// A.*B function (eWiseMult): GB (_AemultB_01__lt_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__lt_int32)
// A.*B function (eWiseMult): GB (_AemultB_03__lt_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_int32)
// A*D function (colscale): GB (_AxD__lt_int32)
// D*A function (rowscale): GB (_DxB__lt_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__lt_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__lt_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_int32)
// C=scalar+B GB (_bind1st__lt_int32)
// C=scalar+B' GB (_bind1st_tran__lt_int32)
// C=A+scalar GB (_bind2nd__lt_int32)
// C=A'+scalar GB (_bind2nd_tran__lt_int32)
// C type: bool
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LT || GxB_NO_INT32 || GxB_NO_LT_INT32)
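// Illustrative expansion (a sketch, not code consumed by the templates):
// for a single pair of entries, the macros above amount to
//     int32_t aij = GBX (Ax, pA, A_iso) ;
//     int32_t bij = GBX (Bx, pB, B_iso) ;
//     bool cij ;
//     cij = (aij < bij) ;
//     Cx [p] = cij ;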
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__lt_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__lt_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__lt_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__lt_int32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__lt_int32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__lt_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__lt_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__lt_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__lt_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__lt_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__lt_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int32_t bij = GBX (Bx, p, false) ;
Cx [p] = (x < bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__lt_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int32_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij < y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
GrB_Info GB (_bind1st_tran__lt_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
GrB_Info GB (_bind2nd_tran__lt_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DRB018-plusplus-orig-yes.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISO/IEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Data race on outLen due to the ++ operation.
Adding private(outLen) would avoid the race, but it is semantically wrong.
The race on outLen in turn makes the writes to output[outLen++] race as well.
Data race pairs (we allow two pairs to preserve the original code pattern):
1. outLen@72 vs. outLen@72
2. output[]@72 vs. output[]@72
*/
#include <stdlib.h>
#include <stdio.h>
int input[1000];
int output[1000];
int main()
{
int i;
int inLen = 1000;
int outLen = 0;
int _ret_val_0;
#pragma cetus private(i)
#pragma loop name main#0
#pragma cetus parallel
#pragma omp parallel for private(i)
for (i=0; i<inLen; ++ i)
{
input[i]=i;
}
#pragma cetus private(i)
#pragma loop name main#1
for (i=0; i<inLen; ++ i)
{
output[outLen ++ ]=input[i];
}
printf("output[500]=%d\n", output[500]);
_ret_val_0=0;
return _ret_val_0;
}
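/* A race-free variant of the copy pattern described in the header comment
 * (an illustrative sketch, not part of the DataRaceBench kernel): each
 * iteration claims an output slot with an OpenMP atomic capture, so the
 * shared counter and the array element are updated without a data race.
 * The order of elements in out[] becomes nondeterministic, which is fine
 * when every input element only needs to be copied exactly once. */
void copy_without_race(const int *in, int *out, int n, int *out_len)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < n; ++i)
    {
        int slot;
        /* atomically fetch-and-increment the shared output length */
        #pragma omp atomic capture
        slot = (*out_len)++;
        out[slot] = in[i];
    }
}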
|
draw.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R R A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Define declarations.
*/
#define BezierQuantum 200
#define DrawEpsilon (1.0e-10)
/*
Typedef declarations.
*/
typedef struct _EdgeInfo
{
SegmentInfo
bounds;
double
scanline;
PointInfo
*points;
size_t
number_points;
ssize_t
direction;
MagickBooleanType
ghostline;
size_t
highwater;
} EdgeInfo;
typedef struct _ElementInfo
{
double
cx,
cy,
major,
minor,
angle;
} ElementInfo;
typedef struct _PolygonInfo
{
EdgeInfo
*edges;
size_t
number_edges;
} PolygonInfo;
typedef enum
{
MoveToCode,
OpenCode,
GhostlineCode,
LineToCode,
EndCode
} PathInfoCode;
typedef struct _PathInfo
{
PointInfo
point;
PathInfoCode
code;
} PathInfo;
/*
Forward declarations.
*/
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
ExceptionInfo *);
static PrimitiveInfo
*TraceStrokePolygon(const DrawInfo *,const PrimitiveInfo *);
static size_t
TracePath(PrimitiveInfo *,const char *);
static void
TraceArc(PrimitiveInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(PrimitiveInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(PrimitiveInfo *,const size_t),
TraceCircle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceEllipse(PrimitiveInfo *,const PointInfo,const PointInfo,
const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(PrimitiveInfo *,const PointInfo,const PointInfo,
PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
DrawInfo
*draw_info;
draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info));
GetDrawInfo((ImageInfo *) NULL,draw_info);
return(draw_info);
}
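/*
  Illustrative lifecycle sketch (not part of MagickCore itself; the image and
  exception variables are assumed to exist in the caller):

    DrawInfo *draw_info = AcquireDrawInfo();
    draw_info->stroke_width = 2.0;
    (void) CloneString(&draw_info->primitive, "line 10,10 100,100");
    (void) DrawImage(image, draw_info, exception);
    draw_info = DestroyDrawInfo(draw_info);
*/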
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created initialized to default
% values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
const DrawInfo *draw_info)
{
DrawInfo
*clone_info;
ExceptionInfo
*exception;
clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
GetDrawInfo(image_info,clone_info);
if (draw_info == (DrawInfo *) NULL)
return(clone_info);
exception=AcquireExceptionInfo();
if (draw_info->primitive != (char *) NULL)
(void) CloneString(&clone_info->primitive,draw_info->primitive);
if (draw_info->geometry != (char *) NULL)
(void) CloneString(&clone_info->geometry,draw_info->geometry);
clone_info->viewbox=draw_info->viewbox;
clone_info->affine=draw_info->affine;
clone_info->gravity=draw_info->gravity;
clone_info->fill=draw_info->fill;
clone_info->stroke=draw_info->stroke;
clone_info->stroke_width=draw_info->stroke_width;
if (draw_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
exception);
if (draw_info->stroke_pattern != (Image *) NULL)
clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
MagickTrue,exception);
clone_info->stroke_antialias=draw_info->stroke_antialias;
clone_info->text_antialias=draw_info->text_antialias;
clone_info->fill_rule=draw_info->fill_rule;
clone_info->linecap=draw_info->linecap;
clone_info->linejoin=draw_info->linejoin;
clone_info->miterlimit=draw_info->miterlimit;
clone_info->dash_offset=draw_info->dash_offset;
clone_info->decorate=draw_info->decorate;
clone_info->compose=draw_info->compose;
if (draw_info->text != (char *) NULL)
(void) CloneString(&clone_info->text,draw_info->text);
if (draw_info->font != (char *) NULL)
(void) CloneString(&clone_info->font,draw_info->font);
if (draw_info->metrics != (char *) NULL)
(void) CloneString(&clone_info->metrics,draw_info->metrics);
if (draw_info->family != (char *) NULL)
(void) CloneString(&clone_info->family,draw_info->family);
clone_info->style=draw_info->style;
clone_info->stretch=draw_info->stretch;
clone_info->weight=draw_info->weight;
if (draw_info->encoding != (char *) NULL)
(void) CloneString(&clone_info->encoding,draw_info->encoding);
clone_info->pointsize=draw_info->pointsize;
clone_info->kerning=draw_info->kerning;
clone_info->interline_spacing=draw_info->interline_spacing;
clone_info->interword_spacing=draw_info->interword_spacing;
clone_info->direction=draw_info->direction;
if (draw_info->density != (char *) NULL)
(void) CloneString(&clone_info->density,draw_info->density);
clone_info->align=draw_info->align;
clone_info->undercolor=draw_info->undercolor;
clone_info->border_color=draw_info->border_color;
if (draw_info->server_name != (char *) NULL)
(void) CloneString(&clone_info->server_name,draw_info->server_name);
if (draw_info->dash_pattern != (double *) NULL)
{
register ssize_t
x;
for (x=0; fabs(draw_info->dash_pattern[x]) >= DrawEpsilon; x++) ;
clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) x+1UL,
sizeof(*clone_info->dash_pattern));
if (clone_info->dash_pattern == (double *) NULL)
ThrowFatalException(ResourceLimitFatalError,
"UnableToAllocateDashPattern");
(void) CopyMagickMemory(clone_info->dash_pattern,draw_info->dash_pattern,
(size_t) (x+1)*sizeof(*clone_info->dash_pattern));
}
clone_info->gradient=draw_info->gradient;
if (draw_info->gradient.stops != (StopInfo *) NULL)
{
size_t
number_stops;
number_stops=clone_info->gradient.number_stops;
clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
number_stops,sizeof(*clone_info->gradient.stops));
if (clone_info->gradient.stops == (StopInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,
"UnableToAllocateDashPattern");
(void) CopyMagickMemory(clone_info->gradient.stops,
draw_info->gradient.stops,(size_t) number_stops*
sizeof(*clone_info->gradient.stops));
}
if (draw_info->clip_mask != (char *) NULL)
(void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
clone_info->bounds=draw_info->bounds;
clone_info->clip_units=draw_info->clip_units;
clone_info->render=draw_info->render;
clone_info->fill_alpha=draw_info->fill_alpha;
clone_info->stroke_alpha=draw_info->stroke_alpha;
clone_info->element_reference=draw_info->element_reference;
clone_info->debug=IsEventLogging();
exception=DestroyExceptionInfo(exception);
return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const DrawInfo *draw_info,
% const PathInfo *path_info)
%
% A description of each parameter follows:
%
% o Method ConvertPathToPolygon returns the path in a more efficient sorted
% rendering form of type PolygonInfo.
%
% o draw_info: Specifies a pointer to an DrawInfo structure.
%
% o path_info: Specifies a pointer to an PathInfo structure.
%
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int CompareEdges(const void *x,const void *y)
{
register const EdgeInfo
*p,
*q;
/*
Compare two edges.
*/
p=(const EdgeInfo *) x;
q=(const EdgeInfo *) y;
if ((p->points[0].y-DrawEpsilon) > q->points[0].y)
return(1);
if ((p->points[0].y+DrawEpsilon) < q->points[0].y)
return(-1);
if ((p->points[0].x-DrawEpsilon) > q->points[0].x)
return(1);
if ((p->points[0].x+DrawEpsilon) < q->points[0].x)
return(-1);
if (((p->points[1].x-p->points[0].x)*(q->points[1].y-q->points[0].y)-
(p->points[1].y-p->points[0].y)*(q->points[1].x-q->points[0].x)) > 0.0)
return(1);
return(-1);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
register EdgeInfo
*p;
register ssize_t
i,
j;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
p=polygon_info->edges;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
(double) i);
(void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
p->direction != MagickFalse ? "down" : "up");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
p->ghostline != MagickFalse ? "transparent" : "opaque");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
p->bounds.x2,p->bounds.y2);
for (j=0; j < (ssize_t) p->number_points; j++)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
p->points[j].x,p->points[j].y);
p++;
}
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}
static void ReversePoints(PointInfo *points,const size_t number_points)
{
PointInfo
point;
register ssize_t
i;
for (i=0; i < (ssize_t) (number_points >> 1); i++)
{
point=points[i];
points[i]=points[number_points-(i+1)];
points[number_points-(i+1)]=point;
}
}
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
{
long
direction,
next_direction;
PointInfo
point,
*points;
PolygonInfo
*polygon_info;
SegmentInfo
bounds;
register ssize_t
i,
n;
MagickBooleanType
ghostline;
size_t
edge,
number_edges,
number_points;
/*
Convert a path to the more efficient sorted rendering form.
*/
polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
if (polygon_info == (PolygonInfo *) NULL)
return((PolygonInfo *) NULL);
number_edges=16;
polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
(void) ResetMagickMemory(polygon_info->edges,0,number_edges*
sizeof(*polygon_info->edges));
direction=0;
edge=0;
ghostline=MagickFalse;
n=0;
number_points=0;
points=(PointInfo *) NULL;
(void) ResetMagickMemory(&point,0,sizeof(point));
(void) ResetMagickMemory(&bounds,0,sizeof(bounds));
for (i=0; path_info[i].code != EndCode; i++)
{
if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
(path_info[i].code == GhostlineCode))
{
/*
Move to.
*/
if ((points != (PointInfo *) NULL) && (n >= 2))
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
points=(PointInfo *) NULL;
ghostline=MagickFalse;
edge++;
}
if (points == (PointInfo *) NULL)
{
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
}
ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
point=path_info[i].point;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
direction=0;
n=1;
continue;
}
/*
Line to.
*/
next_direction=((path_info[i].point.y > point.y) ||
((fabs(path_info[i].point.y-point.y) < DrawEpsilon) &&
(path_info[i].point.x > point.x))) ? 1 : -1;
if ((points != (PointInfo *) NULL) && (direction != 0) &&
(direction != next_direction))
{
/*
New edge.
*/
point=points[n-1];
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
n=1;
ghostline=MagickFalse;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
edge++;
}
direction=next_direction;
if (points == (PointInfo *) NULL)
continue;
if (n == (ssize_t) number_points)
{
number_points<<=1;
points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
}
point=path_info[i].point;
points[n]=point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.x > bounds.x2)
bounds.x2=point.x;
n++;
}
if (points != (PointInfo *) NULL)
{
if (n < 2)
points=(PointInfo *) RelinquishMagickMemory(points);
else
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
ghostline=MagickFalse;
edge++;
}
}
polygon_info->number_edges=edge;
qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
sizeof(*polygon_info->edges),CompareEdges);
if (IsEventLogging() != MagickFalse)
LogPolygonInfo(polygon_info);
return(polygon_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o Method ConvertPrimitiveToPath returns a vector path structure of type
% PathInfo.
%
% o draw_info: a structure of type DrawInfo.
%
% o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
%
*/
static void LogPathInfo(const PathInfo *path_info)
{
register const PathInfo
*p;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
for (p=path_info; p->code != EndCode; p++)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
"moveto ghostline" : p->code == OpenCode ? "moveto open" :
p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" :
"?");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}
static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
{
PathInfo
*path_info;
PathInfoCode
code;
PointInfo
p,
q;
register ssize_t
i,
n;
ssize_t
coordinates,
start;
/*
Converts a PrimitiveInfo structure into a vector path structure.
*/
switch (primitive_info->primitive)
{
case AlphaPrimitive:
case ColorPrimitive:
case ImagePrimitive:
case PointPrimitive:
case TextPrimitive:
return((PathInfo *) NULL);
default:
break;
}
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
path_info=(PathInfo *) AcquireQuantumMemory((size_t) (2UL*i+3UL),
sizeof(*path_info));
if (path_info == (PathInfo *) NULL)
return((PathInfo *) NULL);
coordinates=0;
n=0;
p.x=(-1.0);
p.y=(-1.0);
q.x=(-1.0);
q.y=(-1.0);
start=0;
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
code=LineToCode;
if (coordinates <= 0)
{
coordinates=(ssize_t) primitive_info[i].coordinates;
p=primitive_info[i].point;
start=n;
code=MoveToCode;
}
coordinates--;
/*
Eliminate duplicate points.
*/
if ((i == 0) || (fabs(q.x-primitive_info[i].point.x) >= DrawEpsilon) ||
(fabs(q.y-primitive_info[i].point.y) >= DrawEpsilon))
{
path_info[n].code=code;
path_info[n].point=primitive_info[i].point;
q=primitive_info[i].point;
n++;
}
if (coordinates > 0)
continue;
if ((fabs(p.x-primitive_info[i].point.x) < DrawEpsilon) &&
(fabs(p.y-primitive_info[i].point.y) < DrawEpsilon))
continue;
/*
Mark the p point as open if it does not match the q.
*/
path_info[start].code=OpenCode;
path_info[n].code=GhostlineCode;
path_info[n].point=primitive_info[i].point;
n++;
path_info[n].code=LineToCode;
path_info[n].point=p;
n++;
}
path_info[n].code=EndCode;
path_info[n].point.x=0.0;
path_info[n].point.y=0.0;
if (IsEventLogging() != MagickFalse)
LogPathInfo(path_info);
return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyDrawInfo() deallocates memory associated with an DrawInfo
% structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (draw_info->debug != MagickFalse)
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if (draw_info->primitive != (char *) NULL)
draw_info->primitive=DestroyString(draw_info->primitive);
if (draw_info->text != (char *) NULL)
draw_info->text=DestroyString(draw_info->text);
if (draw_info->geometry != (char *) NULL)
draw_info->geometry=DestroyString(draw_info->geometry);
if (draw_info->fill_pattern != (Image *) NULL)
draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
if (draw_info->stroke_pattern != (Image *) NULL)
draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
if (draw_info->font != (char *) NULL)
draw_info->font=DestroyString(draw_info->font);
if (draw_info->metrics != (char *) NULL)
draw_info->metrics=DestroyString(draw_info->metrics);
if (draw_info->family != (char *) NULL)
draw_info->family=DestroyString(draw_info->family);
if (draw_info->encoding != (char *) NULL)
draw_info->encoding=DestroyString(draw_info->encoding);
if (draw_info->density != (char *) NULL)
draw_info->density=DestroyString(draw_info->density);
if (draw_info->server_name != (char *) NULL)
draw_info->server_name=(char *)
RelinquishMagickMemory(draw_info->server_name);
if (draw_info->dash_pattern != (double *) NULL)
draw_info->dash_pattern=(double *) RelinquishMagickMemory(
draw_info->dash_pattern);
if (draw_info->gradient.stops != (StopInfo *) NULL)
draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
draw_info->gradient.stops);
if (draw_info->clip_mask != (char *) NULL)
draw_info->clip_mask=DestroyString(draw_info->clip_mask);
draw_info->signature=(~MagickCoreSignature);
draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y E d g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyEdge() destroys the specified polygon edge.
%
% The format of the DestroyEdge method is:
%
% ssize_t DestroyEdge(PolygonInfo *polygon_info,const int edge)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
% o edge: the polygon edge number to destroy.
%
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,
const size_t edge)
{
assert(edge < polygon_info->number_edges);
polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
polygon_info->edges[edge].points);
polygon_info->number_edges--;
if (edge < polygon_info->number_edges)
(void) CopyMagickMemory(polygon_info->edges+edge,polygon_info->edges+edge+1,
(size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges));
return(polygon_info->number_edges);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P o l y g o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
% The format of the DestroyPolygonInfo method is:
%
% PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
register ssize_t
i;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
polygon_info->edges[i].points=(PointInfo *)
RelinquishMagickMemory(polygon_info->edges[i].points);
polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(polygon_info->edges);
return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
% o exception: return any errors or warnings in this structure.
%
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
const double y,const SegmentInfo *edge)
{
double
intercept,
z;
register double
x;
SegmentInfo
inverse_edge;
/*
Determine left and right edges.
*/
inverse_edge.x1=edge->x1;
inverse_edge.y1=edge->y1;
inverse_edge.x2=edge->x2;
inverse_edge.y2=edge->y2;
z=affine->ry*y+affine->tx;
if (affine->sx >= DrawEpsilon)
{
intercept=(-z/affine->sx);
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z+(double) image->columns)/affine->sx;
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if (affine->sx < -DrawEpsilon)
{
intercept=(-z+(double) image->columns)/affine->sx;
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z/affine->sx);
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
{
inverse_edge.x2=edge->x1;
return(inverse_edge);
}
/*
Determine top and bottom edges.
*/
z=affine->sy*y+affine->ty;
if (affine->rx >= DrawEpsilon)
{
intercept=(-z/affine->rx);
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z+(double) image->rows)/affine->rx;
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if (affine->rx < -DrawEpsilon)
{
intercept=(-z+(double) image->rows)/affine->rx;
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z/affine->rx);
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
{
inverse_edge.x2=edge->x2;
return(inverse_edge);
}
return(inverse_edge);
}
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
AffineMatrix
inverse_affine;
double
determinant;
determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
affine->ry);
inverse_affine.sx=determinant*affine->sy;
inverse_affine.rx=determinant*(-affine->rx);
inverse_affine.ry=determinant*(-affine->ry);
inverse_affine.sy=determinant*affine->sx;
inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
inverse_affine.ry;
inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
inverse_affine.sy;
return(inverse_affine);
}
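/*
  For reference, the math implemented above (a sketch in the notation of the
  affine map x' = sx*x + ry*y + tx, y' = rx*x + sy*y + ty): with determinant
  D = sx*sy - rx*ry, the inverse map has

      sx' =  sy/D,   ry' = -ry/D,   tx' = -(tx*sx' + ty*ry')
      rx' = -rx/D,   sy' =  sx/D,   ty' = -(tx*rx' + ty*sy')

  where 1/D is taken as PerceptibleReciprocal(D) to guard against a singular
  (or nearly singular) matrix.
*/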
MagickExport MagickBooleanType DrawAffineImage(Image *image,
const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
AffineMatrix
inverse_affine;
CacheView
*image_view,
*source_view;
MagickBooleanType
status;
PixelInfo
zero;
PointInfo
extent[4],
min,
max;
register ssize_t
i;
SegmentInfo
edge;
ssize_t
start,
stop,
y;
/*
Determine bounding box.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(source != (const Image *) NULL);
assert(source->signature == MagickCoreSignature);
assert(affine != (AffineMatrix *) NULL);
extent[0].x=0.0;
extent[0].y=0.0;
extent[1].x=(double) source->columns-1.0;
extent[1].y=0.0;
extent[2].x=(double) source->columns-1.0;
extent[2].y=(double) source->rows-1.0;
extent[3].x=0.0;
extent[3].y=(double) source->rows-1.0;
for (i=0; i < 4; i++)
{
PointInfo
point;
point=extent[i];
extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
}
min=extent[0];
max=extent[0];
for (i=1; i < 4; i++)
{
if (min.x > extent[i].x)
min.x=extent[i].x;
if (min.y > extent[i].y)
min.y=extent[i].y;
if (max.x < extent[i].x)
max.x=extent[i].x;
if (max.y < extent[i].y)
max.y=extent[i].y;
}
/*
Affine transform image.
*/
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
edge.x1=MagickMax(min.x,0.0);
edge.y1=MagickMax(min.y,0.0);
edge.x2=MagickMin(max.x,(double) image->columns-1.0);
edge.y2=MagickMin(max.y,(double) image->rows-1.0);
inverse_affine=InverseAffineMatrix(affine);
GetPixelInfo(image,&zero);
start=(ssize_t) ceil(edge.y1-0.5);
stop=(ssize_t) floor(edge.y2+0.5);
source_view=AcquireVirtualCacheView(source,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(source,image,stop-start,1)
#endif
for (y=start; y <= stop; y++)
{
PixelInfo
composite,
pixel;
PointInfo
point;
register ssize_t
x;
register Quantum
*magick_restrict q;
SegmentInfo
inverse_edge;
ssize_t
x_offset;
inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
if (inverse_edge.x2 < inverse_edge.x1)
continue;
q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
1,exception);
if (q == (Quantum *) NULL)
continue;
pixel=zero;
composite=zero;
x_offset=0;
for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t) floor(inverse_edge.x2+0.5); x++)
{
point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
inverse_affine.tx;
point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
inverse_affine.ty;
(void) InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
point.x,point.y,&pixel,exception);
GetPixelInfoPixel(image,q,&composite);
CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
&composite);
SetPixelViaPixelInfo(image,&composite,q);
x_offset++;
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% void DrawBoundingRectangles(Image *image,const DrawInfo *draw_info,
% PolygonInfo *polygon_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static void DrawBoundingRectangles(Image *image,const DrawInfo *draw_info,
const PolygonInfo *polygon_info,ExceptionInfo *exception)
{
DrawInfo
*clone_info;
double
mid;
PointInfo
end,
resolution,
start;
PrimitiveInfo
primitive_info[6];
register ssize_t
i;
SegmentInfo
bounds;
ssize_t
coordinates;
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
exception);
resolution.x=96.0;
resolution.y=96.0;
if (clone_info->density != (char *) NULL)
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
flags=ParseGeometry(clone_info->density,&geometry_info);
resolution.x=geometry_info.rho;
resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == MagickFalse)
resolution.y=resolution.x;
}
mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
clone_info->stroke_width/2.0;
bounds.x1=0.0;
bounds.y1=0.0;
bounds.x2=0.0;
bounds.y2=0.0;
if (polygon_info != (PolygonInfo *) NULL)
{
bounds=polygon_info->edges[0].bounds;
for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
{
if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
bounds.x1=polygon_info->edges[i].bounds.x1;
if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
bounds.y1=polygon_info->edges[i].bounds.y1;
if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
bounds.x2=polygon_info->edges[i].bounds.x2;
if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
bounds.y2=polygon_info->edges[i].bounds.y2;
}
bounds.x1-=mid;
bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
image->columns ? (double) image->columns-1 : bounds.x1;
bounds.y1-=mid;
bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
image->rows ? (double) image->rows-1 : bounds.y1;
bounds.x2+=mid;
bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
image->columns ? (double) image->columns-1 : bounds.x2;
bounds.y2+=mid;
bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
image->rows ? (double) image->rows-1 : bounds.y2;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
if (polygon_info->edges[i].direction != 0)
(void) QueryColorCompliance("red",AllCompliance,&clone_info->stroke,
exception);
else
(void) QueryColorCompliance("green",AllCompliance,&clone_info->stroke,
exception);
start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
primitive_info[0].primitive=RectanglePrimitive;
TraceRectangle(primitive_info,start,end);
primitive_info[0].method=ReplaceMethod;
coordinates=(ssize_t) primitive_info[0].coordinates;
primitive_info[coordinates].primitive=UndefinedPrimitive;
(void) DrawPrimitive(image,clone_info,primitive_info,exception);
}
}
(void) QueryColorCompliance("blue",AllCompliance,&clone_info->stroke,
exception);
start.x=(double) (bounds.x1-mid);
start.y=(double) (bounds.y1-mid);
end.x=(double) (bounds.x2+mid);
end.y=(double) (bounds.y2+mid);
primitive_info[0].primitive=RectanglePrimitive;
TraceRectangle(primitive_info,start,end);
primitive_info[0].method=ReplaceMethod;
coordinates=(ssize_t) primitive_info[0].coordinates;
primitive_info[coordinates].primitive=UndefinedPrimitive;
(void) DrawPrimitive(image,clone_info,primitive_info,exception);
clone_info=DestroyDrawInfo(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *name,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the name of the clip path.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
const DrawInfo *draw_info,const char *name,ExceptionInfo *exception)
{
char
filename[MagickPathExtent];
Image
*clip_mask;
const char
*value;
DrawInfo
*clone_info;
MagickStatusType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
(void) FormatLocaleString(filename,MagickPathExtent,"%s",name);
value=GetImageArtifact(image,filename);
if (value == (const char *) NULL)
return(MagickFalse);
clip_mask=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
if (clip_mask == (Image *) NULL)
return(MagickFalse);
(void) QueryColorCompliance("#0000",AllCompliance,
&clip_mask->background_color,exception);
clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
(void) SetImageBackgroundColor(clip_mask,exception);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
draw_info->clip_mask);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->primitive,value);
(void) QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
exception);
clone_info->clip_mask=(char *) NULL;
status=NegateImage(clip_mask,MagickFalse,exception);
(void) SetImageMask(image,ReadPixelMask,clip_mask,exception);
clip_mask=DestroyImage(clip_mask);
status&=DrawImage(image,clone_info,exception);
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
DrawInfo
*clone_info;
double
length,
maximum_length,
offset,
scale,
total_length;
MagickStatusType
status;
PrimitiveInfo
*dash_polygon;
register ssize_t
i;
register double
dx,
dy;
size_t
number_vertices;
ssize_t
j,
n;
assert(draw_info != (const DrawInfo *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
number_vertices=(size_t) i;
dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
(2UL*number_vertices+1UL),sizeof(*dash_polygon));
if (dash_polygon == (PrimitiveInfo *) NULL)
return(MagickFalse);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->miterlimit=0;
dash_polygon[0]=primitive_info[0];
scale=ExpandAffine(&draw_info->affine);
length=scale*(draw_info->dash_pattern[0]-0.5);
offset=fabs(draw_info->dash_offset) >= DrawEpsilon ?
scale*draw_info->dash_offset : 0.0;
j=1;
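/*
Consume the dash offset: advance through the dash pattern until the offset
is exhausted, leaving n at the active pattern entry and length at the
remaining portion of that entry.
*/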
for (n=0; offset > 0.0; j=0)
{
if (draw_info->dash_pattern[n] <= 0.0)
break;
length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
if (offset > length)
{
offset-=length;
n++;
length=scale*(draw_info->dash_pattern[n]+0.5);
continue;
}
if (offset < length)
{
length-=offset;
offset=0.0;
break;
}
offset=0.0;
n++;
}
status=MagickTrue;
maximum_length=0.0;
total_length=0.0;
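/*
Walk each vertex pair of the polygon, splitting it at dash-pattern
boundaries: even-indexed pattern entries are dashes, so the accumulated
sub-path is stroked, while odd-indexed entries are gaps, so the sub-path is
restarted at the end of the gap.
*/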
for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
{
dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
maximum_length=hypot((double) dx,dy);
if (fabs(length) < DrawEpsilon)
{
n++;
if (fabs(draw_info->dash_pattern[n]) < DrawEpsilon)
n=0;
length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
}
for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); )
{
total_length+=length;
if ((n & 0x01) != 0)
{
dash_polygon[0]=primitive_info[0];
dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
total_length/maximum_length);
dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
total_length/maximum_length);
j=1;
}
else
{
if ((j+1) > (ssize_t) (2*number_vertices))
break;
dash_polygon[j]=primitive_info[i-1];
dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
total_length/maximum_length);
dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
total_length/maximum_length);
dash_polygon[j].coordinates=1;
j++;
dash_polygon[0].coordinates=(size_t) j;
dash_polygon[j].primitive=UndefinedPrimitive;
status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
}
n++;
if (fabs(draw_info->dash_pattern[n]) < DrawEpsilon)
n=0;
length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
}
length-=(maximum_length-total_length);
if ((n & 0x01) != 0)
continue;
dash_polygon[j]=primitive_info[i];
dash_polygon[j].coordinates=1;
j++;
}
if ((total_length <= maximum_length) && ((n & 0x01) == 0) && (j > 1))
{
dash_polygon[j]=primitive_info[i-1];
dash_polygon[j].point.x+=DrawEpsilon;
dash_polygon[j].point.y+=DrawEpsilon;
dash_polygon[j].coordinates=1;
j++;
dash_polygon[0].coordinates=(size_t) j;
dash_polygon[j].primitive=UndefinedPrimitive;
status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
}
dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws one or more graphic primitives on the image. The
% primitives may be supplied as a string or as a filename: precede the
% filename with an "at" sign (@) and the contents of the file are drawn on
% the image. You can affect how text is drawn by setting one or more members
% of the draw info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
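%  A minimal usage sketch (hypothetical values; the MVG string is illustrative
%  only, and image and exception are assumed to already exist):
%
%      draw_info=CloneDrawInfo((ImageInfo *) NULL,(DrawInfo *) NULL);
%      (void) CloneString(&draw_info->primitive,
%        "stroke blue fill green rectangle 10,10 90,60");
%      status=DrawImage(image,draw_info,exception);
%      draw_info=DestroyDrawInfo(draw_info);
%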
*/
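/*
IsPoint() returns MagickTrue when the string begins with a value that
StringToDouble() can parse as a number (i.e. it can start a coordinate);
TracePoint() records a single coordinate as a one-point primitive.
*/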
static inline MagickBooleanType IsPoint(const char *point)
{
char
*p;
double
value;
value=StringToDouble(point,&p);
return((fabs(value) < DrawEpsilon) && (p == point) ? MagickFalse : MagickTrue);
}
static inline void TracePoint(PrimitiveInfo *primitive_info,
const PointInfo point)
{
primitive_info->coordinates=1;
primitive_info->point=point;
}
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
ExceptionInfo *exception)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
keyword[MagickPathExtent],
geometry[MagickPathExtent],
*next_token,
pattern[MagickPathExtent],
*primitive,
*token;
const char
*q;
double
angle,
factor,
points_extent,
primitive_extent;
DrawInfo
**graphic_context;
MagickBooleanType
proceed;
MagickSizeType
number_points;
MagickStatusType
status;
PointInfo
point;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register const char
*p;
register ssize_t
i,
x;
SegmentInfo
bounds;
size_t
extent,
number_stops;
ssize_t
j,
k,
n;
StopInfo
*stops;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (*draw_info->primitive != '@')
primitive=AcquireString(draw_info->primitive);
else
primitive=FileToString(draw_info->primitive+1,~0UL,exception);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"MVG",primitive);
n=0;
number_stops=0;
stops=(StopInfo *) NULL;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=6553;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
GetNextToken(q,&q,MagickPathExtent,keyword);
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
status=MagickFalse;
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.rx=StringToDouble(token,&next_token);
if (token == next_token)
status=MagickFalse;
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.ry=StringToDouble(token,&next_token);
if (token == next_token)
status=MagickFalse;
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
status=MagickFalse;
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
status=MagickFalse;
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
status=MagickFalse;
break;
}
if (LocaleCompare("alpha",keyword) == 0)
{
primitive_type=AlphaPrimitive;
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->border_color,exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("clip-path",keyword) == 0)
{
/*
Create clip mask.
*/
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->clip_mask,token);
(void) DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
status=MagickFalse;
else
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
GetNextToken(q,&q,extent,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
GetAffineMatrix(&current);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
GetNextToken(q,&q,extent,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
status=MagickFalse;
else
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("density",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->density,token);
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
GetNextToken(q,&q,extent,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->fill,exception);
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
if (status == MagickFalse)
{
ImageInfo
*pattern_info;
pattern_info=AcquireImageInfo();
(void) CopyMagickString(pattern_info->filename,token,
MagickPathExtent);
graphic_context[n]->fill_pattern=ReadImage(pattern_info,
exception);
CatchException(exception);
pattern_info=DestroyImageInfo(pattern_info);
}
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
graphic_context[n]->fill.alpha=(MagickRealType) (QuantumRange-
ClampToQuantum((MagickRealType) QuantumRange*(1.0-factor*
StringToDouble(token,&next_token))));
if (token == next_token)
status=MagickFalse;
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
status=MagickFalse;
else
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *) RelinquishMagickMemory(
graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->pointsize=StringToDouble(token,&next_token);
if (token == next_token)
status=MagickFalse;
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
GetNextToken(q,&q,extent,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
status=MagickFalse;
else
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
GetNextToken(q,&q,extent,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
status=MagickFalse;
else
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
ssize_t
weight;
GetNextToken(q,&q,extent,token);
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(token);
graphic_context[n]->weight=(size_t) weight;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
GetNextToken(q,&q,extent,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
status=MagickFalse;
else
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
GetNextToken(q,&q,extent,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
status=MagickFalse;
else
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->interline_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
status=MagickFalse;
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->kerning=StringToDouble(token,&next_token);
if (token == next_token)
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("line",keyword) == 0)
primitive_type=LinePrimitive;
else
status=MagickFalse;
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
graphic_context[n]->alpha=(Quantum) (QuantumRange*(1.0-
(QuantumScale*graphic_context[n]->alpha*(1.0-factor*
StringToDouble(token,&next_token)))));
graphic_context[n]->fill_alpha=QuantumRange*(1.0-(QuantumScale*
graphic_context[n]->fill_alpha*(1.0-factor*StringToDouble(token,
&next_token))));
graphic_context[n]->stroke_alpha=QuantumRange*(1.0-(QuantumScale*
graphic_context[n]->stroke_alpha*(1.0-factor*StringToDouble(token,
&next_token))));
if (token == next_token)
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
break;
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
DrawError,"UnbalancedGraphicContextPushPop","`%s'",token);
status=MagickFalse;
n=0;
break;
}
if (graphic_context[n]->clip_mask != (char *) NULL)
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
(void) SetImageMask(image,ReadPixelMask,(Image *) NULL,
exception);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("pattern",token) == 0)
break;
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare("clip-path",token) == 0)
{
char
name[MagickPathExtent];
GetNextToken(q,&q,extent,token);
(void) FormatLocaleString(name,MagickPathExtent,"%s",token);
for (p=q; *q != '\0'; )
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"clip-path") != 0)
continue;
break;
}
if (p > (q-5))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) SetImageArtifact(image,name,token);
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent],
type[MagickPathExtent];
SegmentInfo
segment;
GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
GetNextToken(q,&q,extent,token);
(void) CopyMagickString(type,token,MagickPathExtent);
GetNextToken(q,&q,extent,token);
segment.x1=StringToDouble(token,&next_token);
if (token == next_token)
status=MagickFalse;
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
segment.y1=StringToDouble(token,&next_token);
if (token == next_token)
status=MagickFalse;
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
segment.x2=StringToDouble(token,&next_token);
if (token == next_token)
status=MagickFalse;
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
segment.y2=StringToDouble(token,&next_token);
if (token == next_token)
status=MagickFalse;
if (LocaleCompare(type,"radial") == 0)
{
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
}
for (p=q; *q != '\0'; )
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
if (p > (q-5))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-type",name);
(void) SetImageArtifact(image,key,type);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent];
RectangleInfo
pattern_bounds;
GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
GetNextToken(q,&q,extent,token);
pattern_bounds.x=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
status=MagickFalse;
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
pattern_bounds.y=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
status=MagickFalse;
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
pattern_bounds.width=(size_t) floor(StringToDouble(token,
&next_token)+0.5);
if (token == next_token)
status=MagickFalse;
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
pattern_bounds.height=(size_t) floor(StringToDouble(token,
&next_token)+0.5);
if (token == next_token)
status=MagickFalse;
for (p=q; *q != '\0'; )
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
if (p > (q-5))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double)pattern_bounds.width,
(double)pattern_bounds.height,(double)pattern_bounds.x,
(double)pattern_bounds.y);
(void) SetImageArtifact(image,key,geometry);
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
break;
}
if (LocaleCompare("defs",token) == 0)
break;
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
status=MagickFalse;
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
status=MagickFalse;
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
status=MagickFalse;
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
status=MagickFalse;
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
status=MagickFalse;
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
PixelInfo
stop_color;
number_stops++;
if (number_stops == 1)
stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops));
else if (number_stops > 2)
stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops,
sizeof(*stops));
if (stops == (StopInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
GetNextToken(q,&q,extent,token);
(void) QueryColorCompliance(token,AllCompliance,&stop_color,
exception);
stops[number_stops-1].color=stop_color;
GetNextToken(q,&q,extent,token);
stops[number_stops-1].offset=StringToDouble(token,&next_token);
if (token == next_token)
status=MagickFalse;
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->stroke,exception);
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=
graphic_context[n]->stroke_alpha;
if (status == MagickFalse)
{
ImageInfo
*pattern_info;
pattern_info=AcquireImageInfo();
(void) CopyMagickString(pattern_info->filename,token,
MagickPathExtent);
graphic_context[n]->stroke_pattern=ReadImage(pattern_info,
exception);
CatchException(exception);
pattern_info=DestroyImageInfo(pattern_info);
}
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_antialias=
StringToLong(token) != 0 ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*r;
r=q;
GetNextToken(r,&r,extent,token);
if (*token == ',')
GetNextToken(r,&r,extent,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
GetNextToken(r,&r,extent,token);
if (*token == ',')
GetNextToken(r,&r,extent,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2UL*x+2UL),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
status=MagickFalse;
break;
}
for (j=0; j < x; j++)
{
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_pattern[j]=StringToDouble(token,
&next_token);
if (token == next_token)
status=MagickFalse;
if (graphic_context[n]->dash_pattern[j] < 0.0)
status=MagickFalse;
}
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_offset=StringToDouble(token,
&next_token);
if (token == next_token)
status=MagickFalse;
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
GetNextToken(q,&q,extent,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
status=MagickFalse;
else
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
GetNextToken(q,&q,extent,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
status=MagickFalse;
else
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
graphic_context[n]->stroke.alpha=(MagickRealType) (QuantumRange-
ClampToQuantum((MagickRealType) QuantumRange*(1.0-factor*
StringToDouble(token,&next_token))));
if (token == next_token)
status=MagickFalse;
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_width=StringToDouble(token,&next_token);
if (token == next_token)
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
status=MagickFalse;
else
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
status=MagickFalse;
else
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->text_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->undercolor,exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
status=MagickFalse;
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
status=MagickFalse;
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
status=MagickFalse;
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
status=MagickFalse;
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
if ((fabs(affine.sx-1.0) >= DrawEpsilon) ||
(fabs(affine.rx) >= DrawEpsilon) || (fabs(affine.ry) >= DrawEpsilon) ||
(fabs(affine.sy-1.0) >= DrawEpsilon) ||
(fabs(affine.tx) >= DrawEpsilon) || (fabs(affine.ty) >= DrawEpsilon))
{
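/*
Concatenate the newly parsed transform with the current one: with the
affine written as the matrix [sx ry tx; rx sy ty; 0 0 1], this computes
current*affine, so the new transform is applied to points before the
existing one.
*/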
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if (*q == '\0')
{
if (number_stops > 1)
{
GradientType
type;
type=LinearGradient;
if (draw_info->gradient.type == RadialGradient)
type=RadialGradient;
(void) GradientImage(image,type,PadSpread,stops,number_stops,
exception);
}
if (number_stops > 0)
stops=(StopInfo *) RelinquishMagickMemory(stops);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int)
(q-p),p);
continue;
}
/*
Parse the primitive attributes.
*/
i=0;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
GetNextToken(q,&q,extent,token);
point.x=StringToDouble(token,&next_token);
if (token == next_token)
status=MagickFalse;
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
point.y=StringToDouble(token,&next_token);
if (token == next_token)
status=MagickFalse;
GetNextToken(q,(const char **) NULL,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
i++;
if (i < (ssize_t) number_points)
continue;
number_points<<=1;
primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(primitive_info,
(size_t) number_points,sizeof(*primitive_info));
if ((primitive_info == (PrimitiveInfo *) NULL) ||
(number_points != (MagickSizeType) ((size_t) number_points)))
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].text=(char *) NULL;
/*
Compute the primitive's bounding box; curved primitives are later
circumscribed within a circle derived from these bounds when estimating
how many points they require.
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
points_extent=(double) primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
points_extent*=5;
break;
}
case RoundRectanglePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot((double) alpha,(double) beta);
points_extent*=5;
points_extent+=2*ceil((double) MagickPI*radius)+6*BezierQuantum+360;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates > 107)
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
points_extent=(double) (BezierQuantum*primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
char
*s,
*t;
GetNextToken(q,&q,extent,token);
points_extent=1;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=StringToDouble(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
points_extent++;
}
points_extent=points_extent*BezierQuantum;
break;
}
case CirclePrimitive:
case ArcPrimitive:
case EllipsePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot((double) alpha,(double) beta);
points_extent=2*ceil((double) MagickPI*radius)+6*BezierQuantum+360;
break;
}
default:
break;
}
if (((double) ((size_t) points_extent)) < points_extent)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
break;
}
if (((MagickSizeType) (i+points_extent)) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=points_extent+1;
primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(primitive_info,
(size_t) number_points,sizeof(*primitive_info));
if ((primitive_info == (PrimitiveInfo *) NULL) ||
(number_points != (MagickSizeType) ((size_t) number_points)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
}
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
TraceRoundRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
primitive_type=UndefinedPrimitive;
break;
}
TraceArc(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
TraceEllipse(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
TraceCircle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
break;
case PolygonPrimitive:
{
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
TraceBezier(primitive_info+j,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
i=(ssize_t) (j+TracePath(primitive_info+j,token));
break;
}
case AlphaPrimitive:
case ColorPrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
GetNextToken(q,&q,extent,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
status=MagickFalse;
else
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
GetNextToken(q,&q,extent,token);
primitive_info[j].text=AcquireString(token);
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
GetNextToken(q,&q,extent,token);
primitive_info[j].text=AcquireString(token);
break;
}
}
if (primitive_info == (PrimitiveInfo *) NULL)
break;
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p),p);
if (status == MagickFalse)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
status&=DrawPrimitive(image,graphic_context[n],primitive_info,
exception);
}
if (primitive_info->text != (char *) NULL)
primitive_info->text=(char *) RelinquishMagickMemory(
primitive_info->text);
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear or radial gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
const ssize_t x,const ssize_t y)
{
switch (gradient->type)
{
case UndefinedGradient:
case LinearGradient:
{
double
gamma,
length,
offset,
scale;
PointInfo
p,
q;
const SegmentInfo
*gradient_vector;
gradient_vector=(&gradient->gradient_vector);
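/*
Scalar projection of the pixel offset q onto the gradient vector p,
computed as (p.q)/|p|; the caller divides by |p| once more to normalize
the offset to [0,1] along the gradient vector.
*/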
p.x=gradient_vector->x2-gradient_vector->x1;
p.y=gradient_vector->y2-gradient_vector->y1;
q.x=(double) x-gradient_vector->x1;
q.y=(double) y-gradient_vector->y1;
length=sqrt(q.x*q.x+q.y*q.y);
gamma=sqrt(p.x*p.x+p.y*p.y)*length;
gamma=PerceptibleReciprocal(gamma);
scale=p.x*q.x+p.y*q.y;
offset=gamma*scale*length;
return(offset);
}
case RadialGradient:
{
PointInfo
v;
if (gradient->spread == RepeatSpread)
{
v.x=(double) x-gradient->center.x;
v.y=(double) y-gradient->center.y;
return(sqrt(v.x*v.x+v.y*v.y));
}
v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
gradient->angle))))/gradient->radii.x;
v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
gradient->angle))))/gradient->radii.y;
return(sqrt(v.x*v.x+v.y*v.y));
}
}
return(0.0);
}
static int StopInfoCompare(const void *x,const void *y)
{
StopInfo
*stop_1,
*stop_2;
stop_1=(StopInfo *) x;
stop_2=(StopInfo *) y;
if (stop_1->offset > stop_2->offset)
return(1);
if (fabs(stop_1->offset-stop_2->offset) <= DrawEpsilon)
return(0);
return(-1);
}
MagickExport MagickBooleanType DrawGradientImage(Image *image,
const DrawInfo *draw_info,ExceptionInfo *exception)
{
CacheView
*image_view;
const GradientInfo
*gradient;
const SegmentInfo
*gradient_vector;
double
length;
MagickBooleanType
status;
PixelInfo
zero;
PointInfo
point;
RectangleInfo
bounding_box;
ssize_t
y;
/*
Draw linear or radial gradient on image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
gradient=(&draw_info->gradient);
qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
StopInfoCompare);
gradient_vector=(&gradient->gradient_vector);
point.x=gradient_vector->x2-gradient_vector->x1;
point.y=gradient_vector->y2-gradient_vector->y1;
length=sqrt(point.x*point.x+point.y*point.y);
bounding_box=gradient->bounding_box;
status=MagickTrue;
GetPixelInfo(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
{
PixelInfo
composite,
pixel;
double
alpha,
offset;
register Quantum
*magick_restrict q;
register ssize_t
i,
x;
ssize_t
j;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
composite=zero;
offset=GetStopColorOffset(gradient,0,y);
if (gradient->type != RadialGradient)
offset/=length;
for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
{
GetPixelInfoPixel(image,q,&pixel);
switch (gradient->spread)
{
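/*
Map the pixel offset into [0,1] according to the spread method: Pad
clamps to the end stops, Reflect mirrors the gradient on every repeat,
and Repeat tiles it along the gradient vector (or the radius for radial
gradients).
*/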
case UndefinedSpread:
case PadSpread:
{
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type != RadialGradient)
offset/=length;
}
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if ((offset < 0.0) || (i == 0))
composite=gradient->stops[0].color;
else
if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
case ReflectSpread:
{
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type != RadialGradient)
offset/=length;
}
if (offset < 0.0)
offset=(-offset);
if ((ssize_t) fmod(offset,2.0) == 0)
offset=fmod(offset,1.0);
else
offset=1.0-fmod(offset,1.0);
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if (i == 0)
composite=gradient->stops[0].color;
else
if (i == (ssize_t) gradient->number_stops)
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
case RepeatSpread:
{
MagickBooleanType
antialias;
double
repeat;
antialias=MagickFalse;
repeat=0.0;
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type == LinearGradient)
{
repeat=fmod(offset,length);
if (repeat < 0.0)
repeat=length-fmod(-repeat,length);
else
repeat=fmod(offset,length);
antialias=(repeat < length) && ((repeat+1.0) > length) ?
MagickTrue : MagickFalse;
offset=repeat/length;
}
else
{
repeat=fmod(offset,gradient->radius);
if (repeat < 0.0)
repeat=gradient->radius-fmod(-repeat,gradient->radius);
else
repeat=fmod(offset,gradient->radius);
antialias=repeat+1.0 > gradient->radius ? MagickTrue :
MagickFalse;
offset=repeat/gradient->radius;
}
}
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if (i == 0)
composite=gradient->stops[0].color;
else
if (i == (ssize_t) gradient->number_stops)
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
if (antialias != MagickFalse)
{
if (gradient->type == LinearGradient)
alpha=length-repeat;
else
alpha=gradient->radius-repeat;
i=0;
j=(ssize_t) gradient->number_stops-1L;
}
CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
}
CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
&pixel);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() renders a named pattern, previously registered as image
% artifacts, into the pattern image.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
% o pattern: the pattern image, allocated and rendered by this method.
%
% o exception: return any errors or warnings in this structure.
%
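%  A usage sketch (hypothetical names; the artifacts are normally registered
%  by the MVG "push pattern" handler in DrawImage(), and pattern must point
%  to either NULL or a valid image):
%
%      Image *pattern = (Image *) NULL;
%
%      (void) SetImageArtifact(image,"hatch","stroke gray line 0,0 7,7");
%      (void) SetImageArtifact(image,"hatch-geometry","8x8+0+0");
%      status=DrawPatternPath(image,draw_info,"hatch",&pattern,exception);
%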
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
const DrawInfo *draw_info,const char *name,Image **pattern,
ExceptionInfo *exception)
{
char
property[MagickPathExtent];
const char
*geometry,
*path,
*type;
DrawInfo
*clone_info;
ImageInfo
*image_info;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
assert(name != (const char *) NULL);
(void) FormatLocaleString(property,MagickPathExtent,"%s",name);
path=GetImageArtifact(image,property);
if (path == (const char *) NULL)
return(MagickFalse);
(void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name);
geometry=GetImageArtifact(image,property);
if (geometry == (const char *) NULL)
return(MagickFalse);
if ((*pattern) != (Image *) NULL)
*pattern=DestroyImage(*pattern);
image_info=AcquireImageInfo();
image_info->size=AcquireString(geometry);
*pattern=AcquireImage(image_info,exception);
image_info=DestroyImageInfo(image_info);
(void) QueryColorCompliance("#000000ff",AllCompliance,
&(*pattern)->background_color,exception);
(void) SetImageBackgroundColor(*pattern,exception);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"begin pattern-path %s %s",name,geometry);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->fill_pattern=NewImageList();
clone_info->stroke_pattern=NewImageList();
(void) FormatLocaleString(property,MagickPathExtent,"%s-type",name);
type=GetImageArtifact(image,property);
if (type != (const char *) NULL)
clone_info->gradient.type=(GradientType) ParseCommandOption(
MagickGradientOptions,MagickFalse,type);
(void) CloneString(&clone_info->primitive,path);
status=DrawImage(*pattern,clone_info,exception);
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
register ssize_t
i;
assert(polygon_info != (PolygonInfo **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (polygon_info[i] != (PolygonInfo *) NULL)
polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
return(polygon_info);
}
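/*
AcquirePolygonThreadSet() converts the primitive path into a polygon edge
list once per worker thread: rendering mutates the per-edge scanline and
highwater state, so each thread needs its own copy.
*/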
static PolygonInfo **AcquirePolygonThreadSet(
const PrimitiveInfo *primitive_info)
{
PathInfo
*magick_restrict path_info;
PolygonInfo
**polygon_info;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
sizeof(*polygon_info));
if (polygon_info == (PolygonInfo **) NULL)
return((PolygonInfo **) NULL);
(void) ResetMagickMemory(polygon_info,0,number_threads*sizeof(*polygon_info));
path_info=ConvertPrimitiveToPath(primitive_info);
if (path_info == (PathInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
polygon_info[i]=ConvertPathToPolygon(path_info);
if (polygon_info[i] == (PolygonInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
}
path_info=(PathInfo *) RelinquishMagickMemory(path_info);
return(polygon_info);
}
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid,
const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
const ssize_t y,double *stroke_alpha)
{
double
alpha,
beta,
distance,
subpath_alpha;
PointInfo
delta;
register const PointInfo
*q;
register EdgeInfo
*p;
register ssize_t
i;
ssize_t
j,
winding_number;
/*
Compute fill & stroke opacity for this (x,y) point.
*/
*stroke_alpha=0.0;
subpath_alpha=0.0;
p=polygon_info->edges;
for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
{
if ((double) y <= (p->bounds.y1-mid-0.5))
break;
if ((double) y > (p->bounds.y2+mid+0.5))
{
(void) DestroyEdge(polygon_info,(size_t) j);
continue;
}
if (((double) x <= (p->bounds.x1-mid-0.5)) ||
((double) x > (p->bounds.x2+mid+0.5)))
continue;
i=(ssize_t) MagickMax((double) p->highwater,1.0);
for ( ; i < (ssize_t) p->number_points; i++)
{
if ((double) y <= (p->points[i-1].y-mid-0.5))
break;
if ((double) y > (p->points[i].y+mid+0.5))
continue;
if (p->scanline != (double) y)
{
p->scanline=(double) y;
p->highwater=(size_t) i;
}
/*
Compute distance between a point and an edge.
*/
q=p->points+i-1;
delta.x=(q+1)->x-q->x;
delta.y=(q+1)->y-q->y;
beta=delta.x*(x-q->x)+delta.y*(y-q->y);
if (beta < 0.0)
{
delta.x=(double) x-q->x;
delta.y=(double) y-q->y;
distance=delta.x*delta.x+delta.y*delta.y;
}
else
{
alpha=delta.x*delta.x+delta.y*delta.y;
if (beta > alpha)
{
delta.x=(double) x-(q+1)->x;
delta.y=(double) y-(q+1)->y;
distance=delta.x*delta.x+delta.y*delta.y;
}
else
{
alpha=1.0/alpha;
beta=delta.x*(y-q->y)-delta.y*(x-q->x);
distance=alpha*beta*beta;
}
}
/*
Compute stroke & subpath opacity.
*/
beta=0.0;
if (p->ghostline == MagickFalse)
{
alpha=mid+0.5;
if ((*stroke_alpha < 1.0) &&
(distance <= ((alpha+0.25)*(alpha+0.25))))
{
alpha=mid-0.5;
if (distance <= ((alpha+0.25)*(alpha+0.25)))
*stroke_alpha=1.0;
else
{
beta=1.0;
if (fabs(distance-1.0) >= DrawEpsilon)
beta=sqrt((double) distance);
alpha=beta-mid-0.5;
if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
*stroke_alpha=(alpha-0.25)*(alpha-0.25);
}
}
}
if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0))
continue;
if (distance <= 0.0)
{
subpath_alpha=1.0;
continue;
}
if (distance > 1.0)
continue;
if (fabs(beta) < DrawEpsilon)
{
beta=1.0;
if (fabs(distance-1.0) >= DrawEpsilon)
beta=sqrt(distance);
}
alpha=beta-1.0;
if (subpath_alpha < (alpha*alpha))
subpath_alpha=alpha*alpha;
}
}
/*
Compute fill opacity.
*/
if (fill == MagickFalse)
return(0.0);
if (subpath_alpha >= 1.0)
return(1.0);
/*
Determine winding number.
*/
winding_number=0;
p=polygon_info->edges;
for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
{
if ((double) y <= p->bounds.y1)
break;
if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
continue;
if ((double) x > p->bounds.x2)
{
winding_number+=p->direction ? 1 : -1;
continue;
}
i=(ssize_t) MagickMax((double) p->highwater,1.0);
for ( ; i < (ssize_t) p->number_points; i++)
if ((double) y <= p->points[i].y)
break;
q=p->points+i-1;
if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
winding_number+=p->direction ? 1 : -1;
}
if (fill_rule != NonZeroRule)
{
if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
return(1.0);
}
else
if (MagickAbsoluteValue(winding_number) != 0)
return(1.0);
return(subpath_alpha);
}
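/*
  The fill-rule test above reduces to a small predicate: a point is interior
  under the even-odd rule when its winding number is odd, and under the
  nonzero rule whenever the winding number is not zero.  A minimal
  stand-alone restatement (IsInteriorPoint is a hypothetical name, not a
  MagickCore API):

    static inline int IsInteriorPoint(const ssize_t winding_number,
      const FillRule fill_rule)
    {
      if (fill_rule != NonZeroRule)
        return((MagickAbsoluteValue(winding_number) & 0x01) != 0);
      return(MagickAbsoluteValue(winding_number) != 0);
    }
*/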
static MagickBooleanType DrawPolygonPrimitive(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
fill,
status;
double
mid;
PolygonInfo
**magick_restrict polygon_info;
register EdgeInfo
*p;
register ssize_t
i;
SegmentInfo
bounds;
ssize_t
start_y,
stop_y,
y;
/*
Compute bounding box.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
assert(primitive_info != (PrimitiveInfo *) NULL);
if (primitive_info->coordinates == 0)
return(MagickTrue);
polygon_info=AcquirePolygonThreadSet(primitive_info);
if (polygon_info == (PolygonInfo **) NULL)
return(MagickFalse);
DisableMSCWarning(4127)
if (0)
DrawBoundingRectangles(image,draw_info,polygon_info[0],exception);
RestoreMSCWarning
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
fill=(primitive_info->method == FillToBorderMethod) ||
(primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
bounds=polygon_info[0]->edges[0].bounds;
for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
{
p=polygon_info[0]->edges+i;
if (p->bounds.x1 < bounds.x1)
bounds.x1=p->bounds.x1;
if (p->bounds.y1 < bounds.y1)
bounds.y1=p->bounds.y1;
if (p->bounds.x2 > bounds.x2)
bounds.x2=p->bounds.x2;
if (p->bounds.y2 > bounds.y2)
bounds.y2=p->bounds.y2;
}
bounds.x1-=(mid+1.0);
bounds.x1=bounds.x1 < 0.0 ? 0.0 : (size_t) ceil(bounds.x1-0.5) >=
image->columns ? (double) image->columns-1 : bounds.x1;
bounds.y1-=(mid+1.0);
bounds.y1=bounds.y1 < 0.0 ? 0.0 : (size_t) ceil(bounds.y1-0.5) >=
image->rows ? (double) image->rows-1 : bounds.y1;
bounds.x2+=(mid+1.0);
bounds.x2=bounds.x2 < 0.0 ? 0.0 : (size_t) floor(bounds.x2+0.5) >=
image->columns ? (double) image->columns-1 : bounds.x2;
bounds.y2+=(mid+1.0);
bounds.y2=bounds.y2 < 0.0 ? 0.0 : (size_t) floor(bounds.y2+0.5) >=
image->rows ? (double) image->rows-1 : bounds.y2;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
if ((primitive_info->coordinates == 1) ||
(polygon_info[0]->number_edges == 0))
{
/*
Draw point.
*/
start_y=(ssize_t) ceil(bounds.y1-0.5);
stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
for (y=start_y; y <= stop_y; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
register ssize_t
x;
register Quantum
*magick_restrict q;
ssize_t
start_x,
stop_x;
if (status == MagickFalse)
continue;
start_x=(ssize_t) ceil(bounds.x1-0.5);
stop_x=(ssize_t) floor(bounds.x2+0.5);
x=start_x;
q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
GetPixelInfo(image,&pixel);
for ( ; x <= stop_x; x++)
{
if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
(y == (ssize_t) ceil(primitive_info->point.y-0.5)))
{
GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
}
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
polygon_info=DestroyPolygonThreadSet(polygon_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" end draw-polygon");
return(status);
}
/*
Draw polygon or line.
*/
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
start_y=(ssize_t) ceil(bounds.y1-0.5);
stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
for (y=start_y; y <= stop_y; y++)
{
const int
id = GetOpenMPThreadId();
double
fill_alpha,
stroke_alpha;
PixelInfo
fill_color,
stroke_color;
register Quantum
*magick_restrict q;
register ssize_t
x;
ssize_t
start_x,
stop_x;
if (status == MagickFalse)
continue;
start_x=(ssize_t) ceil(bounds.x1-0.5);
stop_x=(ssize_t) floor(bounds.x2+0.5);
q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+1),1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=start_x; x <= stop_x; x++)
{
/*
Fill and/or stroke.
*/
fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
x,y,&stroke_alpha);
if (draw_info->stroke_antialias == MagickFalse)
{
fill_alpha=fill_alpha > 0.25 ? 1.0 : 0.0;
stroke_alpha=stroke_alpha > 0.25 ? 1.0 : 0.0;
}
GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
fill_alpha=fill_alpha*fill_color.alpha;
CompositePixelOver(image,&fill_color,fill_alpha,q,(double)
GetPixelAlpha(image,q),q);
GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
stroke_alpha=stroke_alpha*stroke_color.alpha;
CompositePixelOver(image,&stroke_color,stroke_alpha,q,(double)
GetPixelAlpha(image,q),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
polygon_info=DestroyPolygonThreadSet(polygon_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
const char
*methods[] =
{
"point",
"replace",
"floodfill",
"filltoborder",
"reset",
"?"
};
PointInfo
p,
q,
point;
register ssize_t
i,
x;
ssize_t
coordinates,
y;
x=(ssize_t) ceil(primitive_info->point.x-0.5);
y=(ssize_t) ceil(primitive_info->point.y-0.5);
switch (primitive_info->primitive)
{
case AlphaPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case ColorPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case ImagePrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"ImagePrimitive %.20g,%.20g",(double) x,(double) y);
return;
}
case PointPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case TextPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"TextPrimitive %.20g,%.20g",(double) x,(double) y);
return;
}
default:
break;
}
coordinates=0;
p=primitive_info[0].point;
q.x=(-1.0);
q.y=(-1.0);
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
if (coordinates <= 0)
{
coordinates=(ssize_t) primitive_info[i].coordinates;
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin open (%.20g)",(double) coordinates);
p=point;
}
point=primitive_info[i].point;
if ((fabs(q.x-point.x) >= DrawEpsilon) ||
(fabs(q.y-point.y) >= DrawEpsilon))
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
else
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
q=point;
coordinates--;
if (coordinates > 0)
continue;
if ((fabs(p.x-point.x) >= DrawEpsilon) ||
(fabs(p.y-point.y) >= DrawEpsilon))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
(double) coordinates);
else
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
(double) coordinates);
}
}
MagickExport MagickBooleanType DrawPrimitive(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickStatusType
status;
register ssize_t
i,
x;
ssize_t
y;
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin draw-primitive");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
draw_info->affine.tx,draw_info->affine.ty);
}
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
(IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
(void) SetImageColorspace(image,sRGBColorspace,exception);
status=MagickTrue;
x=(ssize_t) ceil(primitive_info->point.x-0.5);
y=(ssize_t) ceil(primitive_info->point.y-0.5);
image_view=AcquireAuthenticCacheView(image,exception);
switch (primitive_info->primitive)
{
case AlphaPrimitive:
{
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
switch (primitive_info->method)
{
case PointMethod:
default:
{
PixelInfo
pixel;
register Quantum
*q;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
(void) SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case ReplaceMethod:
{
MagickBooleanType
sync;
PixelInfo
pixel,
target;
(void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
exception);
GetPixelInfo(image,&pixel);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
{
q+=GetPixelChannels(image);
continue;
}
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
break;
}
break;
}
case FloodfillMethod:
case FillToBorderMethod:
{
ChannelType
channel_mask;
PixelInfo
target;
(void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
&target,exception);
if (primitive_info->method == FillToBorderMethod)
{
target.red=(double) draw_info->border_color.red;
target.green=(double) draw_info->border_color.green;
target.blue=(double) draw_info->border_color.blue;
}
channel_mask=SetImageChannelMask(image,AlphaChannel);
status&=FloodfillPaintImage(image,draw_info,&target,x,y,
primitive_info->method == FloodfillMethod ? MagickFalse :
MagickTrue,exception);
(void) SetImageChannelMask(image,channel_mask);
break;
}
case ResetMethod:
{
MagickBooleanType
sync;
PixelInfo
pixel;
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
break;
}
break;
}
}
break;
}
case ColorPrimitive:
{
switch (primitive_info->method)
{
case PointMethod:
default:
{
PixelInfo
pixel;
register Quantum
*q;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetPixelInfo(image,&pixel);
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
(void) SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case ReplaceMethod:
{
MagickBooleanType
sync;
PixelInfo
pixel,
target;
(void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
{
q+=GetPixelChannels(image);
continue;
}
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
break;
}
break;
}
case FloodfillMethod:
case FillToBorderMethod:
{
PixelInfo
target;
(void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
&target,exception);
if (primitive_info->method == FillToBorderMethod)
{
target.red=(double) draw_info->border_color.red;
target.green=(double) draw_info->border_color.green;
target.blue=(double) draw_info->border_color.blue;
}
status&=FloodfillPaintImage(image,draw_info,&target,x,y,
primitive_info->method == FloodfillMethod ? MagickFalse :
MagickTrue,exception);
break;
}
case ResetMethod:
{
MagickBooleanType
sync;
PixelInfo
pixel;
GetPixelInfo(image,&pixel);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
break;
}
break;
}
}
break;
}
case ImagePrimitive:
{
AffineMatrix
affine;
char
composite_geometry[MagickPathExtent];
Image
*composite_image;
ImageInfo
*clone_info;
RectangleInfo
geometry;
ssize_t
x1,
y1;
if (primitive_info->text == (char *) NULL)
break;
clone_info=AcquireImageInfo();
if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
composite_image=ReadInlineImage(clone_info,primitive_info->text,
exception);
else
{
(void) CopyMagickString(clone_info->filename,primitive_info->text,
MagickPathExtent);
composite_image=ReadImage(clone_info,exception);
}
clone_info=DestroyImageInfo(clone_info);
if (composite_image == (Image *) NULL)
break;
(void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
NULL,(void *) NULL);
x1=(ssize_t) ceil(primitive_info[1].point.x-0.5);
y1=(ssize_t) ceil(primitive_info[1].point.y-0.5);
if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
{
/*
Resize image.
*/
(void) FormatLocaleString(composite_geometry,MagickPathExtent,
"%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y);
composite_image->filter=image->filter;
(void) TransformImage(&composite_image,(char *) NULL,
composite_geometry,exception);
}
if (composite_image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
exception);
if (draw_info->alpha != OpaqueAlpha)
(void) SetImageAlpha(composite_image,draw_info->alpha,exception);
SetGeometry(image,&geometry);
image->gravity=draw_info->gravity;
geometry.x=x;
geometry.y=y;
(void) FormatLocaleString(composite_geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
composite_image->rows,(double) geometry.x,(double) geometry.y);
(void) ParseGravityGeometry(image,composite_geometry,&geometry,exception);
affine=draw_info->affine;
affine.tx=(double) geometry.x;
affine.ty=(double) geometry.y;
composite_image->interpolate=image->interpolate;
status&=DrawAffineImage(image,composite_image,&affine,exception);
composite_image=DestroyImage(composite_image);
break;
}
case PointPrimitive:
{
PixelInfo
fill_color;
register Quantum
*q;
if ((y < 0) || (y >= (ssize_t) image->rows))
break;
if ((x < 0) || (x >= (ssize_t) image->columns))
break;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetFillColor(draw_info,x,y,&fill_color,exception);
CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,
(double) GetPixelAlpha(image,q),q);
(void) SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
DrawInfo
*clone_info;
if (primitive_info->text == (char *) NULL)
break;
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->text,primitive_info->text);
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
(void) CloneString(&clone_info->geometry,geometry);
status&=AnnotateImage(image,clone_info,exception);
clone_info=DestroyDrawInfo(clone_info);
break;
}
default:
{
double
mid,
scale;
DrawInfo
*clone_info;
if (IsEventLogging() != MagickFalse)
LogPrimitiveInfo(primitive_info);
scale=ExpandAffine(&draw_info->affine);
if ((draw_info->dash_pattern != (double *) NULL) &&
(fabs(draw_info->dash_pattern[0]) >= DrawEpsilon) &&
(fabs(scale*draw_info->stroke_width) >= DrawEpsilon) &&
(draw_info->stroke.alpha != (Quantum) TransparentAlpha))
{
/*
Draw dash polygon.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->stroke_width=0.0;
clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
exception);
clone_info=DestroyDrawInfo(clone_info);
(void) DrawDashPolygon(draw_info,primitive_info,image,exception);
break;
}
mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
if ((mid > 1.0) &&
((draw_info->stroke.alpha != (Quantum) TransparentAlpha) ||
(draw_info->stroke_pattern != (Image *) NULL)))
{
MagickBooleanType
closed_path;
/*
Draw strokes while respecting line cap/join attributes.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
closed_path=
(fabs(primitive_info[i-1].point.x-primitive_info[0].point.x) < DrawEpsilon) &&
(fabs(primitive_info[i-1].point.y-primitive_info[0].point.y) < DrawEpsilon) ?
MagickTrue : MagickFalse;
i=(ssize_t) primitive_info[0].coordinates;
if ((((draw_info->linecap == RoundCap) ||
(closed_path != MagickFalse)) &&
(draw_info->linejoin == RoundJoin)) ||
(primitive_info[i].primitive != UndefinedPrimitive))
{
(void) DrawPolygonPrimitive(image,draw_info,primitive_info,
exception);
break;
}
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->stroke_width=0.0;
clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
exception);
clone_info=DestroyDrawInfo(clone_info);
status&=DrawStrokePolygon(image,draw_info,primitive_info,exception);
break;
}
status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
break;
}
}
image_view=DestroyCacheView(image_view);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
return(status != 0 ? MagickTrue : MagickFalse);
}
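/*
  DrawPrimitive() is normally reached indirectly: DrawImage() parses an MVG
  string into PrimitiveInfo records and dispatches here.  A minimal
  caller-side sketch (illustrative only; the image is assumed to have been
  acquired elsewhere):

    ExceptionInfo *exception=AcquireExceptionInfo();
    DrawInfo *draw_info=CloneDrawInfo((ImageInfo *) NULL,(DrawInfo *) NULL);
    (void) CloneString(&draw_info->primitive,
      "stroke black fill none rectangle 10,10 90,90");
    (void) DrawImage(image,draw_info,exception);
    draw_info=DestroyDrawInfo(draw_info);
    exception=DestroyExceptionInfo(exception);
*/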
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
% MagickBooleanType DrawStrokePolygon(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static void DrawRoundLinecap(Image *image,const DrawInfo *draw_info,
const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
{
PrimitiveInfo
linecap[5];
register ssize_t
i;
for (i=0; i < 4; i++)
linecap[i]=(*primitive_info);
linecap[0].coordinates=4;
linecap[1].point.x+=2.0*DrawEpsilon;
linecap[2].point.x+=2.0*DrawEpsilon;
linecap[2].point.y+=2.0*DrawEpsilon;
linecap[3].point.y+=2.0*DrawEpsilon;
linecap[4].primitive=UndefinedPrimitive;
(void) DrawPolygonPrimitive(image,draw_info,linecap,exception);
}
static MagickBooleanType DrawStrokePolygon(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
DrawInfo
*clone_info;
MagickBooleanType
closed_path;
MagickStatusType
status;
PrimitiveInfo
*stroke_polygon;
register const PrimitiveInfo
*p,
*q;
/*
Draw stroked polygon.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin draw-stroke-polygon");
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->fill=draw_info->stroke;
if (clone_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
if (clone_info->stroke_pattern != (Image *) NULL)
clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
MagickTrue,exception);
clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
clone_info->stroke_width=0.0;
clone_info->fill_rule=NonZeroRule;
status=MagickTrue;
for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
{
stroke_polygon=TraceStrokePolygon(draw_info,p);
if (stroke_polygon == (PrimitiveInfo *) NULL)
{
status=0;
break;
}
status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
if (status == 0)
break;
stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
q=p+p->coordinates-1;
closed_path=(fabs(q->point.x-p->point.x) < DrawEpsilon) &&
(fabs(q->point.y-p->point.y) < DrawEpsilon) ? MagickTrue : MagickFalse;
if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
{
DrawRoundLinecap(image,draw_info,p,exception);
DrawRoundLinecap(image,draw_info,q,exception);
}
}
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" end draw-stroke-polygon");
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(affine_matrix != (AffineMatrix *) NULL);
(void) ResetMagickMemory(affine_matrix,0,sizeof(*affine_matrix));
affine_matrix->sx=1.0;
affine_matrix->sy=1.0;
}
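/*
  Example (a sketch, not library code): after GetAffineMatrix() the matrix is
  the identity, so a rotation can be composed by filling in the same fields
  that DrawPrimitive() logs above (sx,rx,ry,sy,tx,ty):

    AffineMatrix affine;
    double theta=DegreesToRadians(30.0);
    GetAffineMatrix(&affine);
    affine.sx=cos(theta);
    affine.rx=sin(theta);
    affine.ry=(-sin(theta));
    affine.sy=cos(theta);
*/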
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
char
*next_token;
const char
*option;
ExceptionInfo
*exception;
ImageInfo
*clone_info;
/*
Initialize draw attributes.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(draw_info != (DrawInfo *) NULL);
(void) ResetMagickMemory(draw_info,0,sizeof(*draw_info));
clone_info=CloneImageInfo(image_info);
GetAffineMatrix(&draw_info->affine);
exception=AcquireExceptionInfo();
(void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
exception);
(void) QueryColorCompliance("#0000",AllCompliance,&draw_info->stroke,
exception);
draw_info->stroke_width=1.0;
draw_info->fill_rule=EvenOddRule;
draw_info->alpha=OpaqueAlpha;
draw_info->fill_alpha=OpaqueAlpha;
draw_info->stroke_alpha=OpaqueAlpha;
draw_info->linecap=ButtCap;
draw_info->linejoin=MiterJoin;
draw_info->miterlimit=10;
draw_info->decorate=NoDecoration;
draw_info->pointsize=12.0;
draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
draw_info->compose=OverCompositeOp;
draw_info->render=MagickTrue;
draw_info->debug=IsEventLogging();
draw_info->stroke_antialias=clone_info->antialias;
if (clone_info->font != (char *) NULL)
draw_info->font=AcquireString(clone_info->font);
if (clone_info->density != (char *) NULL)
draw_info->density=AcquireString(clone_info->density);
draw_info->text_antialias=clone_info->antialias;
if (fabs(clone_info->pointsize) >= DrawEpsilon)
draw_info->pointsize=clone_info->pointsize;
draw_info->border_color=clone_info->border_color;
if (clone_info->server_name != (char *) NULL)
draw_info->server_name=AcquireString(clone_info->server_name);
option=GetImageOption(clone_info,"direction");
if (option != (const char *) NULL)
draw_info->direction=(DirectionType) ParseCommandOption(
MagickDirectionOptions,MagickFalse,option);
else
draw_info->direction=UndefinedDirection;
option=GetImageOption(clone_info,"encoding");
if (option != (const char *) NULL)
(void) CloneString(&draw_info->encoding,option);
option=GetImageOption(clone_info,"family");
if (option != (const char *) NULL)
(void) CloneString(&draw_info->family,option);
option=GetImageOption(clone_info,"fill");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
exception);
option=GetImageOption(clone_info,"gravity");
if (option != (const char *) NULL)
draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
MagickFalse,option);
option=GetImageOption(clone_info,"interline-spacing");
if (option != (const char *) NULL)
draw_info->interline_spacing=StringToDouble(option,&next_token);
option=GetImageOption(clone_info,"interword-spacing");
if (option != (const char *) NULL)
draw_info->interword_spacing=StringToDouble(option,&next_token);
option=GetImageOption(clone_info,"kerning");
if (option != (const char *) NULL)
draw_info->kerning=StringToDouble(option,&next_token);
option=GetImageOption(clone_info,"stroke");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
exception);
option=GetImageOption(clone_info,"strokewidth");
if (option != (const char *) NULL)
draw_info->stroke_width=StringToDouble(option,&next_token);
option=GetImageOption(clone_info,"style");
if (option != (const char *) NULL)
draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
MagickFalse,option);
option=GetImageOption(clone_info,"undercolor");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
exception);
option=GetImageOption(clone_info,"weight");
if (option != (const char *) NULL)
{
ssize_t
weight;
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(option);
draw_info->weight=(size_t) weight;
}
exception=DestroyExceptionInfo(exception);
draw_info->signature=MagickCoreSignature;
clone_info=DestroyImageInfo(clone_info);
}
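/*
  Sketch of typical use (assumed caller code, not part of this file): the
  defaults above flow from ImageInfo options into the DrawInfo, so an option
  such as "strokewidth" set on the ImageInfo ends up in stroke_width:

    ImageInfo *image_info=AcquireImageInfo();
    DrawInfo draw_info;
    (void) SetImageOption(image_info,"strokewidth","2.5");
    GetDrawInfo(image_info,&draw_info);
    // draw_info.stroke_width is now 2.5; strings acquired by GetDrawInfo()
    // (font, density, etc.) would still need to be released by the caller.
    image_info=DestroyImageInfo(image_info);
*/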
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Permutate() returns the number of ways to choose k items from n, i.e. the
% binomial coefficient n!/(k!*(n-k)!).
%
% The format of the Permutate method is:
%
% double Permutate(const ssize_t n,const ssize_t k)
%
% A description of each parameter follows:
%
% o n: the total number of items.
%
% o k: the number of items chosen.
%
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
double
r;
register ssize_t
i;
r=1.0;
for (i=k+1; i <= n; i++)
r*=i;
for (i=1; i <= (n-k); i++)
r/=i;
return(r);
}
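/*
  Note: the two products above evaluate to n!/(k!*(n-k)!), the binomial
  coefficient C(n,k) used as a Bernstein coefficient by TraceBezier().  For
  example, Permutate(4,2) computes (3*4)/(1*2)=6.
*/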
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
static void TraceArc(PrimitiveInfo *primitive_info,const PointInfo start,
const PointInfo end,const PointInfo degrees)
{
PointInfo
center,
radii;
center.x=0.5*(end.x+start.x);
center.y=0.5*(end.y+start.y);
radii.x=fabs(center.x-start.x);
radii.y=fabs(center.y-start.y);
TraceEllipse(primitive_info,center,radii,degrees);
}
static void TraceArcPath(PrimitiveInfo *primitive_info,const PointInfo start,
const PointInfo end,const PointInfo arc,const double angle,
const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
double
alpha,
beta,
delta,
factor,
gamma,
theta;
PointInfo
center,
points[3],
radii;
register double
cosine,
sine;
register PrimitiveInfo
*p;
register ssize_t
i;
size_t
arc_segments;
if ((fabs(start.x-end.x) < DrawEpsilon) &&
(fabs(start.y-end.y) < DrawEpsilon))
{
TracePoint(primitive_info,end);
return;
}
radii.x=fabs(arc.x);
radii.y=fabs(arc.y);
if ((fabs(radii.x) < DrawEpsilon) || (fabs(radii.y) < DrawEpsilon))
{
TraceLine(primitive_info,start,end);
return;
}
cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
(radii.y*radii.y);
if (delta < DrawEpsilon)
{
TraceLine(primitive_info,start,end);
return;
}
if (delta > 1.0)
{
radii.x*=sqrt((double) delta);
radii.y*=sqrt((double) delta);
}
points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
alpha=points[1].x-points[0].x;
beta=points[1].y-points[0].y;
factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
if (factor <= 0.0)
factor=0.0;
else
{
factor=sqrt((double) factor);
if (sweep == large_arc)
factor=(-factor);
}
center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
alpha=atan2(points[0].y-center.y,points[0].x-center.x);
theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
if ((theta < 0.0) && (sweep != MagickFalse))
theta+=2.0*MagickPI;
else
if ((theta > 0.0) && (sweep == MagickFalse))
theta-=2.0*MagickPI;
arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+DrawEpsilon))));
p=primitive_info;
for (i=0; i < (ssize_t) arc_segments; i++)
{
beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
sin(fmod((double) beta,DegreesToRadians(360.0)));
points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
(double) i*theta/arc_segments),DegreesToRadians(360.0))));
points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
(double) i*theta/arc_segments),DegreesToRadians(360.0))));
points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
theta/arc_segments),DegreesToRadians(360.0))));
points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
theta/arc_segments),DegreesToRadians(360.0))));
points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
(i+1)*theta/arc_segments),DegreesToRadians(360.0))));
points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
(i+1)*theta/arc_segments),DegreesToRadians(360.0))));
p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
(p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
points[0].y);
(p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
points[0].y);
(p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
points[1].y);
(p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
points[1].y);
(p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
points[2].y);
(p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
points[2].y);
if (i == (ssize_t) (arc_segments-1))
(p+3)->point=end;
TraceBezier(p,4);
p+=p->coordinates;
}
primitive_info->coordinates=(size_t) (p-primitive_info);
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
}
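/*
  TraceArcPath() essentially performs the SVG endpoint-to-center arc
  conversion and then approximates the sweep with roughly one cubic Bezier
  per quarter turn.  With beta equal to half the per-segment sweep, the
  factor gamma above is (8/3)*sin(beta/2)^2/sin(beta) = (4/3)*tan(beta/2),
  which is the classic (4/3)*tan(sweep/4) control-point distance for
  approximating a circular arc with a cubic.
*/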
static void TraceBezier(PrimitiveInfo *primitive_info,
const size_t number_coordinates)
{
double
alpha,
*coefficients,
weight;
PointInfo
end,
point,
*points;
register PrimitiveInfo
*p;
register ssize_t
i,
j;
size_t
control_points,
quantum;
/*
Allocate coefficients.
*/
quantum=number_coordinates;
for (i=0; i < (ssize_t) number_coordinates; i++)
{
for (j=i+1; j < (ssize_t) number_coordinates; j++)
{
alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
if (alpha > (double) quantum)
quantum=(size_t) alpha;
alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
if (alpha > (double) quantum)
quantum=(size_t) alpha;
}
}
quantum=(size_t) MagickMin((double) quantum/number_coordinates,
(double) BezierQuantum);
control_points=quantum*number_coordinates;
coefficients=(double *) AcquireQuantumMemory((size_t)
number_coordinates,sizeof(*coefficients));
points=(PointInfo *) AcquireQuantumMemory((size_t) control_points,
sizeof(*points));
if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
/*
Compute bezier points.
*/
end=primitive_info[number_coordinates-1].point;
for (i=0; i < (ssize_t) number_coordinates; i++)
coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
weight=0.0;
for (i=0; i < (ssize_t) control_points; i++)
{
p=primitive_info;
point.x=0.0;
point.y=0.0;
alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
for (j=0; j < (ssize_t) number_coordinates; j++)
{
point.x+=alpha*coefficients[j]*p->point.x;
point.y+=alpha*coefficients[j]*p->point.y;
alpha*=weight/(1.0-weight);
p++;
}
points[i]=point;
weight+=1.0/control_points;
}
/*
Bezier curves are just short segmented polys.
*/
p=primitive_info;
for (i=0; i < (ssize_t) control_points; i++)
{
TracePoint(p,points[i]);
p+=p->coordinates;
}
TracePoint(p,end);
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
}
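/*
  The inner loop above evaluates the Bernstein form directly: with weight w
  and n = number_coordinates control points P[0..n-1],

    B(w) = sum_{j=0}^{n-1} C(n-1,j) * w^j * (1-w)^(n-1-j) * P[j],

  since alpha starts at (1-w)^(n-1) and is multiplied by w/(1-w) each step
  while coefficients[j] = Permutate(n-1,j).
*/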
static void TraceCircle(PrimitiveInfo *primitive_info,const PointInfo start,
const PointInfo end)
{
double
alpha,
beta,
radius;
PointInfo
offset,
degrees;
alpha=end.x-start.x;
beta=end.y-start.y;
radius=hypot((double) alpha,(double) beta);
offset.x=(double) radius;
offset.y=(double) radius;
degrees.x=0.0;
degrees.y=360.0;
TraceEllipse(primitive_info,start,offset,degrees);
}
static void TraceEllipse(PrimitiveInfo *primitive_info,const PointInfo start,
const PointInfo stop,const PointInfo degrees)
{
double
delta,
step,
y;
PointInfo
angle,
point;
register PrimitiveInfo
*p;
register ssize_t
i;
/*
Ellipses are just short segmented polys.
*/
if ((fabs(stop.x) < DrawEpsilon) && (fabs(stop.y) < DrawEpsilon))
{
TracePoint(primitive_info,start);
return;
}
delta=2.0/MagickMax(stop.x,stop.y);
step=MagickPI/8.0;
if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
step=MagickPI/(4*(MagickPI/delta/2+0.5));
angle.x=DegreesToRadians(degrees.x);
y=degrees.y;
while (y < degrees.x)
y+=360.0;
angle.y=DegreesToRadians(y);
for (p=primitive_info; angle.x < angle.y; angle.x+=step)
{
point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*stop.x+start.x;
point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*stop.y+start.y;
TracePoint(p,point);
p+=p->coordinates;
}
point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*stop.x+start.x;
point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*stop.y+start.y;
TracePoint(p,point);
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
}
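/*
  TraceEllipse() samples the parametric form

    point = start + (stop.x*cos(t), stop.y*sin(t)),  t in [degrees.x,degrees.y],

  at a fixed angular step, so "start" is the center and "stop" holds the two
  radii; TraceCircle() above builds on it with equal radii and a 0..360 sweep.
*/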
static void TraceLine(PrimitiveInfo *primitive_info,const PointInfo start,
const PointInfo end)
{
TracePoint(primitive_info,start);
if ((fabs(start.x-end.x) < DrawEpsilon) &&
(fabs(start.y-end.y) < DrawEpsilon))
{
primitive_info->primitive=PointPrimitive;
primitive_info->coordinates=1;
return;
}
TracePoint(primitive_info+1,end);
(primitive_info+1)->primitive=primitive_info->primitive;
primitive_info->coordinates=2;
}
static size_t TracePath(PrimitiveInfo *primitive_info,const char *path)
{
char
*next_token,
token[MagickPathExtent];
const char
*p;
double
x,
y;
int
attribute,
last_attribute;
PointInfo
end = {0.0, 0.0},
points[4] = { {0.0,0.0}, {0.0,0.0}, {0.0,0.0}, {0.0,0.0} },
point = {0.0, 0.0},
start = {0.0, 0.0};
PrimitiveType
primitive_type;
register PrimitiveInfo
*q;
register ssize_t
i;
size_t
number_coordinates,
z_count;
attribute=0;
number_coordinates=0;
z_count=0;
primitive_type=primitive_info->primitive;
q=primitive_info;
for (p=path; *p != '\0'; )
{
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == '\0')
break;
last_attribute=attribute;
attribute=(int) (*p++);
switch (attribute)
{
case 'a':
case 'A':
{
double
angle;
MagickBooleanType
large_arc,
sweep;
PointInfo
arc;
/*
Compute arc points.
*/
do
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
arc.x=StringToDouble(token,&next_token);
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
arc.y=StringToDouble(token,&next_token);
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
angle=StringToDouble(token,&next_token);
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
TraceArcPath(q,point,end,arc,angle,large_arc,sweep);
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'c':
case 'C':
{
/*
Compute bezier points.
*/
do
{
points[0]=point;
for (i=1; i < 4; i++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
points[i]=end;
}
for (i=0; i < 4; i++)
(q+i)->point=points[i];
TraceBezier(q,4);
q+=q->coordinates;
point=end;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'H':
case 'h':
{
do
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
TracePoint(q,point);
q+=q->coordinates;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'l':
case 'L':
{
do
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
TracePoint(q,point);
q+=q->coordinates;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'M':
case 'm':
{
if (q != primitive_info)
{
primitive_info->coordinates=(size_t) (q-primitive_info);
number_coordinates+=primitive_info->coordinates;
primitive_info=q;
}
i=0;
do
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
if (i == 0)
start=point;
i++;
TracePoint(q,point);
q+=q->coordinates;
if ((i != 0) && (attribute == (int) 'M'))
{
TracePoint(q,point);
q+=q->coordinates;
}
} while (IsPoint(p) != MagickFalse);
break;
}
case 'q':
case 'Q':
{
/*
Compute bezier points.
*/
do
{
points[0]=point;
for (i=1; i < 3; i++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
if (*p == ',')
p++;
end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
points[i]=end;
}
for (i=0; i < 3; i++)
(q+i)->point=points[i];
TraceBezier(q,3);
q+=q->coordinates;
point=end;
} while (IsPoint(p) != MagickFalse);
break;
}
case 's':
case 'S':
{
/*
Compute bezier points.
*/
do
{
points[0]=points[3];
points[1].x=2.0*points[3].x-points[2].x;
points[1].y=2.0*points[3].y-points[2].y;
for (i=2; i < 4; i++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
if (*p == ',')
p++;
end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
points[i]=end;
}
if (strchr("CcSs",last_attribute) == (char *) NULL)
{
points[0]=point;
points[1]=point;
}
for (i=0; i < 4; i++)
(q+i)->point=points[i];
TraceBezier(q,4);
q+=q->coordinates;
point=end;
} while (IsPoint(p) != MagickFalse);
break;
}
case 't':
case 'T':
{
/*
Compute bezier points.
*/
do
{
points[0]=points[2];
points[1].x=2.0*points[2].x-points[1].x;
points[1].y=2.0*points[2].y-points[1].y;
for (i=2; i < 3; i++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
points[i]=end;
}
if (strchr("QqTt",last_attribute) == (char *) NULL)
{
points[0]=point;
points[1]=point;
}
for (i=0; i < 3; i++)
(q+i)->point=points[i];
TraceBezier(q,3);
q+=q->coordinates;
point=end;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'v':
case 'V':
{
do
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
TracePoint(q,point);
q+=q->coordinates;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'z':
case 'Z':
{
point=start;
TracePoint(q,point);
q+=q->coordinates;
primitive_info->coordinates=(size_t) (q-primitive_info);
number_coordinates+=primitive_info->coordinates;
primitive_info=q;
z_count++;
break;
}
default:
{
if (isalpha((int) ((unsigned char) attribute)) != 0)
(void) FormatLocaleFile(stderr,"attribute not recognized: %c\n",
attribute);
break;
}
}
}
primitive_info->coordinates=(size_t) (q-primitive_info);
number_coordinates+=primitive_info->coordinates;
for (i=0; i < (ssize_t) number_coordinates; i++)
{
q--;
q->primitive=primitive_type;
if (z_count > 1)
q->method=FillToBorderMethod;
}
q=primitive_info;
return(number_coordinates);
}
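/*
  TracePath() accepts SVG-style path data, e.g. (illustrative input only):

    "M 10,10 L 90,10 L 90,90 Z"

  which yields a closed right triangle: 'M' starts a subpath, 'L' appends
  absolute line-to points, and 'Z' closes back to the subpath start.
*/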
static void TraceRectangle(PrimitiveInfo *primitive_info,const PointInfo start,
const PointInfo end)
{
PointInfo
point;
register PrimitiveInfo
*p;
register ssize_t
i;
p=primitive_info;
TracePoint(p,start);
p+=p->coordinates;
point.x=start.x;
point.y=end.y;
TracePoint(p,point);
p+=p->coordinates;
TracePoint(p,end);
p+=p->coordinates;
point.x=end.x;
point.y=start.y;
TracePoint(p,point);
p+=p->coordinates;
TracePoint(p,start);
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
}
static void TraceRoundRectangle(PrimitiveInfo *primitive_info,
const PointInfo start,const PointInfo end,PointInfo arc)
{
PointInfo
degrees,
offset,
point;
register PrimitiveInfo
*p;
register ssize_t
i;
p=primitive_info;
offset.x=fabs(end.x-start.x);
offset.y=fabs(end.y-start.y);
if (arc.x > (0.5*offset.x))
arc.x=0.5*offset.x;
if (arc.y > (0.5*offset.y))
arc.y=0.5*offset.y;
point.x=start.x+offset.x-arc.x;
point.y=start.y+arc.y;
degrees.x=270.0;
degrees.y=360.0;
TraceEllipse(p,point,arc,degrees);
p+=p->coordinates;
point.x=start.x+offset.x-arc.x;
point.y=start.y+offset.y-arc.y;
degrees.x=0.0;
degrees.y=90.0;
TraceEllipse(p,point,arc,degrees);
p+=p->coordinates;
point.x=start.x+arc.x;
point.y=start.y+offset.y-arc.y;
degrees.x=90.0;
degrees.y=180.0;
TraceEllipse(p,point,arc,degrees);
p+=p->coordinates;
point.x=start.x+arc.x;
point.y=start.y+arc.y;
degrees.x=180.0;
degrees.y=270.0;
TraceEllipse(p,point,arc,degrees);
p+=p->coordinates;
TracePoint(p,primitive_info->point);
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
}
static void TraceSquareLinecap(PrimitiveInfo *primitive_info,
const size_t number_vertices,const double offset)
{
double
distance;
register double
dx,
dy;
register ssize_t
i;
ssize_t
j;
dx=0.0;
dy=0.0;
for (i=1; i < (ssize_t) number_vertices; i++)
{
dx=primitive_info[0].point.x-primitive_info[i].point.x;
dy=primitive_info[0].point.y-primitive_info[i].point.y;
if ((fabs((double) dx) >= DrawEpsilon) ||
(fabs((double) dy) >= DrawEpsilon))
break;
}
if (i == (ssize_t) number_vertices)
i=(ssize_t) number_vertices-1L;
distance=hypot((double) dx,(double) dy);
primitive_info[0].point.x=(double) (primitive_info[i].point.x+
dx*(distance+offset)/distance);
primitive_info[0].point.y=(double) (primitive_info[i].point.y+
dy*(distance+offset)/distance);
for (j=(ssize_t) number_vertices-2; j >= 0; j--)
{
dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
if ((fabs((double) dx) >= DrawEpsilon) ||
(fabs((double) dy) >= DrawEpsilon))
break;
}
distance=hypot((double) dx,(double) dy);
primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+
dx*(distance+offset)/distance);
primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+
dy*(distance+offset)/distance);
}
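/*
  TraceSquareLinecap() extends each open end of the path along its own
  direction: with d the vector from the nearest distinct interior vertex to
  the endpoint, the endpoint becomes

    vertex + d*(|d|+offset)/|d|

  i.e. it is pushed outward by "offset" (half the affine-scaled stroke width)
  to square off the cap.
*/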
static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *draw_info,
const PrimitiveInfo *primitive_info)
{
typedef struct _LineSegment
{
double
p,
q;
} LineSegment;
double
delta_theta,
dot_product,
mid,
miterlimit;
LineSegment
dx,
dy,
inverse_slope,
slope,
theta;
MagickBooleanType
closed_path;
PointInfo
box_p[5],
box_q[5],
center,
offset,
*path_p,
*path_q;
PrimitiveInfo
*polygon_primitive,
*stroke_polygon;
register ssize_t
i;
size_t
arc_segments,
max_strokes,
number_vertices;
ssize_t
j,
n,
p,
q;
/*
Allocate paths.
*/
number_vertices=primitive_info->coordinates;
max_strokes=2*number_vertices+6*BezierQuantum+360;
path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
sizeof(*path_p));
path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
sizeof(*path_q));
polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
number_vertices+2UL,sizeof(*polygon_primitive));
if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL) ||
(polygon_primitive == (PrimitiveInfo *) NULL))
{
if (path_p != (PointInfo *) NULL)
path_p=(PointInfo *) RelinquishMagickMemory(path_p);
if (path_q != (PointInfo *) NULL)
path_q=(PointInfo *) RelinquishMagickMemory(path_q);
if (polygon_primitive != (PrimitiveInfo *) NULL)
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
polygon_primitive);
return((PrimitiveInfo *) NULL);
}
(void) CopyMagickMemory(polygon_primitive,primitive_info,(size_t)
number_vertices*sizeof(*polygon_primitive));
closed_path=
(fabs(primitive_info[number_vertices-1].point.x-primitive_info[0].point.x) < DrawEpsilon) &&
(fabs(primitive_info[number_vertices-1].point.y-primitive_info[0].point.y) < DrawEpsilon) ?
MagickTrue : MagickFalse;
if (((draw_info->linejoin == RoundJoin) ||
(draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
{
polygon_primitive[number_vertices]=primitive_info[1];
number_vertices++;
}
polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
/*
Compute the slope for the first line segment, p.
*/
dx.p=0.0;
dy.p=0.0;
for (n=1; n < (ssize_t) number_vertices; n++)
{
dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
if ((fabs(dx.p) >= DrawEpsilon) || (fabs(dy.p) >= DrawEpsilon))
break;
}
if (n == (ssize_t) number_vertices)
n=(ssize_t) number_vertices-1L;
slope.p=0.0;
inverse_slope.p=0.0;
if (fabs(dx.p) < DrawEpsilon)
{
if (dx.p >= 0.0)
slope.p=dy.p < 0.0 ? -1.0/DrawEpsilon : 1.0/DrawEpsilon;
else
slope.p=dy.p < 0.0 ? 1.0/DrawEpsilon : -1.0/DrawEpsilon;
}
else
if (fabs(dy.p) < DrawEpsilon)
{
if (dy.p >= 0.0)
inverse_slope.p=dx.p < 0.0 ? -1.0/DrawEpsilon : 1.0/DrawEpsilon;
else
inverse_slope.p=dx.p < 0.0 ? 1.0/DrawEpsilon : -1.0/DrawEpsilon;
}
else
{
slope.p=dy.p/dx.p;
inverse_slope.p=(-1.0/slope.p);
}
mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
TraceSquareLinecap(polygon_primitive,number_vertices,mid);
offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
offset.y=(double) (offset.x*inverse_slope.p);
if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
{
box_p[0].x=polygon_primitive[0].point.x-offset.x;
box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
box_p[1].x=polygon_primitive[n].point.x-offset.x;
box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
box_q[0].x=polygon_primitive[0].point.x+offset.x;
box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
box_q[1].x=polygon_primitive[n].point.x+offset.x;
box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
}
else
{
box_p[0].x=polygon_primitive[0].point.x+offset.x;
box_p[0].y=polygon_primitive[0].point.y+offset.y;
box_p[1].x=polygon_primitive[n].point.x+offset.x;
box_p[1].y=polygon_primitive[n].point.y+offset.y;
box_q[0].x=polygon_primitive[0].point.x-offset.x;
box_q[0].y=polygon_primitive[0].point.y-offset.y;
box_q[1].x=polygon_primitive[n].point.x-offset.x;
box_q[1].y=polygon_primitive[n].point.y-offset.y;
}
/*
Create strokes for the line join attribute: bevel, miter, round.
*/
p=0;
q=0;
path_q[p++]=box_q[0];
path_p[q++]=box_p[0];
for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
{
/*
Compute the slope for this line segment, q.
*/
dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
dot_product=dx.q*dx.q+dy.q*dy.q;
if (dot_product < 0.25)
continue;
slope.q=0.0;
inverse_slope.q=0.0;
if (fabs(dx.q) < DrawEpsilon)
{
if (dx.q >= 0.0)
slope.q=dy.q < 0.0 ? -1.0/DrawEpsilon : 1.0/DrawEpsilon;
else
slope.q=dy.q < 0.0 ? 1.0/DrawEpsilon : -1.0/DrawEpsilon;
}
else
if (fabs(dy.q) < DrawEpsilon)
{
if (dy.q >= 0.0)
inverse_slope.q=dx.q < 0.0 ? -1.0/DrawEpsilon : 1.0/DrawEpsilon;
else
inverse_slope.q=dx.q < 0.0 ? 1.0/DrawEpsilon : -1.0/DrawEpsilon;
}
else
{
slope.q=dy.q/dx.q;
inverse_slope.q=(-1.0/slope.q);
}
offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
offset.y=(double) (offset.x*inverse_slope.q);
dot_product=dy.q*offset.x-dx.q*offset.y;
if (dot_product > 0.0)
{
box_p[2].x=polygon_primitive[n].point.x-offset.x;
box_p[2].y=polygon_primitive[n].point.y-offset.y;
box_p[3].x=polygon_primitive[i].point.x-offset.x;
box_p[3].y=polygon_primitive[i].point.y-offset.y;
box_q[2].x=polygon_primitive[n].point.x+offset.x;
box_q[2].y=polygon_primitive[n].point.y+offset.y;
box_q[3].x=polygon_primitive[i].point.x+offset.x;
box_q[3].y=polygon_primitive[i].point.y+offset.y;
}
else
{
box_p[2].x=polygon_primitive[n].point.x+offset.x;
box_p[2].y=polygon_primitive[n].point.y+offset.y;
box_p[3].x=polygon_primitive[i].point.x+offset.x;
box_p[3].y=polygon_primitive[i].point.y+offset.y;
box_q[2].x=polygon_primitive[n].point.x-offset.x;
box_q[2].y=polygon_primitive[n].point.y-offset.y;
box_q[3].x=polygon_primitive[i].point.x-offset.x;
box_q[3].y=polygon_primitive[i].point.y-offset.y;
}
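    /*
      box_p[4] and box_q[4] are the intersections of the consecutive offset
      edges; when the slopes are nearly equal the edges are effectively
      collinear and the previous endpoints are reused.
    */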
if (fabs((double) (slope.p-slope.q)) < DrawEpsilon)
{
box_p[4]=box_p[1];
box_q[4]=box_q[1];
}
else
{
box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
box_p[3].y)/(slope.p-slope.q));
box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
box_q[3].y)/(slope.p-slope.q));
box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
}
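    /*
      Grow the stroke path buffers when they are nearly full.  The ~max_strokes
      test guards against size_t overflow before adding 6*BezierQuantum+360.
    */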
if (q >= (ssize_t) (max_strokes-6*BezierQuantum-360))
{
if (~max_strokes < (6*BezierQuantum+360))
{
path_p=(PointInfo *) RelinquishMagickMemory(path_p);
path_q=(PointInfo *) RelinquishMagickMemory(path_q);
}
else
{
max_strokes+=6*BezierQuantum+360;
path_p=(PointInfo *) ResizeQuantumMemory(path_p,max_strokes,
sizeof(*path_p));
path_q=(PointInfo *) ResizeQuantumMemory(path_q,max_strokes,
sizeof(*path_q));
}
if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL))
{
if (path_p != (PointInfo *) NULL)
path_p=(PointInfo *) RelinquishMagickMemory(path_p);
if (path_q != (PointInfo *) NULL)
path_q=(PointInfo *) RelinquishMagickMemory(path_q);
polygon_primitive=(PrimitiveInfo *)
RelinquishMagickMemory(polygon_primitive);
return((PrimitiveInfo *) NULL);
}
}
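    /*
      The sign of the cross product of the two segment directions gives the
      turn direction, and therefore which side of the stroke receives the
      join geometry.
    */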
dot_product=dx.q*dy.p-dx.p*dy.q;
if (dot_product <= 0.0)
switch (draw_info->linejoin)
{
case BevelJoin:
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
path_p[p++]=box_p[4];
else
{
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
path_q[q++]=box_q[4];
path_p[p++]=box_p[4];
}
else
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
path_p[p++]=box_p[4];
else
{
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
}
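        /*
          Approximate the round join with an arc of radius mid centered on the
          shared vertex; the angular step is 2/sqrt(mid), so the segment count
          grows with the square root of the stroke width.
        */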
center=polygon_primitive[n].point;
theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
if (theta.q < theta.p)
theta.q+=2.0*MagickPI;
arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
(2.0*sqrt((double) (1.0/mid)))));
path_q[q].x=box_q[1].x;
path_q[q].y=box_q[1].y;
q++;
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
path_q[q].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
path_q[q].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
q++;
}
path_q[q++]=box_q[2];
break;
}
default:
break;
}
else
switch (draw_info->linejoin)
{
case BevelJoin:
{
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
path_q[q++]=box_q[4];
else
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
path_q[q++]=box_q[4];
path_p[p++]=box_p[4];
}
else
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
path_q[q++]=box_q[4];
else
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
}
center=polygon_primitive[n].point;
theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
if (theta.p < theta.q)
theta.p+=2.0*MagickPI;
arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
(2.0*sqrt((double) (1.0/mid)))));
path_p[p++]=box_p[1];
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
path_p[p].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
path_p[p].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
p++;
}
path_p[p++]=box_p[2];
break;
}
default:
break;
}
slope.p=slope.q;
inverse_slope.p=inverse_slope.q;
box_p[0]=box_p[2];
box_p[1]=box_p[3];
box_q[0]=box_q[2];
box_q[1]=box_q[3];
dx.p=dx.q;
dy.p=dy.q;
n=i;
}
path_p[p++]=box_p[1];
path_q[q++]=box_q[1];
/*
Trace stroked polygon.
*/
stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
(p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
if (stroke_polygon != (PrimitiveInfo *) NULL)
{
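      /*
        The stroked outline is path_p traversed forward followed by path_q
        traversed in reverse, with the start point repeated to close the
        polygon.
      */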
for (i=0; i < (ssize_t) p; i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=path_p[i];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
}
for ( ; i < (ssize_t) (p+q+closed_path); i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
i++;
}
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
stroke_polygon[i].primitive=UndefinedPrimitive;
stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
}
path_p=(PointInfo *) RelinquishMagickMemory(path_p);
path_q=(PointInfo *) RelinquishMagickMemory(path_q);
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
return(stroke_polygon);
}
|
_utils.c | /* Generated by Cython 0.28.5 */
/* BEGIN: Cython Metadata
{
"distutils": {
"depends": [
"mnnpy/_utils.h"
],
"extra_compile_args": [
"-O2",
"-ffast-math",
"-march=native",
"-fopenmp"
],
"extra_link_args": [
"-fopenmp"
],
"include_dirs": [
"mnnpy"
],
"name": "mnnpy._utils",
"sources": [
"mnnpy/_utils.pyx"
]
},
"module_name": "mnnpy._utils"
}
END: Cython Metadata */
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
#error Cython requires Python 2.6+ or Python 3.3+.
#else
#define CYTHON_ABI "0_28_5"
#define CYTHON_FUTURE_DIVISION 0
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#define __PYX_COMMA ,
#ifndef HAVE_LONG_LONG
#if PY_VERSION_HEX >= 0x02070000
#define HAVE_LONG_LONG
#endif
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 0
#undef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 0
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#if PY_VERSION_HEX < 0x03050000
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#undef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#undef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 1
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#elif defined(PYSTON_VERSION)
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
#define CYTHON_USE_PYTYPE_LOOKUP 1
#endif
#if PY_MAJOR_VERSION < 3
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#elif !defined(CYTHON_USE_PYLONG_INTERNALS)
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#ifndef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 1
#endif
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#if PY_VERSION_HEX < 0x030300F0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#elif !defined(CYTHON_USE_UNICODE_WRITER)
#define CYTHON_USE_UNICODE_WRITER 1
#endif
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#ifndef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 1
#endif
#ifndef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 1
#endif
#ifndef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT (0 && PY_VERSION_HEX >= 0x03050000)
#endif
#ifndef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
#endif
#endif
#if !defined(CYTHON_FAST_PYCCALL)
#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#undef SHIFT
#undef BASE
#undef MASK
#endif
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
# if defined(__cplusplus)
template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
# else
# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifdef _MSC_VER
#ifndef _MSC_STDINT_H_
#if _MSC_VER < 1300
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
#else
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
#endif
#endif
#else
#include <stdint.h>
#endif
#ifndef CYTHON_FALLTHROUGH
#if defined(__cplusplus) && __cplusplus >= 201103L
#if __has_cpp_attribute(fallthrough)
#define CYTHON_FALLTHROUGH [[fallthrough]]
#elif __has_cpp_attribute(clang::fallthrough)
#define CYTHON_FALLTHROUGH [[clang::fallthrough]]
#elif __has_cpp_attribute(gnu::fallthrough)
#define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
#endif
#endif
#ifndef CYTHON_FALLTHROUGH
#if __has_attribute(fallthrough)
#define CYTHON_FALLTHROUGH __attribute__((fallthrough))
#else
#define CYTHON_FALLTHROUGH
#endif
#endif
#if defined(__clang__ ) && defined(__apple_build_version__)
#if __apple_build_version__ < 7000000
#undef CYTHON_FALLTHROUGH
#define CYTHON_FALLTHROUGH
#endif
#endif
#endif
#ifndef CYTHON_INLINE
#if defined(__clang__)
#define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
#elif defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
#ifndef METH_FASTCALL
#define METH_FASTCALL 0x80
#endif
typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
Py_ssize_t nargs, PyObject *kwnames);
#else
#define __Pyx_PyCFunctionFast _PyCFunctionFast
#define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
#endif
#if CYTHON_FAST_PYCCALL
#define __Pyx_PyFastCFunction_Check(func)\
((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS)))))
#else
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_PYSTON
#define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
#else
#define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
#endif
#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#elif PY_VERSION_HEX >= 0x03060000
#define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
#elif PY_VERSION_HEX >= 0x03000000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#else
#define __Pyx_PyThreadState_Current _PyThreadState_Current
#endif
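/* Minimal PEP 539 thread-specific storage (Py_tss) shim built on the legacy
   PyThread key API, for Python versions that lack the native interface. */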
#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
#include "pythread.h"
#define Py_tss_NEEDS_INIT 0
typedef int Py_tss_t;
static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
*key = PyThread_create_key();
return 0; // PyThread_create_key reports success always
}
static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
*key = Py_tss_NEEDS_INIT;
return key;
}
static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
PyObject_Free(key);
}
static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
return *key != Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
PyThread_delete_key(*key);
*key = Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
return PyThread_set_key_value(*key, value);
}
static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
return PyThread_get_key_value(*key);
}
#endif // TSS (Thread Specific Storage) API
#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
#else
#define __Pyx_PyDict_NewPresized(n) PyDict_New()
#endif
#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
#else
#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
0 : _PyUnicode_Ready((PyObject *)(op)))
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#else
#define CYTHON_PEP393_ENABLED 0
#define PyUnicode_1BYTE_KIND 1
#define PyUnicode_2BYTE_KIND 2
#define PyUnicode_4BYTE_KIND 4
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
#define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#define PyObject_Unicode PyObject_Str
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#if CYTHON_ASSUME_SAFE_MACROS
#define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
#else
#define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : (Py_INCREF(func), func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#if CYTHON_USE_ASYNC_SLOTS
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#else
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#endif
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef __Pyx_PyAsyncMethodsStruct
typedef struct {
unaryfunc am_await;
unaryfunc am_aiter;
unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
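/* With no NAN macro available, build a NaN by hand: an all-ones bit pattern
   is a (negative, quiet) IEEE-754 NaN. */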
static CYTHON_INLINE float __PYX_NAN() {
float value;
memset(&value, 0xFF, sizeof(value));
return value;
}
#endif
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ \
__pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \
}
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__mnnpy___utils
#define __PYX_HAVE_API__mnnpy___utils
/* Early includes */
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include "_utils.h"
#include "pythread.h"
#include <stdio.h>
#include "pystate.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
#define CYTHON_WITHOUT_ASSERTIONS
#endif
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER)
#define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
const Py_UNICODE *u_end = u;
while (*u_end++) ;
return (size_t)(u_end - u - 1);
}
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
#define __Pyx_PySequence_Tuple(obj)\
(likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
PyObject* ascii_chars_u = NULL;
PyObject* ascii_chars_b = NULL;
const char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
if (strcmp(default_encoding_c, "ascii") == 0) {
__Pyx_sys_getdefaultencoding_not_ascii = 0;
} else {
char ascii_chars[128];
int c;
for (c = 0; c < 128; c++) {
ascii_chars[c] = c;
}
__Pyx_sys_getdefaultencoding_not_ascii = 1;
ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
if (!ascii_chars_u) goto bad;
ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
PyErr_Format(
PyExc_ValueError,
"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
default_encoding_c);
goto bad;
}
Py_DECREF(ascii_chars_u);
Py_DECREF(ascii_chars_b);
}
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
__PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
static PyObject *__pyx_m = NULL;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime = NULL;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
static const char *__pyx_f[] = {
"mnnpy/_utils.pyx",
"stringsource",
};
/* ForceInitThreads.proto */
#ifndef __PYX_FORCE_INIT_THREADS
#define __PYX_FORCE_INIT_THREADS 0
#endif
/* NoFastGil.proto */
#define __Pyx_PyGILState_Ensure PyGILState_Ensure
#define __Pyx_PyGILState_Release PyGILState_Release
#define __Pyx_FastGIL_Remember()
#define __Pyx_FastGIL_Forget()
#define __Pyx_FastGilFuncInit()
/* MemviewSliceStruct.proto */
struct __pyx_memoryview_obj;
typedef struct {
struct __pyx_memoryview_obj *memview;
char *data;
Py_ssize_t shape[8];
Py_ssize_t strides[8];
Py_ssize_t suboffsets[8];
} __Pyx_memviewslice;
#define __Pyx_MemoryView_Len(m) (m.shape[0])
/* Atomics.proto */
#include <pythread.h>
#ifndef CYTHON_ATOMICS
#define CYTHON_ATOMICS 1
#endif
#define __pyx_atomic_int_type int
#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\
                    (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2)) &&\
!defined(__i386__)
#define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
#define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using GNU atomics"
#endif
#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0
#include <Windows.h>
#undef __pyx_atomic_int_type
#define __pyx_atomic_int_type LONG
#define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#pragma message ("Using MSVC atomics")
#endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
#define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using Intel atomics"
#endif
#else
#undef CYTHON_ATOMICS
#define CYTHON_ATOMICS 0
#ifdef __PYX_DEBUG_ATOMICS
#warning "Not using atomics"
#endif
#endif
typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
#if CYTHON_ATOMICS
#define __pyx_add_acquisition_count(memview)\
__pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
__pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#else
#define __pyx_add_acquisition_count(memview)\
__pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
__pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif
/* BufferFormatStructs.proto */
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
const char* name;
struct __Pyx_StructField_* fields;
size_t size;
size_t arraysize[8];
int ndim;
char typegroup;
char is_unsigned;
int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
__Pyx_TypeInfo* type;
const char* name;
size_t offset;
} __Pyx_StructField;
typedef struct {
__Pyx_StructField* field;
size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
__Pyx_StructField root;
__Pyx_BufFmt_StackElem* head;
size_t fmt_offset;
size_t new_count, enc_count;
size_t struct_alignment;
int is_complex;
char enc_type;
char new_packmode;
char enc_packmode;
char is_valid_array;
} __Pyx_BufFmt_Context;
/*--- Type declarations ---*/
struct __pyx_array_obj;
struct __pyx_MemviewEnum_obj;
struct __pyx_memoryview_obj;
struct __pyx_memoryviewslice_obj;
struct __pyx_opt_args_5mnnpy_6_utils__adjust_shift_variance;
/* "mnnpy/_utils.pyx":89
* @cython.boundscheck(False) # Deactivate bounds checking
* @cython.wraparound(False) # Deactivate negative indexing.
* cpdef _adjust_shift_variance(data1, data2, correction, sigma, n_jobs, var_subset=None): # <<<<<<<<<<<<<<
* if var_subset is not None:
* vect = correction[:, var_subset]
*/
struct __pyx_opt_args_5mnnpy_6_utils__adjust_shift_variance {
int __pyx_n;
PyObject *var_subset;
};
/* "View.MemoryView":104
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
struct __pyx_array_obj {
PyObject_HEAD
struct __pyx_vtabstruct_array *__pyx_vtab;
char *data;
Py_ssize_t len;
char *format;
int ndim;
Py_ssize_t *_shape;
Py_ssize_t *_strides;
Py_ssize_t itemsize;
PyObject *mode;
PyObject *_format;
void (*callback_free_data)(void *);
int free_data;
int dtype_is_object;
};
/* "View.MemoryView":278
*
* @cname('__pyx_MemviewEnum')
* cdef class Enum(object): # <<<<<<<<<<<<<<
* cdef object name
* def __init__(self, name):
*/
struct __pyx_MemviewEnum_obj {
PyObject_HEAD
PyObject *name;
};
/* "View.MemoryView":329
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
struct __pyx_memoryview_obj {
PyObject_HEAD
struct __pyx_vtabstruct_memoryview *__pyx_vtab;
PyObject *obj;
PyObject *_size;
PyObject *_array_interface;
PyThread_type_lock lock;
__pyx_atomic_int acquisition_count[2];
__pyx_atomic_int *acquisition_count_aligned_p;
Py_buffer view;
int flags;
int dtype_is_object;
__Pyx_TypeInfo *typeinfo;
};
/* "View.MemoryView":960
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
struct __pyx_memoryviewslice_obj {
struct __pyx_memoryview_obj __pyx_base;
__Pyx_memviewslice from_slice;
PyObject *from_object;
PyObject *(*to_object_func)(char *);
int (*to_dtype_func)(char *, PyObject *);
};
/* "View.MemoryView":104
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
struct __pyx_vtabstruct_array {
PyObject *(*get_memview)(struct __pyx_array_obj *);
};
static struct __pyx_vtabstruct_array *__pyx_vtabptr_array;
/* "View.MemoryView":329
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
struct __pyx_vtabstruct_memoryview {
char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *);
PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *);
};
static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview;
/* "View.MemoryView":960
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
struct __pyx_vtabstruct__memoryviewslice {
struct __pyx_vtabstruct_memoryview __pyx_base;
};
static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice;
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
/* PyObjectGetAttrStr.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* PyThreadStateGet.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#define __Pyx_PyErr_Occurred() PyErr_Occurred()
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
#else
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#endif
#else
#define __Pyx_PyErr_Clear() PyErr_Clear()
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* WriteUnraisableException.proto */
static void __Pyx_WriteUnraisable(const char *name, int clineno,
int lineno, const char *filename,
int full_traceback, int nogil);
/* GetItemInt.proto */
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
(is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
__Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound, int boundscheck);
/* ObjectGetItem.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key);
#else
#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key)
#endif
/* GetModuleGlobalName.proto */
static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name);
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* PyIntBinop.proto */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace);
#else
#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace)\
(inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2))
#endif
/* MemviewSliceInit.proto */
#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d
#define __Pyx_MEMVIEW_DIRECT 1
#define __Pyx_MEMVIEW_PTR 2
#define __Pyx_MEMVIEW_FULL 4
#define __Pyx_MEMVIEW_CONTIG 8
#define __Pyx_MEMVIEW_STRIDED 16
#define __Pyx_MEMVIEW_FOLLOW 32
#define __Pyx_IS_C_CONTIG 1
#define __Pyx_IS_F_CONTIG 2
static int __Pyx_init_memviewslice(
struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference);
static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p)
#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview))
#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__)
static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int);
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int);
/* PyFunctionFastCall.proto */
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs)\
__Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
#endif
/* PyCFunctionFastCall.proto */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
#endif
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
const char* function_name);
/* ArgTypeTest.proto */
#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\
((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\
__Pyx__ArgTypeTest(obj, type, name, exact))
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact);
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
/* IncludeStringH.proto */
#include <string.h>
/* BytesEquals.proto */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);
/* UnicodeEquals.proto */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);
/* StrEquals.proto */
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
#else
#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
#endif
/* None.proto */
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t);
/* UnaryNegOverflows.proto */
#define UNARY_NEG_WOULD_OVERFLOW(x)\
(((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/
/* GetAttr.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
/* decode_c_string_utf16.proto */
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = 0;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = -1;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = 1;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
/* decode_c_string.proto */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors));
/* PyErrExceptionMatches.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
#else
#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
#endif
/* GetAttr3.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *);
/* RaiseTooManyValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
/* RaiseNeedMoreValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
/* RaiseNoneIterError.proto */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
/* ExtTypeTest.proto */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
/* SaveResetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
#else
#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
#endif
/* GetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* SwapException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* Import.proto */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* FastTypeChecks.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
#else
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
#endif
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
/* ListCompAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {
PyListObject* L = (PyListObject*) list;
Py_ssize_t len = Py_SIZE(list);
if (likely(L->allocated > len)) {
Py_INCREF(x);
PyList_SET_ITEM(list, len, x);
Py_SIZE(list) = len+1;
return 0;
}
return PyList_Append(list, x);
}
#else
#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x)
#endif
/* ListExtend.proto */
static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) {
#if CYTHON_COMPILING_IN_CPYTHON
PyObject* none = _PyList_Extend((PyListObject*)L, v);
if (unlikely(!none))
return -1;
Py_DECREF(none);
return 0;
#else
return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v);
#endif
}
/* ListAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
PyListObject* L = (PyListObject*) list;
Py_ssize_t len = Py_SIZE(list);
if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {
Py_INCREF(x);
PyList_SET_ITEM(list, len, x);
Py_SIZE(list) = len+1;
return 0;
}
return PyList_Append(list, x);
}
#else
#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
#endif
/* None.proto */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
/* None.proto */
static CYTHON_INLINE long __Pyx_div_long(long, long);
/* ImportFrom.proto */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
/* HasAttr.proto */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *);
/* PyObject_GenericGetAttrNoDict.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
#endif
/* PyObject_GenericGetAttr.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr
#endif
/* SetVTable.proto */
static int __Pyx_SetVtable(PyObject *dict, void *vtable);
/* SetupReduce.proto */
static int __Pyx_setup_reduce(PyObject* type_obj);
/* CLineInTraceback.proto */
#ifdef CYTHON_CLINE_IN_TRACEBACK
#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
#else
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
#endif
/* CodeObjectCache.proto */
typedef struct {
PyCodeObject* code_object;
int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
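/* The code-object cache declared above keeps its entries sorted by
 * code_line so that __pyx_find_code_object can locate an entry with a
 * binary search (the "bisect" in __pyx_bisect_code_objects).  A minimal
 * hedged sketch of such a bisect-right search over a plain sorted int
 * array follows; it is illustrative only and not the generated
 * implementation: */
static int sketch_bisect_right(const int *lines, int count, int target)
{
    int lo = 0, hi = count;          /* search the half-open range [lo, hi) */
    while (lo < hi) {
        int mid = lo + (hi - lo) / 2;
        if (lines[mid] <= target)    /* keep entries <= target on the left */
            lo = mid + 1;
        else
            hi = mid;
    }
    return lo;                       /* first index whose line is > target */
}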
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
/* BufferStructDeclare.proto */
typedef struct {
Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
typedef struct {
size_t refcount;
Py_buffer pybuffer;
} __Pyx_Buffer;
typedef struct {
__Pyx_Buffer *rcbuffer;
char *data;
__Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
/* MemviewSliceIsContig.proto */
static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim);
/* OverlappingSlices.proto */
static int __pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize);
/* Capsule.proto */
static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* MemviewSliceCopyTemplate.proto */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
/* CIntFromPy.proto */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *);
/* IsLittleEndian.proto */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void);
/* BufferFormatCheck.proto */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type);
/* TypeInfoCompare.proto */
static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b);
/* MemviewSliceValidateAndInit.proto */
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_float(PyObject *, int writable_flag);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_float(PyObject *, int writable_flag);
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
/* Module declarations from 'cython.view' */
/* Module declarations from 'cython' */
/* Module declarations from 'libc.string' */
/* Module declarations from 'libc.stdlib' */
/* Module declarations from 'libc.math' */
/* Module declarations from 'mnnpy._utils' */
static PyTypeObject *__pyx_array_type = 0;
static PyTypeObject *__pyx_MemviewEnum_type = 0;
static PyTypeObject *__pyx_memoryview_type = 0;
static PyTypeObject *__pyx_memoryviewslice_type = 0;
static PyObject *generic = 0;
static PyObject *strided = 0;
static PyObject *indirect = 0;
static PyObject *contiguous = 0;
static PyObject *indirect_contiguous = 0;
static int __pyx_memoryview_thread_locks_used;
static PyThread_type_lock __pyx_memoryview_thread_locks[8];
static float __pyx_f_5mnnpy_6_utils__adjust_s_variance(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, float); /*proto*/
static PyObject *__pyx_f_5mnnpy_6_utils__adjust_shift_variance(PyObject *, PyObject *, PyObject *, PyObject *, PyObject *, int __pyx_skip_dispatch, struct __pyx_opt_args_5mnnpy_6_utils__adjust_shift_variance *__pyx_optional_args); /*proto*/
static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/
static void *__pyx_align_pointer(void *, size_t); /*proto*/
static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/
static PyObject *_unellipsify(PyObject *, int); /*proto*/
static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/
static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/
static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/
static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/
static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/
static int __pyx_memoryview_err(PyObject *, char *); /*proto*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/
static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/
static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/
static __Pyx_TypeInfo __Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 };
#define __Pyx_MODULE_NAME "mnnpy._utils"
extern int __pyx_module_is_main_mnnpy___utils;
int __pyx_module_is_main_mnnpy___utils = 0;
/* Implementation of 'mnnpy._utils' */
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_MemoryError;
static PyObject *__pyx_builtin_enumerate;
static PyObject *__pyx_builtin_TypeError;
static PyObject *__pyx_builtin_Ellipsis;
static PyObject *__pyx_builtin_id;
static PyObject *__pyx_builtin_IndexError;
static const char __pyx_k_O[] = "O";
static const char __pyx_k_c[] = "c";
static const char __pyx_k_id[] = "id";
static const char __pyx_k_np[] = "np";
static const char __pyx_k_new[] = "__new__";
static const char __pyx_k_obj[] = "obj";
static const char __pyx_k_base[] = "base";
static const char __pyx_k_dict[] = "__dict__";
static const char __pyx_k_fmax[] = "fmax";
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_mode[] = "mode";
static const char __pyx_k_name[] = "name";
static const char __pyx_k_ndim[] = "ndim";
static const char __pyx_k_pack[] = "pack";
static const char __pyx_k_size[] = "size";
static const char __pyx_k_step[] = "step";
static const char __pyx_k_stop[] = "stop";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_ASCII[] = "ASCII";
static const char __pyx_k_class[] = "__class__";
static const char __pyx_k_data1[] = "data1";
static const char __pyx_k_data2[] = "data2";
static const char __pyx_k_dtype[] = "dtype";
static const char __pyx_k_error[] = "error";
static const char __pyx_k_flags[] = "flags";
static const char __pyx_k_numpy[] = "numpy";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_shape[] = "shape";
static const char __pyx_k_sigma[] = "sigma";
static const char __pyx_k_start[] = "start";
static const char __pyx_k_zeros[] = "zeros";
static const char __pyx_k_encode[] = "encode";
static const char __pyx_k_format[] = "format";
static const char __pyx_k_import[] = "__import__";
static const char __pyx_k_n_jobs[] = "n_jobs";
static const char __pyx_k_name_2[] = "__name__";
static const char __pyx_k_pickle[] = "pickle";
static const char __pyx_k_reduce[] = "__reduce__";
static const char __pyx_k_struct[] = "struct";
static const char __pyx_k_unpack[] = "unpack";
static const char __pyx_k_update[] = "update";
static const char __pyx_k_float32[] = "float32";
static const char __pyx_k_fortran[] = "fortran";
static const char __pyx_k_memview[] = "memview";
static const char __pyx_k_Ellipsis[] = "Ellipsis";
static const char __pyx_k_getstate[] = "__getstate__";
static const char __pyx_k_itemsize[] = "itemsize";
static const char __pyx_k_pyx_type[] = "__pyx_type";
static const char __pyx_k_setstate[] = "__setstate__";
static const char __pyx_k_TypeError[] = "TypeError";
static const char __pyx_k_enumerate[] = "enumerate";
static const char __pyx_k_pyx_state[] = "__pyx_state";
static const char __pyx_k_reduce_ex[] = "__reduce_ex__";
static const char __pyx_k_IndexError[] = "IndexError";
static const char __pyx_k_ValueError[] = "ValueError";
static const char __pyx_k_correction[] = "correction";
static const char __pyx_k_pyx_result[] = "__pyx_result";
static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__";
static const char __pyx_k_var_subset[] = "var_subset";
static const char __pyx_k_MemoryError[] = "MemoryError";
static const char __pyx_k_PickleError[] = "PickleError";
static const char __pyx_k_pyx_checksum[] = "__pyx_checksum";
static const char __pyx_k_stringsource[] = "stringsource";
static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer";
static const char __pyx_k_reduce_cython[] = "__reduce_cython__";
static const char __pyx_k_View_MemoryView[] = "View.MemoryView";
static const char __pyx_k_allocate_buffer[] = "allocate_buffer";
static const char __pyx_k_dtype_is_object[] = "dtype_is_object";
static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError";
static const char __pyx_k_setstate_cython[] = "__setstate_cython__";
static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum";
static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
static const char __pyx_k_strided_and_direct[] = "<strided and direct>";
static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>";
static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>";
static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>";
static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>";
static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>";
static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'";
static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d.";
static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array";
static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data.";
static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>";
static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides";
static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory.";
static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview";
static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview";
static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array";
static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))";
static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported";
static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s";
static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)";
static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object";
static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)";
static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__";
static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides.";
static PyObject *__pyx_n_s_ASCII;
static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri;
static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is;
static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor;
static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi;
static PyObject *__pyx_kp_s_Cannot_index_with_type_s;
static PyObject *__pyx_n_s_Ellipsis;
static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr;
static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0;
static PyObject *__pyx_n_s_IndexError;
static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte;
static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr;
static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d;
static PyObject *__pyx_n_s_MemoryError;
static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x;
static PyObject *__pyx_kp_s_MemoryView_of_r_object;
static PyObject *__pyx_n_b_O;
static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a;
static PyObject *__pyx_n_s_PickleError;
static PyObject *__pyx_n_s_TypeError;
static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_n_s_View_MemoryView;
static PyObject *__pyx_n_s_allocate_buffer;
static PyObject *__pyx_n_s_base;
static PyObject *__pyx_n_s_c;
static PyObject *__pyx_n_u_c;
static PyObject *__pyx_n_s_class;
static PyObject *__pyx_n_s_cline_in_traceback;
static PyObject *__pyx_kp_s_contiguous_and_direct;
static PyObject *__pyx_kp_s_contiguous_and_indirect;
static PyObject *__pyx_n_s_correction;
static PyObject *__pyx_n_s_data1;
static PyObject *__pyx_n_s_data2;
static PyObject *__pyx_n_s_dict;
static PyObject *__pyx_n_s_dtype;
static PyObject *__pyx_n_s_dtype_is_object;
static PyObject *__pyx_n_s_encode;
static PyObject *__pyx_n_s_enumerate;
static PyObject *__pyx_n_s_error;
static PyObject *__pyx_n_s_flags;
static PyObject *__pyx_n_s_float32;
static PyObject *__pyx_n_s_fmax;
static PyObject *__pyx_n_s_format;
static PyObject *__pyx_n_s_fortran;
static PyObject *__pyx_n_u_fortran;
static PyObject *__pyx_n_s_getstate;
static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi;
static PyObject *__pyx_n_s_id;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_itemsize;
static PyObject *__pyx_kp_s_itemsize_0_for_cython_array;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_memview;
static PyObject *__pyx_n_s_mode;
static PyObject *__pyx_n_s_n_jobs;
static PyObject *__pyx_n_s_name;
static PyObject *__pyx_n_s_name_2;
static PyObject *__pyx_n_s_ndim;
static PyObject *__pyx_n_s_new;
static PyObject *__pyx_kp_s_no_default___reduce___due_to_non;
static PyObject *__pyx_n_s_np;
static PyObject *__pyx_n_s_numpy;
static PyObject *__pyx_n_s_obj;
static PyObject *__pyx_n_s_pack;
static PyObject *__pyx_n_s_pickle;
static PyObject *__pyx_n_s_pyx_PickleError;
static PyObject *__pyx_n_s_pyx_checksum;
static PyObject *__pyx_n_s_pyx_getbuffer;
static PyObject *__pyx_n_s_pyx_result;
static PyObject *__pyx_n_s_pyx_state;
static PyObject *__pyx_n_s_pyx_type;
static PyObject *__pyx_n_s_pyx_unpickle_Enum;
static PyObject *__pyx_n_s_pyx_vtable;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_reduce;
static PyObject *__pyx_n_s_reduce_cython;
static PyObject *__pyx_n_s_reduce_ex;
static PyObject *__pyx_n_s_setstate;
static PyObject *__pyx_n_s_setstate_cython;
static PyObject *__pyx_n_s_shape;
static PyObject *__pyx_n_s_sigma;
static PyObject *__pyx_n_s_size;
static PyObject *__pyx_n_s_start;
static PyObject *__pyx_n_s_step;
static PyObject *__pyx_n_s_stop;
static PyObject *__pyx_kp_s_strided_and_direct;
static PyObject *__pyx_kp_s_strided_and_direct_or_indirect;
static PyObject *__pyx_kp_s_strided_and_indirect;
static PyObject *__pyx_kp_s_stringsource;
static PyObject *__pyx_n_s_struct;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_kp_s_unable_to_allocate_array_data;
static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str;
static PyObject *__pyx_n_s_unpack;
static PyObject *__pyx_n_s_update;
static PyObject *__pyx_n_s_var_subset;
static PyObject *__pyx_n_s_zeros;
static PyObject *__pyx_pf_5mnnpy_6_utils__adjust_shift_variance(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_data1, PyObject *__pyx_v_data2, PyObject *__pyx_v_correction, PyObject *__pyx_v_sigma, PyObject *__pyx_v_n_jobs, PyObject *__pyx_v_var_subset); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_int_0;
static PyObject *__pyx_int_1;
static PyObject *__pyx_int_184977713;
static PyObject *__pyx_int_neg_1;
static PyObject *__pyx_slice_;
static PyObject *__pyx_slice__2;
static PyObject *__pyx_slice__3;
static PyObject *__pyx_slice__4;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__7;
static PyObject *__pyx_tuple__8;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_slice__21;
static PyObject *__pyx_slice__22;
static PyObject *__pyx_slice__23;
static PyObject *__pyx_tuple__10;
static PyObject *__pyx_tuple__11;
static PyObject *__pyx_tuple__12;
static PyObject *__pyx_tuple__13;
static PyObject *__pyx_tuple__14;
static PyObject *__pyx_tuple__15;
static PyObject *__pyx_tuple__16;
static PyObject *__pyx_tuple__17;
static PyObject *__pyx_tuple__18;
static PyObject *__pyx_tuple__19;
static PyObject *__pyx_tuple__20;
static PyObject *__pyx_tuple__24;
static PyObject *__pyx_tuple__25;
static PyObject *__pyx_tuple__26;
static PyObject *__pyx_tuple__27;
static PyObject *__pyx_tuple__28;
static PyObject *__pyx_tuple__29;
static PyObject *__pyx_tuple__30;
static PyObject *__pyx_tuple__31;
static PyObject *__pyx_tuple__32;
static PyObject *__pyx_codeobj__33;
/* Late includes */
/* "mnnpy/_utils.pyx":12
* @cython.boundscheck(False) # Deactivate bounds checking
* @cython.wraparound(False) # Deactivate negative indexing.
* cdef float _adjust_s_variance(float [:, :] data1, float [:, :] data2, float [:] curcell, float [:] curvect, float sigma) nogil: # <<<<<<<<<<<<<<
* cdef Py_ssize_t i, j
* cdef Py_ssize_t n_c1 = data1.shape[0]
*/
static float __pyx_f_5mnnpy_6_utils__adjust_s_variance(__Pyx_memviewslice __pyx_v_data1, __Pyx_memviewslice __pyx_v_data2, __Pyx_memviewslice __pyx_v_curcell, __Pyx_memviewslice __pyx_v_curvect, float __pyx_v_sigma) {
Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_j;
Py_ssize_t __pyx_v_n_c1;
Py_ssize_t __pyx_v_n_c2;
Py_ssize_t __pyx_v_n_v;
float __pyx_v_prob2;
float __pyx_v_totalprob2;
float __pyx_v_curproj;
float __pyx_v_l2_norm;
float *__pyx_v_grad;
float **__pyx_v_d1;
float __pyx_v_sameproj;
float __pyx_v_samedist;
float __pyx_v_dist_scale;
float *__pyx_v_dist_working;
float __pyx_v_sameprob;
float __pyx_v_totalprob1;
float __pyx_v_otherdist;
float __pyx_v_weight;
float __pyx_v_target;
float __pyx_v_cumulative;
float __pyx_v_ref_quan;
float __pyx_r;
Py_ssize_t __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
Py_ssize_t __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
float __pyx_t_7;
Py_ssize_t __pyx_t_8;
Py_ssize_t __pyx_t_9;
Py_ssize_t __pyx_t_10;
Py_ssize_t __pyx_t_11;
Py_ssize_t __pyx_t_12;
Py_ssize_t __pyx_t_13;
Py_ssize_t __pyx_t_14;
Py_ssize_t __pyx_t_15;
Py_ssize_t __pyx_t_16;
Py_ssize_t __pyx_t_17;
int __pyx_t_18;
long __pyx_t_19;
Py_ssize_t __pyx_t_20;
Py_ssize_t __pyx_t_21;
Py_ssize_t __pyx_t_22;
Py_ssize_t __pyx_t_23;
Py_ssize_t __pyx_t_24;
/* "mnnpy/_utils.pyx":14
* cdef float _adjust_s_variance(float [:, :] data1, float [:, :] data2, float [:] curcell, float [:] curvect, float sigma) nogil:
* cdef Py_ssize_t i, j
* cdef Py_ssize_t n_c1 = data1.shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t n_c2 = data2.shape[0]
* cdef Py_ssize_t n_v = data2.shape[1]
*/
__pyx_v_n_c1 = (__pyx_v_data1.shape[0]);
/* "mnnpy/_utils.pyx":15
* cdef Py_ssize_t i, j
* cdef Py_ssize_t n_c1 = data1.shape[0]
* cdef Py_ssize_t n_c2 = data2.shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t n_v = data2.shape[1]
* cdef float prob2 = 0
*/
__pyx_v_n_c2 = (__pyx_v_data2.shape[0]);
/* "mnnpy/_utils.pyx":16
* cdef Py_ssize_t n_c1 = data1.shape[0]
* cdef Py_ssize_t n_c2 = data2.shape[0]
* cdef Py_ssize_t n_v = data2.shape[1] # <<<<<<<<<<<<<<
* cdef float prob2 = 0
* cdef float totalprob2 = 0
*/
__pyx_v_n_v = (__pyx_v_data2.shape[1]);
/* "mnnpy/_utils.pyx":17
* cdef Py_ssize_t n_c2 = data2.shape[0]
* cdef Py_ssize_t n_v = data2.shape[1]
* cdef float prob2 = 0 # <<<<<<<<<<<<<<
* cdef float totalprob2 = 0
* cdef float curproj = 0
*/
__pyx_v_prob2 = 0.0;
/* "mnnpy/_utils.pyx":18
* cdef Py_ssize_t n_v = data2.shape[1]
* cdef float prob2 = 0
* cdef float totalprob2 = 0 # <<<<<<<<<<<<<<
* cdef float curproj = 0
* cdef float l2_norm = 0
*/
__pyx_v_totalprob2 = 0.0;
/* "mnnpy/_utils.pyx":19
* cdef float prob2 = 0
* cdef float totalprob2 = 0
* cdef float curproj = 0 # <<<<<<<<<<<<<<
* cdef float l2_norm = 0
* for j in range(n_v):
*/
__pyx_v_curproj = 0.0;
/* "mnnpy/_utils.pyx":20
* cdef float totalprob2 = 0
* cdef float curproj = 0
* cdef float l2_norm = 0 # <<<<<<<<<<<<<<
* for j in range(n_v):
* l2_norm += curvect[j] * curvect[j]
*/
__pyx_v_l2_norm = 0.0;
/* "mnnpy/_utils.pyx":21
* cdef float curproj = 0
* cdef float l2_norm = 0
* for j in range(n_v): # <<<<<<<<<<<<<<
* l2_norm += curvect[j] * curvect[j]
* l2_norm = sqrt(l2_norm)
*/
__pyx_t_1 = __pyx_v_n_v;
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_j = __pyx_t_3;
/* "mnnpy/_utils.pyx":22
* cdef float l2_norm = 0
* for j in range(n_v):
* l2_norm += curvect[j] * curvect[j] # <<<<<<<<<<<<<<
* l2_norm = sqrt(l2_norm)
* cdef float *grad = <float *>malloc(n_v * sizeof(float))
*/
__pyx_t_4 = __pyx_v_j;
__pyx_t_5 = __pyx_v_j;
__pyx_v_l2_norm = (__pyx_v_l2_norm + ((*((float *) ( /* dim=0 */ (__pyx_v_curvect.data + __pyx_t_4 * __pyx_v_curvect.strides[0]) ))) * (*((float *) ( /* dim=0 */ (__pyx_v_curvect.data + __pyx_t_5 * __pyx_v_curvect.strides[0]) )))));
}
/* "mnnpy/_utils.pyx":23
* for j in range(n_v):
* l2_norm += curvect[j] * curvect[j]
* l2_norm = sqrt(l2_norm) # <<<<<<<<<<<<<<
* cdef float *grad = <float *>malloc(n_v * sizeof(float))
* for j in range(n_v):
*/
__pyx_v_l2_norm = sqrt(__pyx_v_l2_norm);
/* "mnnpy/_utils.pyx":24
* l2_norm += curvect[j] * curvect[j]
* l2_norm = sqrt(l2_norm)
* cdef float *grad = <float *>malloc(n_v * sizeof(float)) # <<<<<<<<<<<<<<
* for j in range(n_v):
* grad[j] = curvect[j] / l2_norm
*/
__pyx_v_grad = ((float *)malloc((__pyx_v_n_v * (sizeof(float)))));
/* "mnnpy/_utils.pyx":25
* l2_norm = sqrt(l2_norm)
* cdef float *grad = <float *>malloc(n_v * sizeof(float))
* for j in range(n_v): # <<<<<<<<<<<<<<
* grad[j] = curvect[j] / l2_norm
* curproj += grad[j] * curcell[j]
*/
__pyx_t_1 = __pyx_v_n_v;
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_j = __pyx_t_3;
/* "mnnpy/_utils.pyx":26
* cdef float *grad = <float *>malloc(n_v * sizeof(float))
* for j in range(n_v):
* grad[j] = curvect[j] / l2_norm # <<<<<<<<<<<<<<
* curproj += grad[j] * curcell[j]
* cdef float **d1 = <float **>malloc(n_c1 * sizeof(float*))
*/
__pyx_t_6 = __pyx_v_j;
__pyx_t_7 = (*((float *) ( /* dim=0 */ (__pyx_v_curvect.data + __pyx_t_6 * __pyx_v_curvect.strides[0]) )));
if (unlikely(__pyx_v_l2_norm == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
__PYX_ERR(0, 26, __pyx_L1_error)
}
(__pyx_v_grad[__pyx_v_j]) = (__pyx_t_7 / __pyx_v_l2_norm);
/* "mnnpy/_utils.pyx":27
* for j in range(n_v):
* grad[j] = curvect[j] / l2_norm
* curproj += grad[j] * curcell[j] # <<<<<<<<<<<<<<
* cdef float **d1 = <float **>malloc(n_c1 * sizeof(float*))
* for i in range(n_c1):
*/
__pyx_t_8 = __pyx_v_j;
__pyx_v_curproj = (__pyx_v_curproj + ((__pyx_v_grad[__pyx_v_j]) * (*((float *) ( /* dim=0 */ (__pyx_v_curcell.data + __pyx_t_8 * __pyx_v_curcell.strides[0]) )))));
}
/* "mnnpy/_utils.pyx":28
* grad[j] = curvect[j] / l2_norm
* curproj += grad[j] * curcell[j]
* cdef float **d1 = <float **>malloc(n_c1 * sizeof(float*)) # <<<<<<<<<<<<<<
* for i in range(n_c1):
* d1[i] = <float *>malloc(2 * sizeof(float))
*/
__pyx_v_d1 = ((float **)malloc((__pyx_v_n_c1 * (sizeof(float *)))));
/* "mnnpy/_utils.pyx":29
* curproj += grad[j] * curcell[j]
* cdef float **d1 = <float **>malloc(n_c1 * sizeof(float*))
* for i in range(n_c1): # <<<<<<<<<<<<<<
* d1[i] = <float *>malloc(2 * sizeof(float))
* cdef float sameproj
*/
__pyx_t_1 = __pyx_v_n_c1;
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "mnnpy/_utils.pyx":30
* cdef float **d1 = <float **>malloc(n_c1 * sizeof(float*))
* for i in range(n_c1):
* d1[i] = <float *>malloc(2 * sizeof(float)) # <<<<<<<<<<<<<<
* cdef float sameproj
* cdef float samedist
*/
(__pyx_v_d1[__pyx_v_i]) = ((float *)malloc((2 * (sizeof(float)))));
}
/* "mnnpy/_utils.pyx":34
* cdef float samedist
* cdef float dist_scale
* cdef float *dist_working = <float *>malloc(n_v * sizeof(float)) # <<<<<<<<<<<<<<
* cdef float sameprob
* for i in range(n_c2):
*/
__pyx_v_dist_working = ((float *)malloc((__pyx_v_n_v * (sizeof(float)))));
/* "mnnpy/_utils.pyx":36
* cdef float *dist_working = <float *>malloc(n_v * sizeof(float))
* cdef float sameprob
* for i in range(n_c2): # <<<<<<<<<<<<<<
* sameproj = 0
* samedist = 0
*/
__pyx_t_1 = __pyx_v_n_c2;
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "mnnpy/_utils.pyx":37
* cdef float sameprob
* for i in range(n_c2):
* sameproj = 0 # <<<<<<<<<<<<<<
* samedist = 0
* dist_scale = 0
*/
__pyx_v_sameproj = 0.0;
/* "mnnpy/_utils.pyx":38
* for i in range(n_c2):
* sameproj = 0
* samedist = 0 # <<<<<<<<<<<<<<
* dist_scale = 0
* for j in range(n_v):
*/
__pyx_v_samedist = 0.0;
/* "mnnpy/_utils.pyx":39
* sameproj = 0
* samedist = 0
* dist_scale = 0 # <<<<<<<<<<<<<<
* for j in range(n_v):
* sameproj += grad[j] * data2[i, j]
*/
__pyx_v_dist_scale = 0.0;
/* "mnnpy/_utils.pyx":40
* samedist = 0
* dist_scale = 0
* for j in range(n_v): # <<<<<<<<<<<<<<
* sameproj += grad[j] * data2[i, j]
* dist_working[j] = curcell[j] - data2[i, j]
*/
__pyx_t_9 = __pyx_v_n_v;
__pyx_t_10 = __pyx_t_9;
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) {
__pyx_v_j = __pyx_t_11;
/* "mnnpy/_utils.pyx":41
* dist_scale = 0
* for j in range(n_v):
* sameproj += grad[j] * data2[i, j] # <<<<<<<<<<<<<<
* dist_working[j] = curcell[j] - data2[i, j]
* for j in range(n_v):
*/
__pyx_t_12 = __pyx_v_i;
__pyx_t_13 = __pyx_v_j;
__pyx_v_sameproj = (__pyx_v_sameproj + ((__pyx_v_grad[__pyx_v_j]) * (*((float *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_data2.data + __pyx_t_12 * __pyx_v_data2.strides[0]) ) + __pyx_t_13 * __pyx_v_data2.strides[1]) )))));
/* "mnnpy/_utils.pyx":42
* for j in range(n_v):
* sameproj += grad[j] * data2[i, j]
* dist_working[j] = curcell[j] - data2[i, j] # <<<<<<<<<<<<<<
* for j in range(n_v):
* dist_scale += dist_working[j] * grad[j]
*/
__pyx_t_14 = __pyx_v_j;
__pyx_t_15 = __pyx_v_i;
__pyx_t_16 = __pyx_v_j;
(__pyx_v_dist_working[__pyx_v_j]) = ((*((float *) ( /* dim=0 */ (__pyx_v_curcell.data + __pyx_t_14 * __pyx_v_curcell.strides[0]) ))) - (*((float *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_data2.data + __pyx_t_15 * __pyx_v_data2.strides[0]) ) + __pyx_t_16 * __pyx_v_data2.strides[1]) ))));
}
/* "mnnpy/_utils.pyx":43
* sameproj += grad[j] * data2[i, j]
* dist_working[j] = curcell[j] - data2[i, j]
* for j in range(n_v): # <<<<<<<<<<<<<<
* dist_scale += dist_working[j] * grad[j]
* for j in range(n_v):
*/
__pyx_t_9 = __pyx_v_n_v;
__pyx_t_10 = __pyx_t_9;
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) {
__pyx_v_j = __pyx_t_11;
/* "mnnpy/_utils.pyx":44
* dist_working[j] = curcell[j] - data2[i, j]
* for j in range(n_v):
* dist_scale += dist_working[j] * grad[j] # <<<<<<<<<<<<<<
* for j in range(n_v):
* dist_working[j] -= grad[j] * dist_scale
*/
__pyx_v_dist_scale = (__pyx_v_dist_scale + ((__pyx_v_dist_working[__pyx_v_j]) * (__pyx_v_grad[__pyx_v_j])));
}
/* "mnnpy/_utils.pyx":45
* for j in range(n_v):
* dist_scale += dist_working[j] * grad[j]
* for j in range(n_v): # <<<<<<<<<<<<<<
* dist_working[j] -= grad[j] * dist_scale
* samedist += dist_working[j] * dist_working[j]
*/
__pyx_t_9 = __pyx_v_n_v;
__pyx_t_10 = __pyx_t_9;
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) {
__pyx_v_j = __pyx_t_11;
/* "mnnpy/_utils.pyx":46
* dist_scale += dist_working[j] * grad[j]
* for j in range(n_v):
* dist_working[j] -= grad[j] * dist_scale # <<<<<<<<<<<<<<
* samedist += dist_working[j] * dist_working[j]
* sameprob = exp(-samedist / sigma)
*/
__pyx_t_17 = __pyx_v_j;
(__pyx_v_dist_working[__pyx_t_17]) = ((__pyx_v_dist_working[__pyx_t_17]) - ((__pyx_v_grad[__pyx_v_j]) * __pyx_v_dist_scale));
/* "mnnpy/_utils.pyx":47
* for j in range(n_v):
* dist_working[j] -= grad[j] * dist_scale
* samedist += dist_working[j] * dist_working[j] # <<<<<<<<<<<<<<
* sameprob = exp(-samedist / sigma)
* if sameproj <= curproj:
*/
__pyx_v_samedist = (__pyx_v_samedist + ((__pyx_v_dist_working[__pyx_v_j]) * (__pyx_v_dist_working[__pyx_v_j])));
}
/* "mnnpy/_utils.pyx":48
* dist_working[j] -= grad[j] * dist_scale
* samedist += dist_working[j] * dist_working[j]
* sameprob = exp(-samedist / sigma) # <<<<<<<<<<<<<<
* if sameproj <= curproj:
* prob2 += sameprob
*/
__pyx_t_7 = (-__pyx_v_samedist);
if (unlikely(__pyx_v_sigma == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
__PYX_ERR(0, 48, __pyx_L1_error)
}
__pyx_v_sameprob = exp((__pyx_t_7 / __pyx_v_sigma));
/* "mnnpy/_utils.pyx":49
* samedist += dist_working[j] * dist_working[j]
* sameprob = exp(-samedist / sigma)
* if sameproj <= curproj: # <<<<<<<<<<<<<<
* prob2 += sameprob
* totalprob2 += sameprob
*/
__pyx_t_18 = ((__pyx_v_sameproj <= __pyx_v_curproj) != 0);
if (__pyx_t_18) {
/* "mnnpy/_utils.pyx":50
* sameprob = exp(-samedist / sigma)
* if sameproj <= curproj:
* prob2 += sameprob # <<<<<<<<<<<<<<
* totalprob2 += sameprob
* prob2 /= totalprob2
*/
__pyx_v_prob2 = (__pyx_v_prob2 + __pyx_v_sameprob);
/* "mnnpy/_utils.pyx":49
* samedist += dist_working[j] * dist_working[j]
* sameprob = exp(-samedist / sigma)
* if sameproj <= curproj: # <<<<<<<<<<<<<<
* prob2 += sameprob
* totalprob2 += sameprob
*/
}
/* "mnnpy/_utils.pyx":51
* if sameproj <= curproj:
* prob2 += sameprob
* totalprob2 += sameprob # <<<<<<<<<<<<<<
* prob2 /= totalprob2
* cdef float totalprob1 = 0
*/
__pyx_v_totalprob2 = (__pyx_v_totalprob2 + __pyx_v_sameprob);
}
/* "mnnpy/_utils.pyx":52
* prob2 += sameprob
* totalprob2 += sameprob
* prob2 /= totalprob2 # <<<<<<<<<<<<<<
* cdef float totalprob1 = 0
* cdef float otherdist
*/
if (unlikely(__pyx_v_totalprob2 == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
__PYX_ERR(0, 52, __pyx_L1_error)
}
__pyx_v_prob2 = (__pyx_v_prob2 / __pyx_v_totalprob2);
/* "mnnpy/_utils.pyx":53
* totalprob2 += sameprob
* prob2 /= totalprob2
* cdef float totalprob1 = 0 # <<<<<<<<<<<<<<
* cdef float otherdist
* cdef float weight
*/
__pyx_v_totalprob1 = 0.0;
/* "mnnpy/_utils.pyx":56
* cdef float otherdist
* cdef float weight
* for i in range(n_c1): # <<<<<<<<<<<<<<
* otherdist = 0
* dist_scale = 0
*/
__pyx_t_1 = __pyx_v_n_c1;
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "mnnpy/_utils.pyx":57
* cdef float weight
* for i in range(n_c1):
* otherdist = 0 # <<<<<<<<<<<<<<
* dist_scale = 0
* d1[i][0] = 0
*/
__pyx_v_otherdist = 0.0;
/* "mnnpy/_utils.pyx":58
* for i in range(n_c1):
* otherdist = 0
* dist_scale = 0 # <<<<<<<<<<<<<<
* d1[i][0] = 0
* for j in range(n_v):
*/
__pyx_v_dist_scale = 0.0;
/* "mnnpy/_utils.pyx":59
* otherdist = 0
* dist_scale = 0
* d1[i][0] = 0 # <<<<<<<<<<<<<<
* for j in range(n_v):
* d1[i][0] += grad[j] * data1[i, j]
*/
((__pyx_v_d1[__pyx_v_i])[0]) = 0.0;
/* "mnnpy/_utils.pyx":60
* dist_scale = 0
* d1[i][0] = 0
* for j in range(n_v): # <<<<<<<<<<<<<<
* d1[i][0] += grad[j] * data1[i, j]
* dist_working[j] = curcell[j] - data1[i, j]
*/
__pyx_t_9 = __pyx_v_n_v;
__pyx_t_10 = __pyx_t_9;
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) {
__pyx_v_j = __pyx_t_11;
/* "mnnpy/_utils.pyx":61
* d1[i][0] = 0
* for j in range(n_v):
* d1[i][0] += grad[j] * data1[i, j] # <<<<<<<<<<<<<<
* dist_working[j] = curcell[j] - data1[i, j]
* for j in range(n_v):
*/
__pyx_t_17 = __pyx_v_i;
__pyx_t_19 = 0;
__pyx_t_20 = __pyx_v_i;
__pyx_t_21 = __pyx_v_j;
((__pyx_v_d1[__pyx_t_17])[__pyx_t_19]) = (((__pyx_v_d1[__pyx_t_17])[__pyx_t_19]) + ((__pyx_v_grad[__pyx_v_j]) * (*((float *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_data1.data + __pyx_t_20 * __pyx_v_data1.strides[0]) ) + __pyx_t_21 * __pyx_v_data1.strides[1]) )))));
/* "mnnpy/_utils.pyx":62
* for j in range(n_v):
* d1[i][0] += grad[j] * data1[i, j]
* dist_working[j] = curcell[j] - data1[i, j] # <<<<<<<<<<<<<<
* for j in range(n_v):
* dist_scale += dist_working[j] * grad[j]
*/
__pyx_t_22 = __pyx_v_j;
__pyx_t_23 = __pyx_v_i;
__pyx_t_24 = __pyx_v_j;
(__pyx_v_dist_working[__pyx_v_j]) = ((*((float *) ( /* dim=0 */ (__pyx_v_curcell.data + __pyx_t_22 * __pyx_v_curcell.strides[0]) ))) - (*((float *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_data1.data + __pyx_t_23 * __pyx_v_data1.strides[0]) ) + __pyx_t_24 * __pyx_v_data1.strides[1]) ))));
}
/* "mnnpy/_utils.pyx":63
* d1[i][0] += grad[j] * data1[i, j]
* dist_working[j] = curcell[j] - data1[i, j]
* for j in range(n_v): # <<<<<<<<<<<<<<
* dist_scale += dist_working[j] * grad[j]
* for j in range(n_v):
*/
__pyx_t_9 = __pyx_v_n_v;
__pyx_t_10 = __pyx_t_9;
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) {
__pyx_v_j = __pyx_t_11;
/* "mnnpy/_utils.pyx":64
* dist_working[j] = curcell[j] - data1[i, j]
* for j in range(n_v):
* dist_scale += dist_working[j] * grad[j] # <<<<<<<<<<<<<<
* for j in range(n_v):
* dist_working[j] -= grad[j] * dist_scale
*/
__pyx_v_dist_scale = (__pyx_v_dist_scale + ((__pyx_v_dist_working[__pyx_v_j]) * (__pyx_v_grad[__pyx_v_j])));
}
/* "mnnpy/_utils.pyx":65
* for j in range(n_v):
* dist_scale += dist_working[j] * grad[j]
* for j in range(n_v): # <<<<<<<<<<<<<<
* dist_working[j] -= grad[j] * dist_scale
* otherdist += dist_working[j] * dist_working[j]
*/
__pyx_t_9 = __pyx_v_n_v;
__pyx_t_10 = __pyx_t_9;
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) {
__pyx_v_j = __pyx_t_11;
/* "mnnpy/_utils.pyx":66
* dist_scale += dist_working[j] * grad[j]
* for j in range(n_v):
* dist_working[j] -= grad[j] * dist_scale # <<<<<<<<<<<<<<
* otherdist += dist_working[j] * dist_working[j]
* weight = exp(-otherdist / sigma)
*/
__pyx_t_17 = __pyx_v_j;
(__pyx_v_dist_working[__pyx_t_17]) = ((__pyx_v_dist_working[__pyx_t_17]) - ((__pyx_v_grad[__pyx_v_j]) * __pyx_v_dist_scale));
/* "mnnpy/_utils.pyx":67
* for j in range(n_v):
* dist_working[j] -= grad[j] * dist_scale
* otherdist += dist_working[j] * dist_working[j] # <<<<<<<<<<<<<<
* weight = exp(-otherdist / sigma)
* d1[i][1] = weight
*/
__pyx_v_otherdist = (__pyx_v_otherdist + ((__pyx_v_dist_working[__pyx_v_j]) * (__pyx_v_dist_working[__pyx_v_j])));
}
/* "mnnpy/_utils.pyx":68
* dist_working[j] -= grad[j] * dist_scale
* otherdist += dist_working[j] * dist_working[j]
* weight = exp(-otherdist / sigma) # <<<<<<<<<<<<<<
* d1[i][1] = weight
* totalprob1 += weight
*/
__pyx_t_7 = (-__pyx_v_otherdist);
if (unlikely(__pyx_v_sigma == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
__PYX_ERR(0, 68, __pyx_L1_error)
}
__pyx_v_weight = exp((__pyx_t_7 / __pyx_v_sigma));
/* "mnnpy/_utils.pyx":69
* otherdist += dist_working[j] * dist_working[j]
* weight = exp(-otherdist / sigma)
* d1[i][1] = weight # <<<<<<<<<<<<<<
* totalprob1 += weight
* qsort(d1, n_c1, sizeof(float*), &comp)
*/
((__pyx_v_d1[__pyx_v_i])[1]) = __pyx_v_weight;
/* "mnnpy/_utils.pyx":70
* weight = exp(-otherdist / sigma)
* d1[i][1] = weight
* totalprob1 += weight # <<<<<<<<<<<<<<
* qsort(d1, n_c1, sizeof(float*), &comp)
* cdef float target = prob2 * totalprob1
*/
__pyx_v_totalprob1 = (__pyx_v_totalprob1 + __pyx_v_weight);
}
/* "mnnpy/_utils.pyx":71
* d1[i][1] = weight
* totalprob1 += weight
* qsort(d1, n_c1, sizeof(float*), &comp) # <<<<<<<<<<<<<<
* cdef float target = prob2 * totalprob1
* cdef float cumulative = 0
*/
qsort(__pyx_v_d1, __pyx_v_n_c1, (sizeof(float *)), (&comp));
/* "mnnpy/_utils.pyx":72
* totalprob1 += weight
* qsort(d1, n_c1, sizeof(float*), &comp)
* cdef float target = prob2 * totalprob1 # <<<<<<<<<<<<<<
* cdef float cumulative = 0
* cdef float ref_quan = d1[n_c1 - 1][0]
*/
__pyx_v_target = (__pyx_v_prob2 * __pyx_v_totalprob1);
/* "mnnpy/_utils.pyx":73
* qsort(d1, n_c1, sizeof(float*), &comp)
* cdef float target = prob2 * totalprob1
* cdef float cumulative = 0 # <<<<<<<<<<<<<<
* cdef float ref_quan = d1[n_c1 - 1][0]
* for i in range(n_c1):
*/
__pyx_v_cumulative = 0.0;
/* "mnnpy/_utils.pyx":74
* cdef float target = prob2 * totalprob1
* cdef float cumulative = 0
* cdef float ref_quan = d1[n_c1 - 1][0] # <<<<<<<<<<<<<<
* for i in range(n_c1):
* cumulative += d1[i][1]
*/
__pyx_v_ref_quan = ((__pyx_v_d1[(__pyx_v_n_c1 - 1)])[0]);
/* "mnnpy/_utils.pyx":75
* cdef float cumulative = 0
* cdef float ref_quan = d1[n_c1 - 1][0]
* for i in range(n_c1): # <<<<<<<<<<<<<<
* cumulative += d1[i][1]
* if cumulative > target:
*/
__pyx_t_1 = __pyx_v_n_c1;
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "mnnpy/_utils.pyx":76
* cdef float ref_quan = d1[n_c1 - 1][0]
* for i in range(n_c1):
* cumulative += d1[i][1] # <<<<<<<<<<<<<<
* if cumulative > target:
* ref_quan = d1[i][0]
*/
__pyx_v_cumulative = (__pyx_v_cumulative + ((__pyx_v_d1[__pyx_v_i])[1]));
/* "mnnpy/_utils.pyx":77
* for i in range(n_c1):
* cumulative += d1[i][1]
* if cumulative > target: # <<<<<<<<<<<<<<
* ref_quan = d1[i][0]
* break
*/
__pyx_t_18 = ((__pyx_v_cumulative > __pyx_v_target) != 0);
if (__pyx_t_18) {
/* "mnnpy/_utils.pyx":78
* cumulative += d1[i][1]
* if cumulative > target:
* ref_quan = d1[i][0] # <<<<<<<<<<<<<<
* break
* free(grad)
*/
__pyx_v_ref_quan = ((__pyx_v_d1[__pyx_v_i])[0]);
/* "mnnpy/_utils.pyx":79
* if cumulative > target:
* ref_quan = d1[i][0]
* break # <<<<<<<<<<<<<<
* free(grad)
* free(dist_working)
*/
goto __pyx_L27_break;
/* "mnnpy/_utils.pyx":77
* for i in range(n_c1):
* cumulative += d1[i][1]
* if cumulative > target: # <<<<<<<<<<<<<<
* ref_quan = d1[i][0]
* break
*/
}
}
__pyx_L27_break:;
/* "mnnpy/_utils.pyx":80
* ref_quan = d1[i][0]
* break
* free(grad) # <<<<<<<<<<<<<<
* free(dist_working)
* for i in range(n_c1):
*/
free(__pyx_v_grad);
/* "mnnpy/_utils.pyx":81
* break
* free(grad)
* free(dist_working) # <<<<<<<<<<<<<<
* for i in range(n_c1):
* free(d1[i])
*/
free(__pyx_v_dist_working);
/* "mnnpy/_utils.pyx":82
* free(grad)
* free(dist_working)
* for i in range(n_c1): # <<<<<<<<<<<<<<
* free(d1[i])
* free(d1)
*/
__pyx_t_1 = __pyx_v_n_c1;
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "mnnpy/_utils.pyx":83
* free(dist_working)
* for i in range(n_c1):
* free(d1[i]) # <<<<<<<<<<<<<<
* free(d1)
* return (ref_quan - curproj) / l2_norm
*/
free((__pyx_v_d1[__pyx_v_i]));
}
/* "mnnpy/_utils.pyx":84
* for i in range(n_c1):
* free(d1[i])
* free(d1) # <<<<<<<<<<<<<<
* return (ref_quan - curproj) / l2_norm
*
*/
free(__pyx_v_d1);
/* "mnnpy/_utils.pyx":85
* free(d1[i])
* free(d1)
* return (ref_quan - curproj) / l2_norm # <<<<<<<<<<<<<<
*
* @cython.boundscheck(False) # Deactivate bounds checking
*/
__pyx_t_7 = (__pyx_v_ref_quan - __pyx_v_curproj);
if (unlikely(__pyx_v_l2_norm == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
__PYX_ERR(0, 85, __pyx_L1_error)
}
__pyx_r = (__pyx_t_7 / __pyx_v_l2_norm);
goto __pyx_L0;
/* "mnnpy/_utils.pyx":12
* @cython.boundscheck(False) # Deactivate bounds checking
* @cython.wraparound(False) # Deactivate negative indexing.
* cdef float _adjust_s_variance(float [:, :] data1, float [:, :] data2, float [:] curcell, float [:] curvect, float sigma) nogil: # <<<<<<<<<<<<<<
* cdef Py_ssize_t i, j
* cdef Py_ssize_t n_c1 = data1.shape[0]
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_WriteUnraisable("mnnpy._utils._adjust_s_variance", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1);
__pyx_r = 0;
__pyx_L0:;
return __pyx_r;
}
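/* The nested loops in _adjust_s_variance above repeatedly (a) normalize the
 * correction vector into a unit direction, (b) project each cell onto that
 * direction, and (c) accumulate the squared norm of the component that is
 * orthogonal to it, which then feeds a Gaussian weight exp(-dist / sigma).
 * The following is a minimal self-contained C sketch of that single
 * projection/residual step on contiguous float arrays.  The name, signature
 * and layout are illustrative assumptions only; this is not the generated
 * code path, which works on strided memoryview slices. */
#include <stddef.h>   /* size_t */
/* Returns the squared norm of (cell - point) after removing its component
 * along the unit direction dir[0..n-1]; *proj receives dir . point. */
static float sketch_orthogonal_sq_dist(const float *cell, const float *point,
                                       const float *dir, size_t n, float *proj)
{
    float scale = 0.0f, dist = 0.0f;
    size_t j;
    *proj = 0.0f;
    for (j = 0; j < n; ++j) {
        *proj += dir[j] * point[j];              /* projection of the point  */
        scale += (cell[j] - point[j]) * dir[j];  /* offset along direction   */
    }
    for (j = 0; j < n; ++j) {
        float r = (cell[j] - point[j]) - dir[j] * scale;  /* orthogonal part */
        dist += r * r;                           /* squared residual length  */
    }
    return dist;
}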
/* "mnnpy/_utils.pyx":89
* @cython.boundscheck(False) # Deactivate bounds checking
* @cython.wraparound(False) # Deactivate negative indexing.
* cpdef _adjust_shift_variance(data1, data2, correction, sigma, n_jobs, var_subset=None): # <<<<<<<<<<<<<<
* if var_subset is not None:
* vect = correction[:, var_subset]
*/
static PyObject *__pyx_pw_5mnnpy_6_utils_1_adjust_shift_variance(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyObject *__pyx_f_5mnnpy_6_utils__adjust_shift_variance(PyObject *__pyx_v_data1, PyObject *__pyx_v_data2, PyObject *__pyx_v_correction, PyObject *__pyx_v_sigma, PyObject *__pyx_v_n_jobs, CYTHON_UNUSED int __pyx_skip_dispatch, struct __pyx_opt_args_5mnnpy_6_utils__adjust_shift_variance *__pyx_optional_args) {
PyObject *__pyx_v_var_subset = ((PyObject *)Py_None);
PyObject *__pyx_v_vect = NULL;
__Pyx_memviewslice __pyx_v_da1 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_da2 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_vec = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_v_scaling = NULL;
__Pyx_memviewslice __pyx_v_s1 = { 0, 0, { 0 }, { 0 }, { 0 } };
float __pyx_v_s2;
Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_n_c2;
CYTHON_UNUSED int __pyx_v_chunksize;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
__Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
__Pyx_memviewslice __pyx_t_9 = { 0, 0, { 0 }, { 0 }, { 0 } };
float __pyx_t_10;
Py_ssize_t __pyx_t_11;
int __pyx_t_12;
Py_ssize_t __pyx_t_13;
Py_ssize_t __pyx_t_14;
__Pyx_memviewslice __pyx_t_15 = { 0, 0, { 0 }, { 0 }, { 0 } };
Py_ssize_t __pyx_t_16;
__Pyx_RefNannySetupContext("_adjust_shift_variance", 0);
if (__pyx_optional_args) {
if (__pyx_optional_args->__pyx_n > 0) {
__pyx_v_var_subset = __pyx_optional_args->var_subset;
}
}
__Pyx_INCREF(__pyx_v_data1);
__Pyx_INCREF(__pyx_v_data2);
/* "mnnpy/_utils.pyx":90
* @cython.wraparound(False) # Deactivate negative indexing.
* cpdef _adjust_shift_variance(data1, data2, correction, sigma, n_jobs, var_subset=None):
* if var_subset is not None: # <<<<<<<<<<<<<<
* vect = correction[:, var_subset]
* data1 = data1[:, var_subset]
*/
__pyx_t_1 = (__pyx_v_var_subset != Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "mnnpy/_utils.pyx":91
* cpdef _adjust_shift_variance(data1, data2, correction, sigma, n_jobs, var_subset=None):
* if var_subset is not None:
* vect = correction[:, var_subset] # <<<<<<<<<<<<<<
* data1 = data1[:, var_subset]
* data2 = data2[:, var_subset]
*/
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 91, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_slice_);
__Pyx_GIVEREF(__pyx_slice_);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_slice_);
__Pyx_INCREF(__pyx_v_var_subset);
__Pyx_GIVEREF(__pyx_v_var_subset);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_var_subset);
__pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_v_correction, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 91, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_vect = __pyx_t_4;
__pyx_t_4 = 0;
/* "mnnpy/_utils.pyx":92
* if var_subset is not None:
* vect = correction[:, var_subset]
* data1 = data1[:, var_subset] # <<<<<<<<<<<<<<
* data2 = data2[:, var_subset]
* else:
*/
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 92, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(__pyx_slice__2);
__Pyx_GIVEREF(__pyx_slice__2);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_slice__2);
__Pyx_INCREF(__pyx_v_var_subset);
__Pyx_GIVEREF(__pyx_v_var_subset);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_v_var_subset);
__pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_data1, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 92, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF_SET(__pyx_v_data1, __pyx_t_3);
__pyx_t_3 = 0;
/* "mnnpy/_utils.pyx":93
* vect = correction[:, var_subset]
* data1 = data1[:, var_subset]
* data2 = data2[:, var_subset] # <<<<<<<<<<<<<<
* else:
* vect = correction
*/
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 93, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_slice__3);
__Pyx_GIVEREF(__pyx_slice__3);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_slice__3);
__Pyx_INCREF(__pyx_v_var_subset);
__Pyx_GIVEREF(__pyx_v_var_subset);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_var_subset);
__pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_v_data2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 93, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF_SET(__pyx_v_data2, __pyx_t_4);
__pyx_t_4 = 0;
/* "mnnpy/_utils.pyx":90
* @cython.wraparound(False) # Deactivate negative indexing.
* cpdef _adjust_shift_variance(data1, data2, correction, sigma, n_jobs, var_subset=None):
* if var_subset is not None: # <<<<<<<<<<<<<<
* vect = correction[:, var_subset]
* data1 = data1[:, var_subset]
*/
goto __pyx_L3;
}
/* "mnnpy/_utils.pyx":95
* data2 = data2[:, var_subset]
* else:
* vect = correction # <<<<<<<<<<<<<<
* cdef float [:, :] da1 = data1
* cdef float [:, :] da2 = data2
*/
/*else*/ {
__Pyx_INCREF(__pyx_v_correction);
__pyx_v_vect = __pyx_v_correction;
}
__pyx_L3:;
/* "mnnpy/_utils.pyx":96
* else:
* vect = correction
* cdef float [:, :] da1 = data1 # <<<<<<<<<<<<<<
* cdef float [:, :] da2 = data2
* cdef float [:, :] vec = vect
*/
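/* Acquire typed float[:, :] memoryview slices over data1, data2 and vect so
 * the nogil loop below can index them without touching Python objects. */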
__pyx_t_5 = __Pyx_PyObject_to_MemoryviewSlice_dsds_float(__pyx_v_data1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_5.memview)) __PYX_ERR(0, 96, __pyx_L1_error)
__pyx_v_da1 = __pyx_t_5;
__pyx_t_5.memview = NULL;
__pyx_t_5.data = NULL;
/* "mnnpy/_utils.pyx":97
* vect = correction
* cdef float [:, :] da1 = data1
* cdef float [:, :] da2 = data2 # <<<<<<<<<<<<<<
* cdef float [:, :] vec = vect
* scaling = np.zeros(data2.shape[0], dtype=np.float32)
*/
__pyx_t_5 = __Pyx_PyObject_to_MemoryviewSlice_dsds_float(__pyx_v_data2, PyBUF_WRITABLE); if (unlikely(!__pyx_t_5.memview)) __PYX_ERR(0, 97, __pyx_L1_error)
__pyx_v_da2 = __pyx_t_5;
__pyx_t_5.memview = NULL;
__pyx_t_5.data = NULL;
/* "mnnpy/_utils.pyx":98
* cdef float [:, :] da1 = data1
* cdef float [:, :] da2 = data2
* cdef float [:, :] vec = vect # <<<<<<<<<<<<<<
* scaling = np.zeros(data2.shape[0], dtype=np.float32)
* cdef float [:] s1 = scaling
*/
__pyx_t_5 = __Pyx_PyObject_to_MemoryviewSlice_dsds_float(__pyx_v_vect, PyBUF_WRITABLE); if (unlikely(!__pyx_t_5.memview)) __PYX_ERR(0, 98, __pyx_L1_error)
__pyx_v_vec = __pyx_t_5;
__pyx_t_5.memview = NULL;
__pyx_t_5.data = NULL;
/* "mnnpy/_utils.pyx":99
* cdef float [:, :] da2 = data2
* cdef float [:, :] vec = vect
* scaling = np.zeros(data2.shape[0], dtype=np.float32) # <<<<<<<<<<<<<<
* cdef float [:] s1 = scaling
* cdef float s2 = sigma
*/
__pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 99, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 99, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_data2, __pyx_n_s_shape); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 99, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = __Pyx_GetItemInt(__pyx_t_4, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 99, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 99, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_6);
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 99, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 99, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_float32); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 99, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_dtype, __pyx_t_8) < 0) __PYX_ERR(0, 99, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 99, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_v_scaling = __pyx_t_8;
__pyx_t_8 = 0;
/* "mnnpy/_utils.pyx":100
* cdef float [:, :] vec = vect
* scaling = np.zeros(data2.shape[0], dtype=np.float32)
* cdef float [:] s1 = scaling # <<<<<<<<<<<<<<
* cdef float s2 = sigma
* cdef Py_ssize_t i
*/
__pyx_t_9 = __Pyx_PyObject_to_MemoryviewSlice_ds_float(__pyx_v_scaling, PyBUF_WRITABLE); if (unlikely(!__pyx_t_9.memview)) __PYX_ERR(0, 100, __pyx_L1_error)
__pyx_v_s1 = __pyx_t_9;
__pyx_t_9.memview = NULL;
__pyx_t_9.data = NULL;
/* "mnnpy/_utils.pyx":101
* scaling = np.zeros(data2.shape[0], dtype=np.float32)
* cdef float [:] s1 = scaling
* cdef float s2 = sigma # <<<<<<<<<<<<<<
* cdef Py_ssize_t i
* cdef Py_ssize_t n_c2 = data2.shape[0]
*/
__pyx_t_10 = __pyx_PyFloat_AsFloat(__pyx_v_sigma); if (unlikely((__pyx_t_10 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 101, __pyx_L1_error)
__pyx_v_s2 = __pyx_t_10;
/* "mnnpy/_utils.pyx":103
* cdef float s2 = sigma
* cdef Py_ssize_t i
* cdef Py_ssize_t n_c2 = data2.shape[0] # <<<<<<<<<<<<<<
* cdef int chunksize=int(n_c2/n_jobs) + 1
* for i in prange(n_c2, nogil=True, chunksize=chunksize, schedule='static'):
*/
__pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_data2, __pyx_n_s_shape); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 103, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__pyx_t_6 = __Pyx_GetItemInt(__pyx_t_8, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 103, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__pyx_t_11 = __Pyx_PyIndex_AsSsize_t(__pyx_t_6); if (unlikely((__pyx_t_11 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 103, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_v_n_c2 = __pyx_t_11;
/* "mnnpy/_utils.pyx":104
* cdef Py_ssize_t i
* cdef Py_ssize_t n_c2 = data2.shape[0]
* cdef int chunksize=int(n_c2/n_jobs) + 1 # <<<<<<<<<<<<<<
* for i in prange(n_c2, nogil=True, chunksize=chunksize, schedule='static'):
* s1[i] = _adjust_s_variance(da1, da2, da2[i], vec[i], s2)
*/
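/* chunksize = int(n_c2 / n_jobs) + 1 is evaluated through Python objects
 * because n_jobs is an untyped argument; the result is then coerced back to a
 * C int for the OpenMP schedule below. */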
__pyx_t_6 = PyInt_FromSsize_t(__pyx_v_n_c2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 104, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = __Pyx_PyNumber_Divide(__pyx_t_6, __pyx_v_n_jobs); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 104, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyNumber_Int(__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 104, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__pyx_t_8 = __Pyx_PyInt_AddObjC(__pyx_t_6, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 104, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_12 = __Pyx_PyInt_As_int(__pyx_t_8); if (unlikely((__pyx_t_12 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 104, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__pyx_v_chunksize = __pyx_t_12;
/* "mnnpy/_utils.pyx":105
* cdef Py_ssize_t n_c2 = data2.shape[0]
* cdef int chunksize=int(n_c2/n_jobs) + 1
* for i in prange(n_c2, nogil=True, chunksize=chunksize, schedule='static'): # <<<<<<<<<<<<<<
* s1[i] = _adjust_s_variance(da1, da2, da2[i], vec[i], s2)
* scaling = np.fmax(scaling, 1)
*/
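/* Cython's expansion of the nogil prange: release the GIL, run an OpenMP
 * "parallel for" with a static schedule and the requested chunksize, and
 * propagate any per-thread exception through the __pyx_parallel_* variables
 * once the loop has finished. */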
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
__pyx_t_11 = __pyx_v_n_c2;
if (1 == 0) abort();
{
Py_ssize_t __pyx_parallel_temp0 = ((Py_ssize_t)0xbad0bad0);
const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0;
PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL;
int __pyx_parallel_why;
__pyx_parallel_why = 0;
__pyx_t_12 = __pyx_v_chunksize;
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
__pyx_t_14 = (__pyx_t_11 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_14 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_16) firstprivate(__pyx_t_15, __pyx_t_9) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
Py_BEGIN_ALLOW_THREADS
#endif /* _OPENMP */
#ifdef _OPENMP
#pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) schedule(static, __pyx_t_12)
#endif /* _OPENMP */
for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_14; __pyx_t_13++){
if (__pyx_parallel_why < 2)
{
__pyx_v_i = (Py_ssize_t)(0 + 1 * __pyx_t_13);
/* "mnnpy/_utils.pyx":106
* cdef int chunksize=int(n_c2/n_jobs) + 1
* for i in prange(n_c2, nogil=True, chunksize=chunksize, schedule='static'):
* s1[i] = _adjust_s_variance(da1, da2, da2[i], vec[i], s2) # <<<<<<<<<<<<<<
* scaling = np.fmax(scaling, 1)
* return correction * scaling[:, None]
*/
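/* Build the 1-D slices da2[i] and vec[i] by hand; the index checks below are
 * compiled out because boundscheck/wraparound are disabled in the .pyx file.
 * The result of _adjust_s_variance is written straight into s1[i]. */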
__pyx_t_9.data = __pyx_v_da2.data;
__pyx_t_9.memview = __pyx_v_da2.memview;
__PYX_INC_MEMVIEW(&__pyx_t_9, 0);
{
Py_ssize_t __pyx_tmp_idx = __pyx_v_i;
Py_ssize_t __pyx_tmp_shape = __pyx_v_da2.shape[0];
Py_ssize_t __pyx_tmp_stride = __pyx_v_da2.strides[0];
if (0 && (__pyx_tmp_idx < 0))
__pyx_tmp_idx += __pyx_tmp_shape;
if (0 && (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 0)");
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
__PYX_ERR(0, 106, __pyx_L9_error)
}
__pyx_t_9.data += __pyx_tmp_idx * __pyx_tmp_stride;
}
__pyx_t_9.shape[0] = __pyx_v_da2.shape[1];
__pyx_t_9.strides[0] = __pyx_v_da2.strides[1];
__pyx_t_9.suboffsets[0] = -1;
__pyx_t_15.data = __pyx_v_vec.data;
__pyx_t_15.memview = __pyx_v_vec.memview;
__PYX_INC_MEMVIEW(&__pyx_t_15, 0);
{
Py_ssize_t __pyx_tmp_idx = __pyx_v_i;
Py_ssize_t __pyx_tmp_shape = __pyx_v_vec.shape[0];
Py_ssize_t __pyx_tmp_stride = __pyx_v_vec.strides[0];
if (0 && (__pyx_tmp_idx < 0))
__pyx_tmp_idx += __pyx_tmp_shape;
if (0 && (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 0)");
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
__PYX_ERR(0, 106, __pyx_L9_error)
}
__pyx_t_15.data += __pyx_tmp_idx * __pyx_tmp_stride;
}
__pyx_t_15.shape[0] = __pyx_v_vec.shape[1];
__pyx_t_15.strides[0] = __pyx_v_vec.strides[1];
__pyx_t_15.suboffsets[0] = -1;
__pyx_t_16 = __pyx_v_i;
*((float *) ( /* dim=0 */ (__pyx_v_s1.data + __pyx_t_16 * __pyx_v_s1.strides[0]) )) = __pyx_f_5mnnpy_6_utils__adjust_s_variance(__pyx_v_da1, __pyx_v_da2, __pyx_t_9, __pyx_t_15, __pyx_v_s2);
__PYX_XDEC_MEMVIEW(&__pyx_t_9, 0);
__pyx_t_9.memview = NULL;
__pyx_t_9.data = NULL;
__PYX_XDEC_MEMVIEW(&__pyx_t_15, 0);
__pyx_t_15.memview = NULL;
__pyx_t_15.data = NULL;
goto __pyx_L12;
__pyx_L9_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_exc_type)
#endif /* _OPENMP */
if (!__pyx_parallel_exc_type) {
__Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb);
__pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno;
__Pyx_GOTREF(__pyx_parallel_exc_type);
}
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_parallel_why = 4;
goto __pyx_L11;
__pyx_L11:;
#ifdef _OPENMP
#pragma omp critical(__pyx_parallel_lastprivates0)
#endif /* _OPENMP */
{
__pyx_parallel_temp0 = __pyx_v_i;
}
__pyx_L12:;
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_why)
#endif /* _OPENMP */
}
}
#ifdef _OPENMP
Py_END_ALLOW_THREADS
#else
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
#endif /* _OPENMP */
/* Clean up any temporaries */
__PYX_XDEC_MEMVIEW(&__pyx_t_15, 0);
__PYX_XDEC_MEMVIEW(&__pyx_t_9, 0);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
#ifndef _OPENMP
}
#endif /* _OPENMP */
}
}
if (__pyx_parallel_exc_type) {
/* This may have been overridden by a continue, break or return in another thread. Prefer the error. */
__pyx_parallel_why = 4;
}
if (__pyx_parallel_why) {
__pyx_v_i = __pyx_parallel_temp0;
switch (__pyx_parallel_why) {
case 4:
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_GIVEREF(__pyx_parallel_exc_type);
__Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb);
__pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno;
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
goto __pyx_L5_error;
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "mnnpy/_utils.pyx":105
* cdef Py_ssize_t n_c2 = data2.shape[0]
* cdef int chunksize=int(n_c2/n_jobs) + 1
* for i in prange(n_c2, nogil=True, chunksize=chunksize, schedule='static'): # <<<<<<<<<<<<<<
* s1[i] = _adjust_s_variance(da1, da2, da2[i], vec[i], s2)
* scaling = np.fmax(scaling, 1)
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L6;
}
__pyx_L5_error: {
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L1_error;
}
__pyx_L6:;
}
}
/* "mnnpy/_utils.pyx":107
* for i in prange(n_c2, nogil=True, chunksize=chunksize, schedule='static'):
* s1[i] = _adjust_s_variance(da1, da2, da2[i], vec[i], s2)
* scaling = np.fmax(scaling, 1) # <<<<<<<<<<<<<<
* return correction * scaling[:, None]
*/
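/* Call np.fmax(scaling, 1). Cython emits the fast-call paths
 * (CYTHON_FAST_PYCALL / CYTHON_FAST_PYCCALL) when available and falls back to
 * packing the arguments into a tuple otherwise. */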
__pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 107, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_fmax); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 107, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = NULL;
__pyx_t_12 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {
__pyx_t_6 = PyMethod_GET_SELF(__pyx_t_4);
if (likely(__pyx_t_6)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
__Pyx_INCREF(__pyx_t_6);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_4, function);
__pyx_t_12 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_4)) {
PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_v_scaling, __pyx_int_1};
__pyx_t_8 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_12, 2+__pyx_t_12); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 107, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_GOTREF(__pyx_t_8);
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) {
PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_v_scaling, __pyx_int_1};
__pyx_t_8 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_12, 2+__pyx_t_12); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 107, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_GOTREF(__pyx_t_8);
} else
#endif
{
__pyx_t_3 = PyTuple_New(2+__pyx_t_12); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 107, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (__pyx_t_6) {
__Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6); __pyx_t_6 = NULL;
}
__Pyx_INCREF(__pyx_v_scaling);
__Pyx_GIVEREF(__pyx_v_scaling);
PyTuple_SET_ITEM(__pyx_t_3, 0+__pyx_t_12, __pyx_v_scaling);
__Pyx_INCREF(__pyx_int_1);
__Pyx_GIVEREF(__pyx_int_1);
PyTuple_SET_ITEM(__pyx_t_3, 1+__pyx_t_12, __pyx_int_1);
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 107, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF_SET(__pyx_v_scaling, __pyx_t_8);
__pyx_t_8 = 0;
/* "mnnpy/_utils.pyx":108
* s1[i] = _adjust_s_variance(da1, da2, da2[i], vec[i], s2)
* scaling = np.fmax(scaling, 1)
* return correction * scaling[:, None] # <<<<<<<<<<<<<<
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_8 = __Pyx_PyObject_GetItem(__pyx_v_scaling, __pyx_tuple__5); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 108, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__pyx_t_4 = PyNumber_Multiply(__pyx_v_correction, __pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 108, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__pyx_r = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L0;
/* "mnnpy/_utils.pyx":89
* @cython.boundscheck(False) # Deactivate bounds checking
* @cython.wraparound(False) # Deactivate negative indexing.
* cpdef _adjust_shift_variance(data1, data2, correction, sigma, n_jobs, var_subset=None): # <<<<<<<<<<<<<<
* if var_subset is not None:
* vect = correction[:, var_subset]
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__PYX_XDEC_MEMVIEW(&__pyx_t_5, 1);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__PYX_XDEC_MEMVIEW(&__pyx_t_9, 1);
__PYX_XDEC_MEMVIEW(&__pyx_t_15, 1);
__Pyx_AddTraceback("mnnpy._utils._adjust_shift_variance", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_vect);
__PYX_XDEC_MEMVIEW(&__pyx_v_da1, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_da2, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_vec, 1);
__Pyx_XDECREF(__pyx_v_scaling);
__PYX_XDEC_MEMVIEW(&__pyx_v_s1, 1);
__Pyx_XDECREF(__pyx_v_data1);
__Pyx_XDECREF(__pyx_v_data2);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
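/* Python-level entry point: the wrapper below parses the five required
 * arguments plus the optional var_subset keyword and forwards them, via the
 * __pyx_pf_ helper, to the C implementation above. */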
/* Python wrapper */
static PyObject *__pyx_pw_5mnnpy_6_utils_1_adjust_shift_variance(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyObject *__pyx_pw_5mnnpy_6_utils_1_adjust_shift_variance(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_data1 = 0;
PyObject *__pyx_v_data2 = 0;
PyObject *__pyx_v_correction = 0;
PyObject *__pyx_v_sigma = 0;
PyObject *__pyx_v_n_jobs = 0;
PyObject *__pyx_v_var_subset = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("_adjust_shift_variance (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_data1,&__pyx_n_s_data2,&__pyx_n_s_correction,&__pyx_n_s_sigma,&__pyx_n_s_n_jobs,&__pyx_n_s_var_subset,0};
PyObject* values[6] = {0,0,0,0,0,0};
values[5] = ((PyObject *)Py_None);
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
CYTHON_FALLTHROUGH;
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data1)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data2)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("_adjust_shift_variance", 0, 5, 6, 1); __PYX_ERR(0, 89, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_correction)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("_adjust_shift_variance", 0, 5, 6, 2); __PYX_ERR(0, 89, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_sigma)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("_adjust_shift_variance", 0, 5, 6, 3); __PYX_ERR(0, 89, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 4:
if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_n_jobs)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("_adjust_shift_variance", 0, 5, 6, 4); __PYX_ERR(0, 89, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 5:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_var_subset);
if (value) { values[5] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_adjust_shift_variance") < 0)) __PYX_ERR(0, 89, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
CYTHON_FALLTHROUGH;
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_data1 = values[0];
__pyx_v_data2 = values[1];
__pyx_v_correction = values[2];
__pyx_v_sigma = values[3];
__pyx_v_n_jobs = values[4];
__pyx_v_var_subset = values[5];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("_adjust_shift_variance", 0, 5, 6, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 89, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("mnnpy._utils._adjust_shift_variance", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_5mnnpy_6_utils__adjust_shift_variance(__pyx_self, __pyx_v_data1, __pyx_v_data2, __pyx_v_correction, __pyx_v_sigma, __pyx_v_n_jobs, __pyx_v_var_subset);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_5mnnpy_6_utils__adjust_shift_variance(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_data1, PyObject *__pyx_v_data2, PyObject *__pyx_v_correction, PyObject *__pyx_v_sigma, PyObject *__pyx_v_n_jobs, PyObject *__pyx_v_var_subset) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
struct __pyx_opt_args_5mnnpy_6_utils__adjust_shift_variance __pyx_t_2;
__Pyx_RefNannySetupContext("_adjust_shift_variance", 0);
__Pyx_XDECREF(__pyx_r);
__pyx_t_2.__pyx_n = 1;
__pyx_t_2.var_subset = __pyx_v_var_subset;
__pyx_t_1 = __pyx_f_5mnnpy_6_utils__adjust_shift_variance(__pyx_v_data1, __pyx_v_data2, __pyx_v_correction, __pyx_v_sigma, __pyx_v_n_jobs, 0, &__pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 89, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("mnnpy._utils._adjust_shift_variance", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":121
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
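/* Everything from here down belongs to Cython's bundled View.MemoryView
 * utility module, which implements the cython.array type and the memoryview
 * machinery used by the typed slices above. */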
/* Python wrapper */
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_shape = 0;
Py_ssize_t __pyx_v_itemsize;
PyObject *__pyx_v_format = 0;
PyObject *__pyx_v_mode = 0;
int __pyx_v_allocate_buffer;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0};
PyObject* values[5] = {0,0,0,0,0};
values[3] = ((PyObject *)__pyx_n_s_c);
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 121, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 121, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode);
if (value) { values[3] = value; kw_args--; }
}
CYTHON_FALLTHROUGH;
case 4:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer);
if (value) { values[4] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 121, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_shape = ((PyObject*)values[0]);
__pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 121, __pyx_L3_error)
__pyx_v_format = values[2];
__pyx_v_mode = values[3];
if (values[4]) {
__pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error)
} else {
/* "View.MemoryView":122
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,
* mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<<
*
* cdef int idx
*/
__pyx_v_allocate_buffer = ((int)1);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 121, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 121, __pyx_L1_error)
if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) {
PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 121, __pyx_L1_error)
}
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer);
/* "View.MemoryView":121
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
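/* array.__cinit__ body: validates shape, itemsize and format, allocates one
 * block for shape+strides and (optionally) the data buffer, and fills the
 * strides for C or Fortran layout via fill_contig_strides_array. */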
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) {
int __pyx_v_idx;
Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_dim;
PyObject **__pyx_v_p;
char __pyx_v_order;
int __pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
char *__pyx_t_6;
int __pyx_t_7;
Py_ssize_t __pyx_t_8;
PyObject *__pyx_t_9 = NULL;
PyObject *__pyx_t_10 = NULL;
Py_ssize_t __pyx_t_11;
__Pyx_RefNannySetupContext("__cinit__", 0);
__Pyx_INCREF(__pyx_v_format);
/* "View.MemoryView":128
* cdef PyObject **p
*
* self.ndim = <int> len(shape) # <<<<<<<<<<<<<<
* self.itemsize = itemsize
*
*/
if (unlikely(__pyx_v_shape == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 128, __pyx_L1_error)
}
__pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 128, __pyx_L1_error)
__pyx_v_self->ndim = ((int)__pyx_t_1);
/* "View.MemoryView":129
*
* self.ndim = <int> len(shape)
* self.itemsize = itemsize # <<<<<<<<<<<<<<
*
* if not self.ndim:
*/
__pyx_v_self->itemsize = __pyx_v_itemsize;
/* "View.MemoryView":131
* self.itemsize = itemsize
*
* if not self.ndim: # <<<<<<<<<<<<<<
* raise ValueError("Empty shape tuple for cython.array")
*
*/
__pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":132
*
* if not self.ndim:
* raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
*
* if itemsize <= 0:
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 132, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 132, __pyx_L1_error)
/* "View.MemoryView":131
* self.itemsize = itemsize
*
* if not self.ndim: # <<<<<<<<<<<<<<
* raise ValueError("Empty shape tuple for cython.array")
*
*/
}
/* "View.MemoryView":134
* raise ValueError("Empty shape tuple for cython.array")
*
* if itemsize <= 0: # <<<<<<<<<<<<<<
* raise ValueError("itemsize <= 0 for cython.array")
*
*/
__pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":135
*
* if itemsize <= 0:
* raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
*
* if not isinstance(format, bytes):
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 135, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 135, __pyx_L1_error)
/* "View.MemoryView":134
* raise ValueError("Empty shape tuple for cython.array")
*
* if itemsize <= 0: # <<<<<<<<<<<<<<
* raise ValueError("itemsize <= 0 for cython.array")
*
*/
}
/* "View.MemoryView":137
* raise ValueError("itemsize <= 0 for cython.array")
*
* if not isinstance(format, bytes): # <<<<<<<<<<<<<<
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
*/
__pyx_t_2 = PyBytes_Check(__pyx_v_format);
__pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":138
*
* if not isinstance(format, bytes):
* format = format.encode('ASCII') # <<<<<<<<<<<<<<
* self._format = format # keep a reference to the byte string
* self.format = self._format
*/
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 138, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 138, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF_SET(__pyx_v_format, __pyx_t_5);
__pyx_t_5 = 0;
/* "View.MemoryView":137
* raise ValueError("itemsize <= 0 for cython.array")
*
* if not isinstance(format, bytes): # <<<<<<<<<<<<<<
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
*/
}
/* "View.MemoryView":139
* if not isinstance(format, bytes):
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<<
* self.format = self._format
*
*/
if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 139, __pyx_L1_error)
__pyx_t_5 = __pyx_v_format;
__Pyx_INCREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_v_self->_format);
__Pyx_DECREF(__pyx_v_self->_format);
__pyx_v_self->_format = ((PyObject*)__pyx_t_5);
__pyx_t_5 = 0;
/* "View.MemoryView":140
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
* self.format = self._format # <<<<<<<<<<<<<<
*
*
*/
if (unlikely(__pyx_v_self->_format == Py_None)) {
PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
__PYX_ERR(1, 140, __pyx_L1_error)
}
__pyx_t_6 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(1, 140, __pyx_L1_error)
__pyx_v_self->format = __pyx_t_6;
/* "View.MemoryView":143
*
*
* self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<<
* self._strides = self._shape + self.ndim
*
*/
__pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2)));
/* "View.MemoryView":144
*
* self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2)
* self._strides = self._shape + self.ndim # <<<<<<<<<<<<<<
*
* if not self._shape:
*/
__pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim);
/* "View.MemoryView":146
* self._strides = self._shape + self.ndim
*
* if not self._shape: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate shape and strides.")
*
*/
__pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":147
*
* if not self._shape:
* raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 147, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__PYX_ERR(1, 147, __pyx_L1_error)
/* "View.MemoryView":146
* self._strides = self._shape + self.ndim
*
* if not self._shape: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate shape and strides.")
*
*/
}
/* "View.MemoryView":150
*
*
* for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
*/
__pyx_t_7 = 0;
__pyx_t_5 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_5); __pyx_t_1 = 0;
for (;;) {
if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_5)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_5, __pyx_t_1); __Pyx_INCREF(__pyx_t_3); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 150, __pyx_L1_error)
#else
__pyx_t_3 = PySequence_ITEM(__pyx_t_5, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 150, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
#endif
__pyx_t_8 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_8 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 150, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_dim = __pyx_t_8;
__pyx_v_idx = __pyx_t_7;
__pyx_t_7 = (__pyx_t_7 + 1);
/* "View.MemoryView":151
*
* for idx, dim in enumerate(shape):
* if dim <= 0: # <<<<<<<<<<<<<<
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim
*/
__pyx_t_4 = ((__pyx_v_dim <= 0) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":152
* for idx, dim in enumerate(shape):
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<<
* self._shape[idx] = dim
*
*/
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 152, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_9 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 152, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 152, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_9);
__pyx_t_3 = 0;
__pyx_t_9 = 0;
__pyx_t_9 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 152, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 152, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 152, __pyx_L1_error)
/* "View.MemoryView":151
*
* for idx, dim in enumerate(shape):
* if dim <= 0: # <<<<<<<<<<<<<<
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim
*/
}
/* "View.MemoryView":153
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim # <<<<<<<<<<<<<<
*
* cdef char order
*/
(__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim;
/* "View.MemoryView":150
*
*
* for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
*/
}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
/* "View.MemoryView":156
*
* cdef char order
* if mode == 'fortran': # <<<<<<<<<<<<<<
* order = b'F'
* self.mode = u'fortran'
*/
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 156, __pyx_L1_error)
if (__pyx_t_4) {
/* "View.MemoryView":157
* cdef char order
* if mode == 'fortran':
* order = b'F' # <<<<<<<<<<<<<<
* self.mode = u'fortran'
* elif mode == 'c':
*/
__pyx_v_order = 'F';
/* "View.MemoryView":158
* if mode == 'fortran':
* order = b'F'
* self.mode = u'fortran' # <<<<<<<<<<<<<<
* elif mode == 'c':
* order = b'C'
*/
__Pyx_INCREF(__pyx_n_u_fortran);
__Pyx_GIVEREF(__pyx_n_u_fortran);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_fortran;
/* "View.MemoryView":156
*
* cdef char order
* if mode == 'fortran': # <<<<<<<<<<<<<<
* order = b'F'
* self.mode = u'fortran'
*/
goto __pyx_L10;
}
/* "View.MemoryView":159
* order = b'F'
* self.mode = u'fortran'
* elif mode == 'c': # <<<<<<<<<<<<<<
* order = b'C'
* self.mode = u'c'
*/
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 159, __pyx_L1_error)
if (likely(__pyx_t_4)) {
/* "View.MemoryView":160
* self.mode = u'fortran'
* elif mode == 'c':
* order = b'C' # <<<<<<<<<<<<<<
* self.mode = u'c'
* else:
*/
__pyx_v_order = 'C';
/* "View.MemoryView":161
* elif mode == 'c':
* order = b'C'
* self.mode = u'c' # <<<<<<<<<<<<<<
* else:
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
*/
__Pyx_INCREF(__pyx_n_u_c);
__Pyx_GIVEREF(__pyx_n_u_c);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_c;
/* "View.MemoryView":159
* order = b'F'
* self.mode = u'fortran'
* elif mode == 'c': # <<<<<<<<<<<<<<
* order = b'C'
* self.mode = u'c'
*/
goto __pyx_L10;
}
/* "View.MemoryView":163
* self.mode = u'c'
* else:
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<<
*
* self.len = fill_contig_strides_array(self._shape, self._strides,
*/
/*else*/ {
__pyx_t_5 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 163, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 163, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 163, __pyx_L1_error)
}
__pyx_L10:;
/* "View.MemoryView":165
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
*
* self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<<
* itemsize, self.ndim, order)
*
*/
__pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order);
/* "View.MemoryView":168
* itemsize, self.ndim, order)
*
* self.free_data = allocate_buffer # <<<<<<<<<<<<<<
* self.dtype_is_object = format == b'O'
* if allocate_buffer:
*/
__pyx_v_self->free_data = __pyx_v_allocate_buffer;
/* "View.MemoryView":169
*
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<<
* if allocate_buffer:
*
*/
__pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 169, __pyx_L1_error)
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 169, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__pyx_v_self->dtype_is_object = __pyx_t_4;
/* "View.MemoryView":170
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O'
* if allocate_buffer: # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_4 = (__pyx_v_allocate_buffer != 0);
if (__pyx_t_4) {
/* "View.MemoryView":173
*
*
* self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<<
* if not self.data:
* raise MemoryError("unable to allocate array data.")
*/
__pyx_v_self->data = ((char *)malloc(__pyx_v_self->len));
/* "View.MemoryView":174
*
* self.data = <char *>malloc(self.len)
* if not self.data: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate array data.")
*
*/
__pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":175
* self.data = <char *>malloc(self.len)
* if not self.data:
* raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 175, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 175, __pyx_L1_error)
/* "View.MemoryView":174
*
* self.data = <char *>malloc(self.len)
* if not self.data: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate array data.")
*
*/
}
/* "View.MemoryView":177
* raise MemoryError("unable to allocate array data.")
*
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
*/
__pyx_t_4 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_4) {
/* "View.MemoryView":178
*
* if self.dtype_is_object:
* p = <PyObject **> self.data # <<<<<<<<<<<<<<
* for i in range(self.len / itemsize):
* p[i] = Py_None
*/
__pyx_v_p = ((PyObject **)__pyx_v_self->data);
/* "View.MemoryView":179
* if self.dtype_is_object:
* p = <PyObject **> self.data
* for i in range(self.len / itemsize): # <<<<<<<<<<<<<<
* p[i] = Py_None
* Py_INCREF(Py_None)
*/
if (unlikely(__pyx_v_itemsize == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
__PYX_ERR(1, 179, __pyx_L1_error)
}
else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) {
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
__PYX_ERR(1, 179, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize);
__pyx_t_8 = __pyx_t_1;
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_8; __pyx_t_11+=1) {
__pyx_v_i = __pyx_t_11;
/* "View.MemoryView":180
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
* p[i] = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
(__pyx_v_p[__pyx_v_i]) = Py_None;
/* "View.MemoryView":181
* for i in range(self.len / itemsize):
* p[i] = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
Py_INCREF(Py_None);
}
/* "View.MemoryView":177
* raise MemoryError("unable to allocate array data.")
*
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
*/
}
/* "View.MemoryView":170
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O'
* if allocate_buffer: # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":121
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_format);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":184
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == u"c":
*/
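/* array.__getbuffer__: exports the data through the buffer protocol, but only
 * when the requested flags include a contiguity bit that matches the array's
 * mode ('c' or 'fortran'). */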
/* Python wrapper */
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_bufmode;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
char *__pyx_t_4;
Py_ssize_t __pyx_t_5;
int __pyx_t_6;
Py_ssize_t *__pyx_t_7;
if (__pyx_v_info == NULL) {
PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
return -1;
}
__Pyx_RefNannySetupContext("__getbuffer__", 0);
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
/* "View.MemoryView":185
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1 # <<<<<<<<<<<<<<
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
*/
__pyx_v_bufmode = -1;
/* "View.MemoryView":186
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1
* if self.mode == u"c": # <<<<<<<<<<<<<<
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
*/
__pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 186, __pyx_L1_error)
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":187
* cdef int bufmode = -1
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
*/
__pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
/* "View.MemoryView":186
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1
* if self.mode == u"c": # <<<<<<<<<<<<<<
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
*/
goto __pyx_L3;
}
/* "View.MemoryView":188
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran": # <<<<<<<<<<<<<<
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
*/
__pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 188, __pyx_L1_error)
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":189
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.")
*/
__pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
/* "View.MemoryView":188
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran": # <<<<<<<<<<<<<<
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
*/
}
__pyx_L3:;
/* "View.MemoryView":190
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode): # <<<<<<<<<<<<<<
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
*/
__pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":191
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
* info.buf = self.data
* info.len = self.len
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 191, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 191, __pyx_L1_error)
/* "View.MemoryView":190
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode): # <<<<<<<<<<<<<<
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
*/
}
/* "View.MemoryView":192
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data # <<<<<<<<<<<<<<
* info.len = self.len
* info.ndim = self.ndim
*/
__pyx_t_4 = __pyx_v_self->data;
__pyx_v_info->buf = __pyx_t_4;
/* "View.MemoryView":193
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
* info.len = self.len # <<<<<<<<<<<<<<
* info.ndim = self.ndim
* info.shape = self._shape
*/
__pyx_t_5 = __pyx_v_self->len;
__pyx_v_info->len = __pyx_t_5;
/* "View.MemoryView":194
* info.buf = self.data
* info.len = self.len
* info.ndim = self.ndim # <<<<<<<<<<<<<<
* info.shape = self._shape
* info.strides = self._strides
*/
__pyx_t_6 = __pyx_v_self->ndim;
__pyx_v_info->ndim = __pyx_t_6;
/* "View.MemoryView":195
* info.len = self.len
* info.ndim = self.ndim
* info.shape = self._shape # <<<<<<<<<<<<<<
* info.strides = self._strides
* info.suboffsets = NULL
*/
__pyx_t_7 = __pyx_v_self->_shape;
__pyx_v_info->shape = __pyx_t_7;
/* "View.MemoryView":196
* info.ndim = self.ndim
* info.shape = self._shape
* info.strides = self._strides # <<<<<<<<<<<<<<
* info.suboffsets = NULL
* info.itemsize = self.itemsize
*/
__pyx_t_7 = __pyx_v_self->_strides;
__pyx_v_info->strides = __pyx_t_7;
/* "View.MemoryView":197
* info.shape = self._shape
* info.strides = self._strides
* info.suboffsets = NULL # <<<<<<<<<<<<<<
* info.itemsize = self.itemsize
* info.readonly = 0
*/
__pyx_v_info->suboffsets = NULL;
/* "View.MemoryView":198
* info.strides = self._strides
* info.suboffsets = NULL
* info.itemsize = self.itemsize # <<<<<<<<<<<<<<
* info.readonly = 0
*
*/
__pyx_t_5 = __pyx_v_self->itemsize;
__pyx_v_info->itemsize = __pyx_t_5;
/* "View.MemoryView":199
* info.suboffsets = NULL
* info.itemsize = self.itemsize
* info.readonly = 0 # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
__pyx_v_info->readonly = 0;
/* "View.MemoryView":201
* info.readonly = 0
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.format
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":202
*
* if flags & PyBUF_FORMAT:
* info.format = self.format # <<<<<<<<<<<<<<
* else:
* info.format = NULL
*/
__pyx_t_4 = __pyx_v_self->format;
__pyx_v_info->format = __pyx_t_4;
/* "View.MemoryView":201
* info.readonly = 0
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.format
* else:
*/
goto __pyx_L5;
}
/* "View.MemoryView":204
* info.format = self.format
* else:
* info.format = NULL # <<<<<<<<<<<<<<
*
* info.obj = self
*/
/*else*/ {
__pyx_v_info->format = NULL;
}
__pyx_L5:;
/* "View.MemoryView":206
* info.format = NULL
*
* info.obj = self # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":184
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == u"c":
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":210
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
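/* array.__dealloc__: hands the buffer to callback_free_data when one is set;
 * otherwise, if free_data is true, decrefs any stored Python objects and
 * frees the data buffer, then releases the shape/strides block. */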
/* Python wrapper */
static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_array___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":211
*
* def __dealloc__(array self):
* if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
* self.callback_free_data(self.data)
* elif self.free_data:
*/
__pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":212
* def __dealloc__(array self):
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data) # <<<<<<<<<<<<<<
* elif self.free_data:
* if self.dtype_is_object:
*/
__pyx_v_self->callback_free_data(__pyx_v_self->data);
/* "View.MemoryView":211
*
* def __dealloc__(array self):
* if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
* self.callback_free_data(self.data)
* elif self.free_data:
*/
goto __pyx_L3;
}
/* "View.MemoryView":213
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
* elif self.free_data: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape,
*/
__pyx_t_1 = (__pyx_v_self->free_data != 0);
if (__pyx_t_1) {
/* "View.MemoryView":214
* self.callback_free_data(self.data)
* elif self.free_data:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
*/
__pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":215
* elif self.free_data:
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<<
* self._strides, self.ndim, False)
* free(self.data)
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0);
/* "View.MemoryView":214
* self.callback_free_data(self.data)
* elif self.free_data:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
*/
}
/* "View.MemoryView":217
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
* free(self.data) # <<<<<<<<<<<<<<
* PyObject_Free(self._shape)
*
*/
free(__pyx_v_self->data);
/* "View.MemoryView":213
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
* elif self.free_data: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape,
*/
}
__pyx_L3:;
/* "View.MemoryView":218
* self._strides, self.ndim, False)
* free(self.data)
* PyObject_Free(self._shape) # <<<<<<<<<<<<<<
*
* @property
*/
PyObject_Free(__pyx_v_self->_shape);
/* "View.MemoryView":210
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
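/* The memview property below wraps get_memview(), which constructs a memoryview
 * over this array with PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE.  The
 * __getattr__/__getitem__/__setitem__ methods defined further down delegate
 * attribute lookup and element access to that memoryview. */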
/* "View.MemoryView":221
*
* @property
* def memview(self): # <<<<<<<<<<<<<<
* return self.get_memview()
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":222
* @property
* def memview(self):
* return self.get_memview() # <<<<<<<<<<<<<<
*
* @cname('get_memview')
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 222, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":221
*
* @property
* def memview(self): # <<<<<<<<<<<<<<
* return self.get_memview()
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":225
*
* @cname('get_memview')
* cdef get_memview(self): # <<<<<<<<<<<<<<
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object)
*/
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) {
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("get_memview", 0);
/* "View.MemoryView":226
* @cname('get_memview')
* cdef get_memview(self):
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<<
* return memoryview(self, flags, self.dtype_is_object)
*
*/
__pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE);
/* "View.MemoryView":227
* cdef get_memview(self):
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<<
*
* def __len__(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 227, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 227, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 227, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 227, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":225
*
* @cname('get_memview')
* cdef get_memview(self): # <<<<<<<<<<<<<<
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":229
* return memoryview(self, flags, self.dtype_is_object)
*
* def __len__(self): # <<<<<<<<<<<<<<
* return self._shape[0]
*
*/
/* Python wrapper */
static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__", 0);
/* "View.MemoryView":230
*
* def __len__(self):
* return self._shape[0] # <<<<<<<<<<<<<<
*
* def __getattr__(self, attr):
*/
__pyx_r = (__pyx_v_self->_shape[0]);
goto __pyx_L0;
/* "View.MemoryView":229
* return memoryview(self, flags, self.dtype_is_object)
*
* def __len__(self): # <<<<<<<<<<<<<<
* return self._shape[0]
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":232
* return self._shape[0]
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* Python wrapper */
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("__getattr__", 0);
/* "View.MemoryView":233
*
* def __getattr__(self, attr):
* return getattr(self.memview, attr) # <<<<<<<<<<<<<<
*
* def __getitem__(self, item):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 233, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 233, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":232
* return self._shape[0]
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":235
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* Python wrapper */
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":236
*
* def __getitem__(self, item):
* return self.memview[item] # <<<<<<<<<<<<<<
*
* def __setitem__(self, item, value):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 236, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 236, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":235
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":238
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* Python wrapper */
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__setitem__", 0);
/* "View.MemoryView":239
*
* def __setitem__(self, item, value):
* self.memview[item] = value # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 239, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 239, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":238
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
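/* __reduce_cython__ / __setstate_cython__ below are the auto-generated pickle
 * stubs for the array type: because the class has a non-trivial __cinit__, both
 * simply raise TypeError("no default __reduce__ due to non-trivial __cinit__")
 * instead of attempting to serialize the buffer. */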
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
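/* array_cwrapper (exposed under the cname __pyx_array_new) is the C-level
 * constructor used by the rest of the module: with buf == NULL it lets the
 * array allocate its own storage, otherwise it passes allocate_buffer=False
 * and points result.data at the caller-supplied buffer. */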
/* "View.MemoryView":243
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) {
struct __pyx_array_obj *__pyx_v_result = 0;
struct __pyx_array_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
__Pyx_RefNannySetupContext("array_cwrapper", 0);
/* "View.MemoryView":247
* cdef array result
*
* if buf == NULL: # <<<<<<<<<<<<<<
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
*/
__pyx_t_1 = ((__pyx_v_buf == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":248
*
* if buf == NULL:
* result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<<
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'),
*/
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 248, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 248, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 248, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 248, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(__pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4);
__pyx_t_2 = 0;
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 248, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":247
* cdef array result
*
* if buf == NULL: # <<<<<<<<<<<<<<
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":250
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
* allocate_buffer=False)
* result.data = buf
*/
/*else*/ {
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 250, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 250, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 250, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 250, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3);
__pyx_t_4 = 0;
__pyx_t_5 = 0;
__pyx_t_3 = 0;
/* "View.MemoryView":251
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'),
* allocate_buffer=False) # <<<<<<<<<<<<<<
* result.data = buf
*
*/
__pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 251, __pyx_L1_error)
/* "View.MemoryView":250
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
* allocate_buffer=False)
* result.data = buf
*/
__pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 250, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5);
__pyx_t_5 = 0;
/* "View.MemoryView":252
* result = array(shape, itemsize, format, mode.decode('ASCII'),
* allocate_buffer=False)
* result.data = buf # <<<<<<<<<<<<<<
*
* return result
*/
__pyx_v_result->data = __pyx_v_buf;
}
__pyx_L3:;
/* "View.MemoryView":254
* result.data = buf
*
* return result # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "View.MemoryView":243
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
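/* A minimal usage sketch for the constructor above (hypothetical helper, not
 * part of the generated module): wrap an existing C buffer of doubles without
 * copying.  Assumes the caller supplies a matching one-element shape tuple,
 * e.g. (n,), and that "d"/"c" are the desired struct format and memory mode. */
static CYTHON_UNUSED struct __pyx_array_obj *
__pyx_example_wrap_double_buffer(PyObject *shape_tuple, double *buffer)
{
    /* buf != NULL selects the allocate_buffer=False path, so result.data
     * simply aliases `buffer`; a NULL result means a Python error is set. */
    return __pyx_array_new(shape_tuple, (Py_ssize_t)sizeof(double),
                           (char *)"d", (char *)"c", (char *)buffer);
}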
/* "View.MemoryView":280
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* Python wrapper */
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_name = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0};
PyObject* values[1] = {0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 280, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
}
__pyx_v_name = values[0];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 280, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__", 0);
/* "View.MemoryView":281
* cdef object name
* def __init__(self, name):
* self.name = name # <<<<<<<<<<<<<<
* def __repr__(self):
* return self.name
*/
__Pyx_INCREF(__pyx_v_name);
__Pyx_GIVEREF(__pyx_v_name);
__Pyx_GOTREF(__pyx_v_self->name);
__Pyx_DECREF(__pyx_v_self->name);
__pyx_v_self->name = __pyx_v_name;
/* "View.MemoryView":280
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* function exit code */
__pyx_r = 0;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":282
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* Python wrapper */
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":283
* self.name = name
* def __repr__(self):
* return self.name # <<<<<<<<<<<<<<
*
* cdef generic = Enum("<strided and direct or indirect>")
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->name);
__pyx_r = __pyx_v_self->name;
goto __pyx_L0;
/* "View.MemoryView":282
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
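/* The Enum pickle helpers that follow implement __reduce_cython__: the state is
 * just (self.name,) plus an optional instance __dict__, and the reconstructor is
 * the module-level __pyx_unpickle_Enum together with the constant 0xb068931
 * (== 184977713), which appears to be the checksum guarding the attribute
 * layout during unpickling. */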
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef bint use_setstate
* state = (self.name,)
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
int __pyx_v_use_setstate;
PyObject *__pyx_v_state = NULL;
PyObject *__pyx_v__dict = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":3
* def __reduce_cython__(self):
* cdef bint use_setstate
* state = (self.name,) # <<<<<<<<<<<<<<
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
*/
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 3, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v_self->name);
__Pyx_GIVEREF(__pyx_v_self->name);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name);
__pyx_v_state = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "(tree fragment)":4
* cdef bint use_setstate
* state = (self.name,)
* _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<<
* if _dict is not None:
* state += (_dict,)
*/
__pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v__dict = __pyx_t_1;
__pyx_t_1 = 0;
/* "(tree fragment)":5
* state = (self.name,)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
__pyx_t_2 = (__pyx_v__dict != Py_None);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* "(tree fragment)":6
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
* state += (_dict,) # <<<<<<<<<<<<<<
* use_setstate = True
* else:
*/
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v__dict);
__Pyx_GIVEREF(__pyx_v__dict);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict);
__pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
__pyx_t_4 = 0;
/* "(tree fragment)":7
* if _dict is not None:
* state += (_dict,)
* use_setstate = True # <<<<<<<<<<<<<<
* else:
* use_setstate = self.name is not None
*/
__pyx_v_use_setstate = 1;
/* "(tree fragment)":5
* state = (self.name,)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
goto __pyx_L3;
}
/* "(tree fragment)":9
* use_setstate = True
* else:
* use_setstate = self.name is not None # <<<<<<<<<<<<<<
* if use_setstate:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
*/
/*else*/ {
__pyx_t_3 = (__pyx_v_self->name != Py_None);
__pyx_v_use_setstate = __pyx_t_3;
}
__pyx_L3:;
/* "(tree fragment)":10
* else:
* use_setstate = self.name is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
*/
__pyx_t_3 = (__pyx_v_use_setstate != 0);
if (__pyx_t_3) {
/* "(tree fragment)":11
* use_setstate = self.name is not None
* if use_setstate:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<<
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 11, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 11, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_184977713);
__Pyx_GIVEREF(__pyx_int_184977713);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None);
__pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 11, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state);
__pyx_t_4 = 0;
__pyx_t_1 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "(tree fragment)":10
* else:
* use_setstate = self.name is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
*/
}
/* "(tree fragment)":13
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_184977713);
__Pyx_GIVEREF(__pyx_int_184977713);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1);
__pyx_t_5 = 0;
__pyx_t_1 = 0;
__pyx_r = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L0;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef bint use_setstate
* state = (self.name,)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_state);
__Pyx_XDECREF(__pyx_v__dict);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":14
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":15
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<<
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 15, __pyx_L1_error)
__pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":14
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
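/* align_pointer below rounds an address up to the next multiple of `alignment`:
 * offset = p % alignment, and if offset > 0 the pointer is advanced by
 * alignment - offset.  For example, p = 0x1003 with alignment = 8 gives
 * offset = 3 and an aligned result of 0x1008. */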
/* "View.MemoryView":297
*
* @cname('__pyx_align_pointer')
* cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<<
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
*/
static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) {
Py_intptr_t __pyx_v_aligned_p;
size_t __pyx_v_offset;
void *__pyx_r;
int __pyx_t_1;
/* "View.MemoryView":299
* cdef void *align_pointer(void *memory, size_t alignment) nogil:
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<<
* cdef size_t offset
*
*/
__pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory);
/* "View.MemoryView":303
*
* with cython.cdivision(True):
* offset = aligned_p % alignment # <<<<<<<<<<<<<<
*
* if offset > 0:
*/
__pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment);
/* "View.MemoryView":305
* offset = aligned_p % alignment
*
* if offset > 0: # <<<<<<<<<<<<<<
* aligned_p += alignment - offset
*
*/
__pyx_t_1 = ((__pyx_v_offset > 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":306
*
* if offset > 0:
* aligned_p += alignment - offset # <<<<<<<<<<<<<<
*
* return <void *> aligned_p
*/
__pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset));
/* "View.MemoryView":305
* offset = aligned_p % alignment
*
* if offset > 0: # <<<<<<<<<<<<<<
* aligned_p += alignment - offset
*
*/
}
/* "View.MemoryView":308
* aligned_p += alignment - offset
*
* return <void *> aligned_p # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = ((void *)__pyx_v_aligned_p);
goto __pyx_L0;
/* "View.MemoryView":297
*
* @cname('__pyx_align_pointer')
* cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<<
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
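/* memoryview.__cinit__ below acquires the underlying buffer with
 * __Pyx_GetBuffer (substituting Py_None when the exporter leaves view.obj
 * NULL), hands out one of the 8 preallocated thread locks or allocates a fresh
 * one with PyThread_allocate_lock (raising MemoryError on failure), derives
 * dtype_is_object from a format of "O" when PyBUF_FORMAT was requested, and
 * aligns the acquisition-count pointer via align_pointer. */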
/* "View.MemoryView":344
* cdef __Pyx_TypeInfo *typeinfo
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
* self.obj = obj
* self.flags = flags
*/
/* Python wrapper */
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_obj = 0;
int __pyx_v_flags;
int __pyx_v_dtype_is_object;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 344, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object);
if (value) { values[2] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 344, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_obj = values[0];
__pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 344, __pyx_L3_error)
if (values[2]) {
__pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 344, __pyx_L3_error)
} else {
__pyx_v_dtype_is_object = ((int)0);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 344, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
__Pyx_RefNannySetupContext("__cinit__", 0);
/* "View.MemoryView":345
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
* self.obj = obj # <<<<<<<<<<<<<<
* self.flags = flags
* if type(self) is memoryview or obj is not None:
*/
__Pyx_INCREF(__pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
__Pyx_GOTREF(__pyx_v_self->obj);
__Pyx_DECREF(__pyx_v_self->obj);
__pyx_v_self->obj = __pyx_v_obj;
/* "View.MemoryView":346
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
* self.obj = obj
* self.flags = flags # <<<<<<<<<<<<<<
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
*/
__pyx_v_self->flags = __pyx_v_flags;
/* "View.MemoryView":347
* self.obj = obj
* self.flags = flags
* if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
*/
__pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type));
__pyx_t_3 = (__pyx_t_2 != 0);
if (!__pyx_t_3) {
} else {
__pyx_t_1 = __pyx_t_3;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_3 = (__pyx_v_obj != Py_None);
__pyx_t_2 = (__pyx_t_3 != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (__pyx_t_1) {
/* "View.MemoryView":348
* self.flags = flags
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<<
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None
*/
__pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 348, __pyx_L1_error)
/* "View.MemoryView":349
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None)
*/
__pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":350
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None;
/* "View.MemoryView":351
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* global __pyx_memoryview_thread_locks_used
*/
Py_INCREF(Py_None);
/* "View.MemoryView":349
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None)
*/
}
/* "View.MemoryView":347
* self.obj = obj
* self.flags = flags
* if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
*/
}
/* "View.MemoryView":354
*
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<<
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
*/
__pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":355
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL:
*/
__pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
/* "View.MemoryView":356
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<<
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
*/
__pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1);
/* "View.MemoryView":354
*
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<<
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
*/
}
/* "View.MemoryView":357
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL: # <<<<<<<<<<<<<<
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
*/
__pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":358
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<<
* if self.lock is NULL:
* raise MemoryError
*/
__pyx_v_self->lock = PyThread_allocate_lock();
/* "View.MemoryView":359
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL: # <<<<<<<<<<<<<<
* raise MemoryError
*
*/
__pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":360
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
* raise MemoryError # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
PyErr_NoMemory(); __PYX_ERR(1, 360, __pyx_L1_error)
/* "View.MemoryView":359
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL: # <<<<<<<<<<<<<<
* raise MemoryError
*
*/
}
/* "View.MemoryView":357
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL: # <<<<<<<<<<<<<<
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
*/
}
/* "View.MemoryView":362
* raise MemoryError
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":363
*
* if flags & PyBUF_FORMAT:
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<<
* else:
* self.dtype_is_object = dtype_is_object
*/
__pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L11_bool_binop_done:;
__pyx_v_self->dtype_is_object = __pyx_t_1;
/* "View.MemoryView":362
* raise MemoryError
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
*/
goto __pyx_L10;
}
/* "View.MemoryView":365
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
* self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<<
*
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
*/
/*else*/ {
__pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object;
}
__pyx_L10:;
/* "View.MemoryView":367
* self.dtype_is_object = dtype_is_object
*
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<<
* <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
* self.typeinfo = NULL
*/
__pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int))));
/* "View.MemoryView":369
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
* <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
* self.typeinfo = NULL # <<<<<<<<<<<<<<
*
* def __dealloc__(memoryview self):
*/
__pyx_v_self->typeinfo = NULL;
/* "View.MemoryView":344
* cdef __Pyx_TypeInfo *typeinfo
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
* self.obj = obj
* self.flags = flags
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
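/* memoryview.__dealloc__ below releases the buffer when self.obj is not None
 * and then returns self.lock to the preallocated pool: a lock found in
 * __pyx_memoryview_thread_locks is swapped into the last used slot so the pool
 * stays compact, while locks not taken from the pool are destroyed with
 * PyThread_free_lock. */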
/* "View.MemoryView":371
* self.typeinfo = NULL
*
* def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
*/
/* Python wrapper */
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) {
int __pyx_v_i;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyThread_type_lock __pyx_t_6;
PyThread_type_lock __pyx_t_7;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":372
*
* def __dealloc__(memoryview self):
* if self.obj is not None: # <<<<<<<<<<<<<<
* __Pyx_ReleaseBuffer(&self.view)
*
*/
__pyx_t_1 = (__pyx_v_self->obj != Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":373
* def __dealloc__(memoryview self):
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<<
*
* cdef int i
*/
__Pyx_ReleaseBuffer((&__pyx_v_self->view));
/* "View.MemoryView":372
*
* def __dealloc__(memoryview self):
* if self.obj is not None: # <<<<<<<<<<<<<<
* __Pyx_ReleaseBuffer(&self.view)
*
*/
}
/* "View.MemoryView":377
* cdef int i
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL: # <<<<<<<<<<<<<<
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
*/
__pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":378
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<<
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
*/
__pyx_t_3 = __pyx_memoryview_thread_locks_used;
__pyx_t_4 = __pyx_t_3;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
__pyx_v_i = __pyx_t_5;
/* "View.MemoryView":379
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
*/
__pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":380
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<<
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
*/
__pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1);
/* "View.MemoryView":381
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
*/
__pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":383
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<<
* break
* else:
*/
__pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
__pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]);
/* "View.MemoryView":382
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
* break
*/
(__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6;
(__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7;
/* "View.MemoryView":381
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
*/
}
/* "View.MemoryView":384
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
* break # <<<<<<<<<<<<<<
* else:
* PyThread_free_lock(self.lock)
*/
goto __pyx_L6_break;
/* "View.MemoryView":379
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
*/
}
}
/*else*/ {
/* "View.MemoryView":386
* break
* else:
* PyThread_free_lock(self.lock) # <<<<<<<<<<<<<<
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL:
*/
PyThread_free_lock(__pyx_v_self->lock);
}
__pyx_L6_break:;
/* "View.MemoryView":377
* cdef int i
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL: # <<<<<<<<<<<<<<
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
*/
}
/* "View.MemoryView":371
* self.typeinfo = NULL
*
* def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
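/* get_item_pointer below starts from view.buf and, for each entry of the index
 * sequence, calls pybuffer_index to step the char* to the addressed element in
 * that dimension; it is declared `except NULL`, so a NULL return signals a
 * Python exception to the caller. */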
/* "View.MemoryView":388
* PyThread_free_lock(self.lock)
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf
*/
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
Py_ssize_t __pyx_v_dim;
char *__pyx_v_itemp;
PyObject *__pyx_v_idx = NULL;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t __pyx_t_3;
PyObject *(*__pyx_t_4)(PyObject *);
PyObject *__pyx_t_5 = NULL;
Py_ssize_t __pyx_t_6;
char *__pyx_t_7;
__Pyx_RefNannySetupContext("get_item_pointer", 0);
/* "View.MemoryView":390
* cdef char *get_item_pointer(memoryview self, object index) except NULL:
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<<
*
* for dim, idx in enumerate(index):
*/
__pyx_v_itemp = ((char *)__pyx_v_self->view.buf);
/* "View.MemoryView":392
* cdef char *itemp = <char *> self.view.buf
*
* for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
*/
__pyx_t_1 = 0;
if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) {
__pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0;
__pyx_t_4 = NULL;
} else {
__pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 392, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 392, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_4)) {
if (likely(PyList_CheckExact(__pyx_t_2))) {
if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 392, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 392, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
} else {
if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 392, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 392, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
}
} else {
__pyx_t_5 = __pyx_t_4(__pyx_t_2);
if (unlikely(!__pyx_t_5)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 392, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_5);
}
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5);
__pyx_t_5 = 0;
__pyx_v_dim = __pyx_t_1;
__pyx_t_1 = (__pyx_t_1 + 1);
/* "View.MemoryView":393
*
* for dim, idx in enumerate(index):
* itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<<
*
* return itemp
*/
__pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 393, __pyx_L1_error)
__pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 393, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_7;
/* "View.MemoryView":392
* cdef char *itemp = <char *> self.view.buf
*
* for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
*/
}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":395
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
* return itemp # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_itemp;
goto __pyx_L0;
/* "View.MemoryView":388
* PyThread_free_lock(self.lock)
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
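/* __getitem__(index):
 * - index is Ellipsis       -> returns self unchanged
 * - index contains slices   -> _unellipsify() normalizes the index and
 *                              memview_slice() builds a new sliced view
 * - plain integer index(es) -> get_item_pointer() locates the element and
 *                              convert_item_to_object() boxes it as a
 *                              Python object.
 * Roughly, at the Cython level `m[1, 2]` takes the scalar path while
 * `m[1, :]` takes the slicing path. */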
/* "View.MemoryView":398
*
*
* def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
* if index is Ellipsis:
* return self
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_indices = NULL;
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
char *__pyx_t_6;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":399
*
* def __getitem__(memoryview self, object index):
* if index is Ellipsis: # <<<<<<<<<<<<<<
* return self
*
*/
__pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":400
* def __getitem__(memoryview self, object index):
* if index is Ellipsis:
* return self # <<<<<<<<<<<<<<
*
* have_slices, indices = _unellipsify(index, self.view.ndim)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__pyx_r = ((PyObject *)__pyx_v_self);
goto __pyx_L0;
/* "View.MemoryView":399
*
* def __getitem__(memoryview self, object index):
* if index is Ellipsis: # <<<<<<<<<<<<<<
* return self
*
*/
}
/* "View.MemoryView":402
* return self
*
* have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
*
* cdef char *itemp
*/
__pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 402, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (likely(__pyx_t_3 != Py_None)) {
PyObject* sequence = __pyx_t_3;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 402, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_5 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
#else
__pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 402, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 402, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 402, __pyx_L1_error)
}
__pyx_v_have_slices = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_v_indices = __pyx_t_5;
__pyx_t_5 = 0;
/* "View.MemoryView":405
*
* cdef char *itemp
* if have_slices: # <<<<<<<<<<<<<<
* return memview_slice(self, indices)
* else:
*/
__pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 405, __pyx_L1_error)
if (__pyx_t_2) {
/* "View.MemoryView":406
* cdef char *itemp
* if have_slices:
* return memview_slice(self, indices) # <<<<<<<<<<<<<<
* else:
* itemp = self.get_item_pointer(indices)
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 406, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":405
*
* cdef char *itemp
* if have_slices: # <<<<<<<<<<<<<<
* return memview_slice(self, indices)
* else:
*/
}
/* "View.MemoryView":408
* return memview_slice(self, indices)
* else:
* itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<<
* return self.convert_item_to_object(itemp)
*
*/
/*else*/ {
__pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 408, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_6;
/* "View.MemoryView":409
* else:
* itemp = self.get_item_pointer(indices)
* return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<<
*
* def __setitem__(memoryview self, object index, object value):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 409, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":398
*
*
* def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
* if index is Ellipsis:
* return self
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_indices);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
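/* __setitem__(index, value):
 * Rejects assignment on read-only views, then normalizes the index with
 * _unellipsify(). If the index contains slices, the value is either another
 * buffer-like object (setitem_slice_assignment copies its contents
 * element-wise) or a scalar (setitem_slice_assign_scalar broadcasts it over
 * the destination slice). A purely integer index falls through to
 * setitem_indexed(). */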
/* "View.MemoryView":411
* return self.convert_item_to_object(itemp)
*
* def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview")
*/
/* Python wrapper */
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_obj = NULL;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
__Pyx_RefNannySetupContext("__setitem__", 0);
__Pyx_INCREF(__pyx_v_index);
/* "View.MemoryView":412
*
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly: # <<<<<<<<<<<<<<
* raise TypeError("Cannot assign to read-only memoryview")
*
*/
__pyx_t_1 = (__pyx_v_self->view.readonly != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":413
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<<
*
* have_slices, index = _unellipsify(index, self.view.ndim)
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 413, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 413, __pyx_L1_error)
/* "View.MemoryView":412
*
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly: # <<<<<<<<<<<<<<
* raise TypeError("Cannot assign to read-only memoryview")
*
*/
}
/* "View.MemoryView":415
* raise TypeError("Cannot assign to read-only memoryview")
*
* have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
*
* if have_slices:
*/
__pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 415, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (likely(__pyx_t_2 != Py_None)) {
PyObject* sequence = __pyx_t_2;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 415, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
#else
__pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 415, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 415, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
#endif
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 415, __pyx_L1_error)
}
__pyx_v_have_slices = __pyx_t_3;
__pyx_t_3 = 0;
__Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":417
* have_slices, index = _unellipsify(index, self.view.ndim)
*
* if have_slices: # <<<<<<<<<<<<<<
* obj = self.is_slice(value)
* if obj:
*/
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 417, __pyx_L1_error)
if (__pyx_t_1) {
/* "View.MemoryView":418
*
* if have_slices:
* obj = self.is_slice(value) # <<<<<<<<<<<<<<
* if obj:
* self.setitem_slice_assignment(self[index], obj)
*/
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 418, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_v_obj = __pyx_t_2;
__pyx_t_2 = 0;
/* "View.MemoryView":419
* if have_slices:
* obj = self.is_slice(value)
* if obj: # <<<<<<<<<<<<<<
* self.setitem_slice_assignment(self[index], obj)
* else:
*/
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 419, __pyx_L1_error)
if (__pyx_t_1) {
/* "View.MemoryView":420
* obj = self.is_slice(value)
* if obj:
* self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<<
* else:
* self.setitem_slice_assign_scalar(self[index], value)
*/
__pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 420, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 420, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
/* "View.MemoryView":419
* if have_slices:
* obj = self.is_slice(value)
* if obj: # <<<<<<<<<<<<<<
* self.setitem_slice_assignment(self[index], obj)
* else:
*/
goto __pyx_L5;
}
/* "View.MemoryView":422
* self.setitem_slice_assignment(self[index], obj)
* else:
* self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<<
* else:
* self.setitem_indexed(index, value)
*/
/*else*/ {
__pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 422, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 422, __pyx_L1_error)
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 422, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_L5:;
/* "View.MemoryView":417
* have_slices, index = _unellipsify(index, self.view.ndim)
*
* if have_slices: # <<<<<<<<<<<<<<
* obj = self.is_slice(value)
* if obj:
*/
goto __pyx_L4;
}
/* "View.MemoryView":424
* self.setitem_slice_assign_scalar(self[index], value)
* else:
* self.setitem_indexed(index, value) # <<<<<<<<<<<<<<
*
* cdef is_slice(self, obj):
*/
/*else*/ {
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 424, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_L4:;
/* "View.MemoryView":411
* return self.convert_item_to_object(itemp)
*
* def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview")
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
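/* is_slice(obj):
 * Coerces `obj` into a memoryview compatible with self by calling
 * memoryview(obj, self.flags | PyBUF_ANY_CONTIGUOUS, self.dtype_is_object).
 * Objects that do not support the buffer protocol raise TypeError, which is
 * caught and reported as None, signalling "treat the value as a scalar". */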
/* "View.MemoryView":426
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_t_9;
__Pyx_RefNannySetupContext("is_slice", 0);
__Pyx_INCREF(__pyx_v_obj);
/* "View.MemoryView":427
*
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
* try:
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
*/
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":428
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_5);
/*try:*/ {
/* "View.MemoryView":429
* if not isinstance(obj, memoryview):
* try:
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
* self.dtype_is_object)
* except TypeError:
*/
__pyx_t_6 = __Pyx_PyInt_From_int((__pyx_v_self->flags | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 429, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":430
* try:
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object) # <<<<<<<<<<<<<<
* except TypeError:
* return None
*/
__pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 430, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_7);
/* "View.MemoryView":429
* if not isinstance(obj, memoryview):
* try:
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
* self.dtype_is_object)
* except TypeError:
*/
__pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 429, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_INCREF(__pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_7);
PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7);
__pyx_t_6 = 0;
__pyx_t_7 = 0;
__pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 429, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7);
__pyx_t_7 = 0;
/* "View.MemoryView":428
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
}
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
goto __pyx_L9_try_end;
__pyx_L4_error:;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
/* "View.MemoryView":431
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
* except TypeError: # <<<<<<<<<<<<<<
* return None
*
*/
__pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError);
if (__pyx_t_9) {
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 431, __pyx_L6_except_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_GOTREF(__pyx_t_8);
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":432
* self.dtype_is_object)
* except TypeError:
* return None # <<<<<<<<<<<<<<
*
* return obj
*/
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
goto __pyx_L7_except_return;
}
goto __pyx_L6_except_error;
__pyx_L6_except_error:;
/* "View.MemoryView":428
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L1_error;
__pyx_L7_except_return:;
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L0;
__pyx_L9_try_end:;
}
/* "View.MemoryView":427
*
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
* try:
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
*/
}
/* "View.MemoryView":434
* return None
*
* return obj # <<<<<<<<<<<<<<
*
* cdef setitem_slice_assignment(self, dst, src):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_obj);
__pyx_r = __pyx_v_obj;
goto __pyx_L0;
/* "View.MemoryView":426
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
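/* setitem_slice_assignment(dst, src):
 * Extracts a __Pyx_memviewslice from both the source and destination
 * memoryviews with get_slice_from_memview() and delegates the element-wise
 * copy (including any broadcasting across dimensions) to
 * memoryview_copy_contents(). */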
/* "View.MemoryView":436
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) {
__Pyx_memviewslice __pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_src_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
__Pyx_RefNannySetupContext("setitem_slice_assignment", 0);
/* "View.MemoryView":440
* cdef __Pyx_memviewslice src_slice
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object)
*/
if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 440, __pyx_L1_error)
/* "View.MemoryView":441
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
* get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<<
* src.ndim, dst.ndim, self.dtype_is_object)
*
*/
if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 441, __pyx_L1_error)
/* "View.MemoryView":442
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<<
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value):
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 442, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 442, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 442, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 442, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":440
* cdef __Pyx_memviewslice src_slice
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object)
*/
__pyx_t_4 = __pyx_memoryview_copy_contents((__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice))[0]), (__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice))[0]), __pyx_t_2, __pyx_t_3, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 440, __pyx_L1_error)
/* "View.MemoryView":436
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
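/* setitem_slice_assign_scalar(dst, value):
 * Packs the scalar into a temporary item buffer: a 128-int stack array when
 * self.view.itemsize fits, otherwise a PyMem_Malloc'ed block (MemoryError on
 * failure). Object-dtype views store the PyObject* directly; other dtypes go
 * through assign_item_from_object(). slice_assign_scalar() then replicates
 * the packed item across every element of dst, and the finally block releases
 * the heap buffer (PyMem_Free(NULL) is a no-op). */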
/* "View.MemoryView":444
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) {
int __pyx_v_array[0x80];
void *__pyx_v_tmp;
void *__pyx_v_item;
__Pyx_memviewslice *__pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_tmp_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_t_3;
int __pyx_t_4;
char const *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
PyObject *__pyx_t_10 = NULL;
PyObject *__pyx_t_11 = NULL;
__Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0);
/* "View.MemoryView":446
* cdef setitem_slice_assign_scalar(self, memoryview dst, value):
* cdef int array[128]
* cdef void *tmp = NULL # <<<<<<<<<<<<<<
* cdef void *item
*
*/
__pyx_v_tmp = NULL;
/* "View.MemoryView":451
* cdef __Pyx_memviewslice *dst_slice
* cdef __Pyx_memviewslice tmp_slice
* dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<<
*
* if <size_t>self.view.itemsize > sizeof(array):
*/
__pyx_v_dst_slice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice));
/* "View.MemoryView":453
* dst_slice = get_slice_from_memview(dst, &tmp_slice)
*
* if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
*/
__pyx_t_1 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":454
*
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<<
* if tmp == NULL:
* raise MemoryError
*/
__pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize);
/* "View.MemoryView":455
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL: # <<<<<<<<<<<<<<
* raise MemoryError
* item = tmp
*/
__pyx_t_1 = ((__pyx_v_tmp == NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":456
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
* raise MemoryError # <<<<<<<<<<<<<<
* item = tmp
* else:
*/
PyErr_NoMemory(); __PYX_ERR(1, 456, __pyx_L1_error)
/* "View.MemoryView":455
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL: # <<<<<<<<<<<<<<
* raise MemoryError
* item = tmp
*/
}
/* "View.MemoryView":457
* if tmp == NULL:
* raise MemoryError
* item = tmp # <<<<<<<<<<<<<<
* else:
* item = <void *> array
*/
__pyx_v_item = __pyx_v_tmp;
/* "View.MemoryView":453
* dst_slice = get_slice_from_memview(dst, &tmp_slice)
*
* if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
*/
goto __pyx_L3;
}
/* "View.MemoryView":459
* item = tmp
* else:
* item = <void *> array # <<<<<<<<<<<<<<
*
* try:
*/
/*else*/ {
__pyx_v_item = ((void *)__pyx_v_array);
}
__pyx_L3:;
/* "View.MemoryView":461
* item = <void *> array
*
* try: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* (<PyObject **> item)[0] = <PyObject *> value
*/
/*try:*/ {
/* "View.MemoryView":462
*
* try:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* (<PyObject **> item)[0] = <PyObject *> value
* else:
*/
__pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":463
* try:
* if self.dtype_is_object:
* (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<<
* else:
* self.assign_item_from_object(<char *> item, value)
*/
(((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value);
/* "View.MemoryView":462
*
* try:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* (<PyObject **> item)[0] = <PyObject *> value
* else:
*/
goto __pyx_L8;
}
/* "View.MemoryView":465
* (<PyObject **> item)[0] = <PyObject *> value
* else:
* self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<<
*
*
*/
/*else*/ {
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 465, __pyx_L6_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_L8:;
/* "View.MemoryView":469
*
*
* if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
*/
__pyx_t_1 = ((__pyx_v_self->view.suboffsets != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":470
*
* if self.view.suboffsets != NULL:
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<<
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
* item, self.dtype_is_object)
*/
__pyx_t_2 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 470, __pyx_L6_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":469
*
*
* if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
*/
}
/* "View.MemoryView":471
* if self.view.suboffsets != NULL:
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<<
* item, self.dtype_is_object)
* finally:
*/
__pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object);
}
/* "View.MemoryView":474
* item, self.dtype_is_object)
* finally:
* PyMem_Free(tmp) # <<<<<<<<<<<<<<
*
* cdef setitem_indexed(self, index, value):
*/
/*finally:*/ {
/*normal exit:*/{
PyMem_Free(__pyx_v_tmp);
goto __pyx_L7;
}
__pyx_L6_error:;
/*exception exit:*/{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11);
if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8) < 0)) __Pyx_ErrFetch(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8);
__Pyx_XGOTREF(__pyx_t_6);
__Pyx_XGOTREF(__pyx_t_7);
__Pyx_XGOTREF(__pyx_t_8);
__Pyx_XGOTREF(__pyx_t_9);
__Pyx_XGOTREF(__pyx_t_10);
__Pyx_XGOTREF(__pyx_t_11);
__pyx_t_3 = __pyx_lineno; __pyx_t_4 = __pyx_clineno; __pyx_t_5 = __pyx_filename;
{
PyMem_Free(__pyx_v_tmp);
}
if (PY_MAJOR_VERSION >= 3) {
__Pyx_XGIVEREF(__pyx_t_9);
__Pyx_XGIVEREF(__pyx_t_10);
__Pyx_XGIVEREF(__pyx_t_11);
__Pyx_ExceptionReset(__pyx_t_9, __pyx_t_10, __pyx_t_11);
}
__Pyx_XGIVEREF(__pyx_t_6);
__Pyx_XGIVEREF(__pyx_t_7);
__Pyx_XGIVEREF(__pyx_t_8);
__Pyx_ErrRestore(__pyx_t_6, __pyx_t_7, __pyx_t_8);
__pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0;
__pyx_lineno = __pyx_t_3; __pyx_clineno = __pyx_t_4; __pyx_filename = __pyx_t_5;
goto __pyx_L1_error;
}
__pyx_L7:;
}
/* "View.MemoryView":444
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
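/* setitem_indexed(index, value):
 * Scalar-index assignment: get_item_pointer() computes the element address
 * and assign_item_from_object() serializes `value` into it. */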
/* "View.MemoryView":476
* PyMem_Free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
char *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("setitem_indexed", 0);
/* "View.MemoryView":477
*
* cdef setitem_indexed(self, index, value):
* cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<<
* self.assign_item_from_object(itemp, value)
*
*/
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 477, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_1;
/* "View.MemoryView":478
* cdef setitem_indexed(self, index, value):
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<<
*
* cdef convert_item_to_object(self, char *itemp):
*/
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 478, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":476
* PyMem_Free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
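/* convert_item_to_object(itemp):
 * Fallback unboxing used when Cython does not know the element type at
 * compile time. Copies self.view.itemsize bytes into a Python bytes object
 * and decodes it with struct.unpack(self.view.format, ...). A one-character
 * format string yields the unpacked scalar itself; longer formats return the
 * whole tuple. struct.error is converted into ValueError. */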
/* "View.MemoryView":480
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_v_struct = NULL;
PyObject *__pyx_v_bytesitem = 0;
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
int __pyx_t_8;
PyObject *__pyx_t_9 = NULL;
size_t __pyx_t_10;
int __pyx_t_11;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":483
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
* import struct # <<<<<<<<<<<<<<
* cdef bytes bytesitem
*
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 483, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":486
* cdef bytes bytesitem
*
* bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<<
* try:
* result = struct.unpack(self.view.format, bytesitem)
*/
__pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 486, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_bytesitem = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":487
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
/*try:*/ {
/* "View.MemoryView":488
* bytesitem = itemp[:self.view.itemsize]
* try:
* result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<<
* except struct.error:
* raise ValueError("Unable to convert item to object")
*/
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 488, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 488, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = NULL;
__pyx_t_8 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
__pyx_t_8 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
__pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 488, __pyx_L3_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
__pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 488, __pyx_L3_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
{
__pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 488, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_9);
if (__pyx_t_7) {
__Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL;
}
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6);
__Pyx_INCREF(__pyx_v_bytesitem);
__Pyx_GIVEREF(__pyx_v_bytesitem);
PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem);
__pyx_t_6 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 488, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":487
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
}
/* "View.MemoryView":492
* raise ValueError("Unable to convert item to object")
* else:
* if len(self.view.format) == 1: # <<<<<<<<<<<<<<
* return result[0]
* return result
*/
/*else:*/ {
__pyx_t_10 = strlen(__pyx_v_self->view.format);
__pyx_t_11 = ((__pyx_t_10 == 1) != 0);
if (__pyx_t_11) {
/* "View.MemoryView":493
* else:
* if len(self.view.format) == 1:
* return result[0] # <<<<<<<<<<<<<<
* return result
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L6_except_return;
/* "View.MemoryView":492
* raise ValueError("Unable to convert item to object")
* else:
* if len(self.view.format) == 1: # <<<<<<<<<<<<<<
* return result[0]
* return result
*/
}
/* "View.MemoryView":494
* if len(self.view.format) == 1:
* return result[0]
* return result # <<<<<<<<<<<<<<
*
* cdef assign_item_from_object(self, char *itemp, object value):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_result);
__pyx_r = __pyx_v_result;
goto __pyx_L6_except_return;
}
__pyx_L3_error:;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":489
* try:
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error: # <<<<<<<<<<<<<<
* raise ValueError("Unable to convert item to object")
* else:
*/
__Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9);
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 489, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9);
__pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0;
if (__pyx_t_8) {
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 489, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_1);
/* "View.MemoryView":490
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 490, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(1, 490, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "View.MemoryView":487
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L1_error;
__pyx_L6_except_return:;
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L0;
}
/* "View.MemoryView":480
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesitem);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
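/* assign_item_from_object(itemp, value):
 * Fallback boxing counterpart of convert_item_to_object(). Serializes
 * `value` with struct.pack(self.view.format, ...) (spreading tuple values as
 * separate arguments) and copies the resulting bytes into the element at
 * itemp byte by byte. */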
/* "View.MemoryView":496
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_v_struct = NULL;
char __pyx_v_c;
PyObject *__pyx_v_bytesvalue = 0;
Py_ssize_t __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
int __pyx_t_7;
PyObject *__pyx_t_8 = NULL;
Py_ssize_t __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
char *__pyx_t_11;
char *__pyx_t_12;
char *__pyx_t_13;
char *__pyx_t_14;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":499
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
* import struct # <<<<<<<<<<<<<<
* cdef char c
* cdef bytes bytesvalue
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 499, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":504
* cdef Py_ssize_t i
*
* if isinstance(value, tuple): # <<<<<<<<<<<<<<
* bytesvalue = struct.pack(self.view.format, *value)
* else:
*/
__pyx_t_2 = PyTuple_Check(__pyx_v_value);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* "View.MemoryView":505
*
* if isinstance(value, tuple):
* bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<<
* else:
* bytesvalue = struct.pack(self.view.format, value)
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 505, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 505, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 505, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 505, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 505, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 505, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 505, __pyx_L1_error)
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":504
* cdef Py_ssize_t i
*
* if isinstance(value, tuple): # <<<<<<<<<<<<<<
* bytesvalue = struct.pack(self.view.format, *value)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":507
* bytesvalue = struct.pack(self.view.format, *value)
* else:
* bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<<
*
* for i, c in enumerate(bytesvalue):
*/
/*else*/ {
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 507, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 507, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = NULL;
__pyx_t_7 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_6, function);
__pyx_t_7 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
__pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 507, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
__pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 507, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
{
__pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 507, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
if (__pyx_t_5) {
__Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL;
}
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1);
__Pyx_INCREF(__pyx_v_value);
__Pyx_GIVEREF(__pyx_v_value);
PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value);
__pyx_t_1 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 507, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 507, __pyx_L1_error)
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
}
__pyx_L3:;
/* "View.MemoryView":509
* bytesvalue = struct.pack(self.view.format, value)
*
* for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
* itemp[i] = c
*
*/
__pyx_t_9 = 0;
if (unlikely(__pyx_v_bytesvalue == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable");
__PYX_ERR(1, 509, __pyx_L1_error)
}
__Pyx_INCREF(__pyx_v_bytesvalue);
__pyx_t_10 = __pyx_v_bytesvalue;
__pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10);
__pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10));
for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) {
__pyx_t_11 = __pyx_t_14;
__pyx_v_c = (__pyx_t_11[0]);
/* "View.MemoryView":510
*
* for i, c in enumerate(bytesvalue):
* itemp[i] = c # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
__pyx_v_i = __pyx_t_9;
/* "View.MemoryView":509
* bytesvalue = struct.pack(self.view.format, value)
*
* for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
* itemp[i] = c
*
*/
__pyx_t_9 = (__pyx_t_9 + 1);
/* "View.MemoryView":510
*
* for i, c in enumerate(bytesvalue):
* itemp[i] = c # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
(__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c;
}
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
/* "View.MemoryView":496
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesvalue);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
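/* __getbuffer__(info, flags):
 * Re-exports the wrapped buffer through the buffer protocol. A PyBUF_WRITABLE
 * request on a read-only view raises ValueError; otherwise shape, strides,
 * suboffsets and format are filled in only when the corresponding flag is
 * present, and buf, ndim, itemsize, len, readonly and obj are copied from
 * self.view. */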
/* "View.MemoryView":513
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t *__pyx_t_4;
char *__pyx_t_5;
void *__pyx_t_6;
int __pyx_t_7;
Py_ssize_t __pyx_t_8;
if (__pyx_v_info == NULL) {
PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
return -1;
}
__Pyx_RefNannySetupContext("__getbuffer__", 0);
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
/* "View.MemoryView":514
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<<
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
*/
__pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_2 = (__pyx_v_self->view.readonly != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":515
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<<
*
* if flags & PyBUF_STRIDES:
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 515, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 515, __pyx_L1_error)
/* "View.MemoryView":514
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<<
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
*/
}
/* "View.MemoryView":517
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.shape = self.view.shape
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":518
*
* if flags & PyBUF_STRIDES:
* info.shape = self.view.shape # <<<<<<<<<<<<<<
* else:
* info.shape = NULL
*/
__pyx_t_4 = __pyx_v_self->view.shape;
__pyx_v_info->shape = __pyx_t_4;
/* "View.MemoryView":517
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.shape = self.view.shape
* else:
*/
goto __pyx_L6;
}
/* "View.MemoryView":520
* info.shape = self.view.shape
* else:
* info.shape = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_STRIDES:
*/
/*else*/ {
__pyx_v_info->shape = NULL;
}
__pyx_L6:;
/* "View.MemoryView":522
* info.shape = NULL
*
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.strides = self.view.strides
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":523
*
* if flags & PyBUF_STRIDES:
* info.strides = self.view.strides # <<<<<<<<<<<<<<
* else:
* info.strides = NULL
*/
__pyx_t_4 = __pyx_v_self->view.strides;
__pyx_v_info->strides = __pyx_t_4;
/* "View.MemoryView":522
* info.shape = NULL
*
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.strides = self.view.strides
* else:
*/
goto __pyx_L7;
}
/* "View.MemoryView":525
* info.strides = self.view.strides
* else:
* info.strides = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_INDIRECT:
*/
/*else*/ {
__pyx_v_info->strides = NULL;
}
__pyx_L7:;
/* "View.MemoryView":527
* info.strides = NULL
*
* if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
* info.suboffsets = self.view.suboffsets
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":528
*
* if flags & PyBUF_INDIRECT:
* info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<<
* else:
* info.suboffsets = NULL
*/
__pyx_t_4 = __pyx_v_self->view.suboffsets;
__pyx_v_info->suboffsets = __pyx_t_4;
/* "View.MemoryView":527
* info.strides = NULL
*
* if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
* info.suboffsets = self.view.suboffsets
* else:
*/
goto __pyx_L8;
}
/* "View.MemoryView":530
* info.suboffsets = self.view.suboffsets
* else:
* info.suboffsets = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
/*else*/ {
__pyx_v_info->suboffsets = NULL;
}
__pyx_L8:;
/* "View.MemoryView":532
* info.suboffsets = NULL
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.view.format
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":533
*
* if flags & PyBUF_FORMAT:
* info.format = self.view.format # <<<<<<<<<<<<<<
* else:
* info.format = NULL
*/
__pyx_t_5 = __pyx_v_self->view.format;
__pyx_v_info->format = __pyx_t_5;
/* "View.MemoryView":532
* info.suboffsets = NULL
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.view.format
* else:
*/
goto __pyx_L9;
}
/* "View.MemoryView":535
* info.format = self.view.format
* else:
* info.format = NULL # <<<<<<<<<<<<<<
*
* info.buf = self.view.buf
*/
/*else*/ {
__pyx_v_info->format = NULL;
}
__pyx_L9:;
/* "View.MemoryView":537
* info.format = NULL
*
* info.buf = self.view.buf # <<<<<<<<<<<<<<
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize
*/
__pyx_t_6 = __pyx_v_self->view.buf;
__pyx_v_info->buf = __pyx_t_6;
/* "View.MemoryView":538
*
* info.buf = self.view.buf
* info.ndim = self.view.ndim # <<<<<<<<<<<<<<
* info.itemsize = self.view.itemsize
* info.len = self.view.len
*/
__pyx_t_7 = __pyx_v_self->view.ndim;
__pyx_v_info->ndim = __pyx_t_7;
/* "View.MemoryView":539
* info.buf = self.view.buf
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize # <<<<<<<<<<<<<<
* info.len = self.view.len
* info.readonly = self.view.readonly
*/
__pyx_t_8 = __pyx_v_self->view.itemsize;
__pyx_v_info->itemsize = __pyx_t_8;
/* "View.MemoryView":540
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize
* info.len = self.view.len # <<<<<<<<<<<<<<
* info.readonly = self.view.readonly
* info.obj = self
*/
__pyx_t_8 = __pyx_v_self->view.len;
__pyx_v_info->len = __pyx_t_8;
/* "View.MemoryView":541
* info.itemsize = self.view.itemsize
* info.len = self.view.len
* info.readonly = self.view.readonly # <<<<<<<<<<<<<<
* info.obj = self
*
*/
__pyx_t_1 = __pyx_v_self->view.readonly;
__pyx_v_info->readonly = __pyx_t_1;
/* "View.MemoryView":542
* info.len = self.view.len
* info.readonly = self.view.readonly
* info.obj = self # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":513
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
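/* Exit-path note for __getbuffer__ above: on the error path (__pyx_L1_error)
 * any reference already stored in info->obj is released and the field zeroed,
 * so the caller never sees a half-initialised buffer; on the success path
 * (__pyx_L0) info->obj is cleared only if it still holds the Py_None
 * placeholder, otherwise the owning memoryview reference installed earlier is
 * left in place.  The property getters that follow (T, base, shape, strides,
 * suboffsets, ndim, itemsize, nbytes, size) each consist of a Python-level
 * wrapper (__pyx_pw_*) that forwards to the implementation (__pyx_pf_*). */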
/* "View.MemoryView":548
*
* @property
* def T(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":549
* @property
* def T(self):
* cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<<
* transpose_memslice(&result.from_slice)
* return result
*/
__pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 549, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 549, __pyx_L1_error)
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":550
* def T(self):
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<<
* return result
*
*/
__pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 550, __pyx_L1_error)
/* "View.MemoryView":551
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
* return result # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":548
*
* @property
* def T(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
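/* The T property above makes a full copy of the memoryview via
 * __pyx_memoryview_copy_object() and then transposes the copied slice in
 * place with __pyx_memslice_transpose(); a zero return from the transpose
 * helper is treated as failure and propagated as a Python exception. */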
/* "View.MemoryView":554
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":555
* @property
* def base(self):
* return self.obj # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->obj);
__pyx_r = __pyx_v_self->obj;
goto __pyx_L0;
/* "View.MemoryView":554
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":558
*
* @property
* def shape(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_length;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
PyObject *__pyx_t_5 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":559
* @property
* def shape(self):
* return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 559, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
__pyx_t_2 = __pyx_t_4;
__pyx_v_length = (__pyx_t_2[0]);
__pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 559, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 559, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 559, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":558
*
* @property
* def shape(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
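/* The shape getter above builds a Python list from view.shape[0..ndim-1] and
 * converts it to a tuple with PyList_AsTuple().  The strides and suboffsets
 * getters below follow the same loop pattern, except that strides raises
 * ValueError when the underlying buffer does not expose stride information,
 * and suboffsets returns (-1,) * ndim when no suboffsets array is present. */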
/* "View.MemoryView":562
*
* @property
* def strides(self): # <<<<<<<<<<<<<<
* if self.view.strides == NULL:
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_stride;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":563
* @property
* def strides(self):
* if self.view.strides == NULL: # <<<<<<<<<<<<<<
*
* raise ValueError("Buffer view does not expose strides")
*/
__pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":565
* if self.view.strides == NULL:
*
* raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]])
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 565, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 565, __pyx_L1_error)
/* "View.MemoryView":563
* @property
* def strides(self):
* if self.view.strides == NULL: # <<<<<<<<<<<<<<
*
* raise ValueError("Buffer view does not expose strides")
*/
}
/* "View.MemoryView":567
* raise ValueError("Buffer view does not expose strides")
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 567, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_v_stride = (__pyx_t_3[0]);
__pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 567, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 567, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
}
__pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 567, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_6;
__pyx_t_6 = 0;
goto __pyx_L0;
/* "View.MemoryView":562
*
* @property
* def strides(self): # <<<<<<<<<<<<<<
* if self.view.strides == NULL:
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":570
*
* @property
* def suboffsets(self): # <<<<<<<<<<<<<<
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
Py_ssize_t *__pyx_t_6;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":571
* @property
* def suboffsets(self):
* if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
* return (-1,) * self.view.ndim
*
*/
__pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":572
* def suboffsets(self):
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_tuple__18, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":571
* @property
* def suboffsets(self):
* if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
* return (-1,) * self.view.ndim
*
*/
}
/* "View.MemoryView":574
* return (-1,) * self.view.ndim
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 574, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim);
for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) {
__pyx_t_4 = __pyx_t_6;
__pyx_v_suboffset = (__pyx_t_4[0]);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 574, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 574, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 574, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":570
*
* @property
* def suboffsets(self): # <<<<<<<<<<<<<<
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":577
*
* @property
* def ndim(self): # <<<<<<<<<<<<<<
* return self.view.ndim
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":578
* @property
* def ndim(self):
* return self.view.ndim # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 578, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":577
*
* @property
* def ndim(self): # <<<<<<<<<<<<<<
* return self.view.ndim
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":581
*
* @property
* def itemsize(self): # <<<<<<<<<<<<<<
* return self.view.itemsize
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":582
* @property
* def itemsize(self):
* return self.view.itemsize # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 582, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":581
*
* @property
* def itemsize(self): # <<<<<<<<<<<<<<
* return self.view.itemsize
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":585
*
* @property
* def nbytes(self): # <<<<<<<<<<<<<<
* return self.size * self.view.itemsize
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":586
* @property
* def nbytes(self):
* return self.size * self.view.itemsize # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 586, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 586, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 586, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":585
*
* @property
* def nbytes(self): # <<<<<<<<<<<<<<
* return self.size * self.view.itemsize
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":589
*
* @property
* def size(self): # <<<<<<<<<<<<<<
* if self._size is None:
* result = 1
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":590
* @property
* def size(self):
* if self._size is None: # <<<<<<<<<<<<<<
* result = 1
*
*/
__pyx_t_1 = (__pyx_v_self->_size == Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":591
* def size(self):
* if self._size is None:
* result = 1 # <<<<<<<<<<<<<<
*
* for length in self.view.shape[:self.view.ndim]:
*/
__Pyx_INCREF(__pyx_int_1);
__pyx_v_result = __pyx_int_1;
/* "View.MemoryView":593
* result = 1
*
* for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<<
* result *= length
*
*/
__pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 593, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6);
__pyx_t_6 = 0;
/* "View.MemoryView":594
*
* for length in self.view.shape[:self.view.ndim]:
* result *= length # <<<<<<<<<<<<<<
*
* self._size = result
*/
__pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 594, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6);
__pyx_t_6 = 0;
}
/* "View.MemoryView":596
* result *= length
*
* self._size = result # <<<<<<<<<<<<<<
*
* return self._size
*/
__Pyx_INCREF(__pyx_v_result);
__Pyx_GIVEREF(__pyx_v_result);
__Pyx_GOTREF(__pyx_v_self->_size);
__Pyx_DECREF(__pyx_v_self->_size);
__pyx_v_self->_size = __pyx_v_result;
/* "View.MemoryView":590
* @property
* def size(self):
* if self._size is None: # <<<<<<<<<<<<<<
* result = 1
*
*/
}
/* "View.MemoryView":598
* self._size = result
*
* return self._size # <<<<<<<<<<<<<<
*
* def __len__(self):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->_size);
__pyx_r = __pyx_v_self->_size;
goto __pyx_L0;
/* "View.MemoryView":589
*
* @property
* def size(self): # <<<<<<<<<<<<<<
* if self._size is None:
* result = 1
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
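/* The size property above lazily computes the element count: while _size is
 * still None it multiplies the entries of view.shape[0..ndim-1] using
 * Python-integer arithmetic and caches the product in self->_size, so later
 * lookups reuse the cached object.  The nbytes property earlier fetches this
 * cached size through a normal attribute lookup and multiplies it by
 * view.itemsize. */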
/* "View.MemoryView":600
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
/* Python wrapper */
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__len__", 0);
/* "View.MemoryView":601
*
* def __len__(self):
* if self.view.ndim >= 1: # <<<<<<<<<<<<<<
* return self.view.shape[0]
*
*/
__pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":602
* def __len__(self):
* if self.view.ndim >= 1:
* return self.view.shape[0] # <<<<<<<<<<<<<<
*
* return 0
*/
__pyx_r = (__pyx_v_self->view.shape[0]);
goto __pyx_L0;
/* "View.MemoryView":601
*
* def __len__(self):
* if self.view.ndim >= 1: # <<<<<<<<<<<<<<
* return self.view.shape[0]
*
*/
}
/* "View.MemoryView":604
* return self.view.shape[0]
*
* return 0 # <<<<<<<<<<<<<<
*
* def __repr__(self):
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":600
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
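/* __len__ above returns view.shape[0] for buffers with at least one
 * dimension and 0 for zero-dimensional views. */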
/* "View.MemoryView":606
* return 0
*
* def __repr__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self))
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":607
*
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
* id(self))
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 607, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 607, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 607, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":608
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self)) # <<<<<<<<<<<<<<
*
* def __str__(self):
*/
__pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 608, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
/* "View.MemoryView":607
*
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
* id(self))
*
*/
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 607, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 607, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":606
* return 0
*
* def __repr__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self))
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":610
* id(self))
*
* def __str__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
*
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__str__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("__str__", 0);
/* "View.MemoryView":611
*
* def __str__(self):
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 611, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 611, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 611, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 611, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 611, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":610
* id(self))
*
* def __str__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":614
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("is_c_contig", 0);
/* "View.MemoryView":617
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
*/
__pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp));
/* "View.MemoryView":618
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp)
* return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<<
*
* def is_f_contig(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 618, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":614
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
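/* is_c_contig above materialises a __Pyx_memviewslice for this memoryview via
 * __pyx_memoryview_get_slice_from_memoryview() and asks
 * __pyx_memviewslice_is_contig() whether it is C-contiguous; is_f_contig
 * below is identical apart from passing the 'F' (Fortran) order flag. */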
/* "View.MemoryView":620
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("is_f_contig", 0);
/* "View.MemoryView":623
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
*/
__pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp));
/* "View.MemoryView":624
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp)
* return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<<
*
* def copy(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 624, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":620
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":626
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
* def copy(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_mslice;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("copy", 0);
/* "View.MemoryView":628
* def copy(self):
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<<
*
* slice_copy(self, &mslice)
*/
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS));
/* "View.MemoryView":630
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*
* slice_copy(self, &mslice) # <<<<<<<<<<<<<<
* mslice = slice_copy_contig(&mslice, "c", self.view.ndim,
* self.view.itemsize,
*/
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice));
/* "View.MemoryView":631
*
* slice_copy(self, &mslice)
* mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<<
* self.view.itemsize,
* flags|PyBUF_C_CONTIGUOUS,
*/
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 631, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":636
* self.dtype_is_object)
*
* return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<<
*
* def copy_fortran(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 636, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":626
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
* def copy(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":638
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("copy_fortran", 0);
/* "View.MemoryView":640
* def copy_fortran(self):
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<<
*
* slice_copy(self, &src)
*/
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS));
/* "View.MemoryView":642
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*
* slice_copy(self, &src) # <<<<<<<<<<<<<<
* dst = slice_copy_contig(&src, "fortran", self.view.ndim,
* self.view.itemsize,
*/
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src));
/* "View.MemoryView":643
*
* slice_copy(self, &src)
* dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<<
* self.view.itemsize,
* flags|PyBUF_F_CONTIGUOUS,
*/
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 643, __pyx_L1_error)
__pyx_v_dst = __pyx_t_1;
/* "View.MemoryView":648
* self.dtype_is_object)
*
* return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 648, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":638
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
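/* copy() and copy_fortran() above share one strategy: mask the opposite
 * contiguity bit out of self->flags, snapshot the current slice with
 * __pyx_memoryview_slice_copy(), allocate a new contiguous buffer in the
 * requested order via __pyx_memoryview_copy_new_contig(), and wrap the result
 * in a fresh memoryview through __pyx_memoryview_copy_object_from_slice(). */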
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
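/* Pickling of memoryview objects is explicitly disabled: both
 * __reduce_cython__ and __setstate_cython__ above unconditionally raise
 * TypeError, since the type has a non-trivial __cinit__ and cannot be
 * reconstructed from plain state. */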
/* "View.MemoryView":652
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) {
struct __pyx_memoryview_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("memoryview_cwrapper", 0);
/* "View.MemoryView":653
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
* cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<<
* result.typeinfo = typeinfo
* return result
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 653, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 653, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 653, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_o);
__Pyx_GIVEREF(__pyx_v_o);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 653, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":654
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo # <<<<<<<<<<<<<<
* return result
*
*/
__pyx_v_result->typeinfo = __pyx_v_typeinfo;
/* "View.MemoryView":655
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
* return result # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_check')
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":652
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
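/* __pyx_memoryview_new (memoryview_cwrapper) above is the C-level constructor
 * used elsewhere in the module: it boxes the flags and dtype_is_object
 * arguments, calls the memoryview type with (obj, flags, dtype_is_object),
 * and stores the caller-supplied __Pyx_TypeInfo pointer on the new object
 * before returning it. */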
/* "View.MemoryView":658
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("memoryview_check", 0);
/* "View.MemoryView":659
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o):
* return isinstance(o, memoryview) # <<<<<<<<<<<<<<
*
* cdef tuple _unellipsify(object index, int ndim):
*/
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type);
__pyx_r = __pyx_t_1;
goto __pyx_L0;
/* "View.MemoryView":658
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
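/* memoryview_check above is a thin isinstance() test against the memoryview
 * extension type.  The _unellipsify helper that follows normalises an index
 * expression before slicing: a non-tuple index is wrapped in a 1-tuple, a
 * first Ellipsis is expanded into enough slice(None) entries to cover the
 * remaining dimensions, and any item that is neither a slice nor an integer
 * index raises TypeError. */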
/* "View.MemoryView":661
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) {
PyObject *__pyx_v_tup = NULL;
PyObject *__pyx_v_result = NULL;
int __pyx_v_have_slices;
int __pyx_v_seen_ellipsis;
CYTHON_UNUSED PyObject *__pyx_v_idx = NULL;
PyObject *__pyx_v_item = NULL;
Py_ssize_t __pyx_v_nslices;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
Py_ssize_t __pyx_t_5;
PyObject *(*__pyx_t_6)(PyObject *);
PyObject *__pyx_t_7 = NULL;
Py_ssize_t __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
PyObject *__pyx_t_11 = NULL;
__Pyx_RefNannySetupContext("_unellipsify", 0);
/* "View.MemoryView":666
* full slices.
* """
* if not isinstance(index, tuple): # <<<<<<<<<<<<<<
* tup = (index,)
* else:
*/
__pyx_t_1 = PyTuple_Check(__pyx_v_index);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":667
* """
* if not isinstance(index, tuple):
* tup = (index,) # <<<<<<<<<<<<<<
* else:
* tup = index
*/
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 667, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_index);
__Pyx_GIVEREF(__pyx_v_index);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index);
__pyx_v_tup = __pyx_t_3;
__pyx_t_3 = 0;
/* "View.MemoryView":666
* full slices.
* """
* if not isinstance(index, tuple): # <<<<<<<<<<<<<<
* tup = (index,)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":669
* tup = (index,)
* else:
* tup = index # <<<<<<<<<<<<<<
*
* result = []
*/
/*else*/ {
__Pyx_INCREF(__pyx_v_index);
__pyx_v_tup = __pyx_v_index;
}
__pyx_L3:;
/* "View.MemoryView":671
* tup = index
*
* result = [] # <<<<<<<<<<<<<<
* have_slices = False
* seen_ellipsis = False
*/
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 671, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_v_result = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":672
*
* result = []
* have_slices = False # <<<<<<<<<<<<<<
* seen_ellipsis = False
* for idx, item in enumerate(tup):
*/
__pyx_v_have_slices = 0;
/* "View.MemoryView":673
* result = []
* have_slices = False
* seen_ellipsis = False # <<<<<<<<<<<<<<
* for idx, item in enumerate(tup):
* if item is Ellipsis:
*/
__pyx_v_seen_ellipsis = 0;
/* "View.MemoryView":674
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
__Pyx_INCREF(__pyx_int_0);
__pyx_t_3 = __pyx_int_0;
if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) {
__pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0;
__pyx_t_6 = NULL;
} else {
__pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 674, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 674, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_6)) {
if (likely(PyList_CheckExact(__pyx_t_4))) {
if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 674, __pyx_L1_error)
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 674, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
#endif
} else {
if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 674, __pyx_L1_error)
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 674, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
#endif
}
} else {
__pyx_t_7 = __pyx_t_6(__pyx_t_4);
if (unlikely(!__pyx_t_7)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 674, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_7);
}
__Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7);
__pyx_t_7 = 0;
__Pyx_INCREF(__pyx_t_3);
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3);
__pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 674, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_3);
__pyx_t_3 = __pyx_t_7;
__pyx_t_7 = 0;
/* "View.MemoryView":675
* seen_ellipsis = False
* for idx, item in enumerate(tup):
* if item is Ellipsis: # <<<<<<<<<<<<<<
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
*/
__pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":676
* for idx, item in enumerate(tup):
* if item is Ellipsis:
* if not seen_ellipsis: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True
*/
__pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":677
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
__pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 677, __pyx_L1_error)
__pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 677, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__21);
__Pyx_GIVEREF(__pyx_slice__21);
PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__21);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 677, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
/* "View.MemoryView":678
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True # <<<<<<<<<<<<<<
* else:
* result.append(slice(None))
*/
__pyx_v_seen_ellipsis = 1;
/* "View.MemoryView":676
* for idx, item in enumerate(tup):
* if item is Ellipsis:
* if not seen_ellipsis: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True
*/
goto __pyx_L7;
}
/* "View.MemoryView":680
* seen_ellipsis = True
* else:
* result.append(slice(None)) # <<<<<<<<<<<<<<
* have_slices = True
* else:
*/
/*else*/ {
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__22); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 680, __pyx_L1_error)
}
__pyx_L7:;
/* "View.MemoryView":681
* else:
* result.append(slice(None))
* have_slices = True # <<<<<<<<<<<<<<
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
*/
__pyx_v_have_slices = 1;
/* "View.MemoryView":675
* seen_ellipsis = False
* for idx, item in enumerate(tup):
* if item is Ellipsis: # <<<<<<<<<<<<<<
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
*/
goto __pyx_L6;
}
/* "View.MemoryView":683
* have_slices = True
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
* raise TypeError("Cannot index with type '%s'" % type(item))
*
*/
/*else*/ {
__pyx_t_2 = PySlice_Check(__pyx_v_item);
__pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0);
if (__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0);
__pyx_t_1 = __pyx_t_10;
__pyx_L9_bool_binop_done:;
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":684
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
* raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<<
*
* have_slices = have_slices or isinstance(item, slice)
*/
__pyx_t_7 = __Pyx_PyString_Format(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 684, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 684, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_Raise(__pyx_t_11, 0, 0, 0);
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
__PYX_ERR(1, 684, __pyx_L1_error)
/* "View.MemoryView":683
* have_slices = True
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
* raise TypeError("Cannot index with type '%s'" % type(item))
*
*/
}
/* "View.MemoryView":686
* raise TypeError("Cannot index with type '%s'" % type(item))
*
* have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<<
* result.append(item)
*
*/
__pyx_t_10 = (__pyx_v_have_slices != 0);
if (!__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = PySlice_Check(__pyx_v_item);
__pyx_t_2 = (__pyx_t_10 != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L11_bool_binop_done:;
__pyx_v_have_slices = __pyx_t_1;
/* "View.MemoryView":687
*
* have_slices = have_slices or isinstance(item, slice)
* result.append(item) # <<<<<<<<<<<<<<
*
* nslices = ndim - len(result)
*/
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 687, __pyx_L1_error)
}
__pyx_L6:;
/* "View.MemoryView":674
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":689
* result.append(item)
*
* nslices = ndim - len(result) # <<<<<<<<<<<<<<
* if nslices:
* result.extend([slice(None)] * nslices)
*/
__pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 689, __pyx_L1_error)
__pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5);
/* "View.MemoryView":690
*
* nslices = ndim - len(result)
* if nslices: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * nslices)
*
*/
__pyx_t_1 = (__pyx_v_nslices != 0);
if (__pyx_t_1) {
/* "View.MemoryView":691
* nslices = ndim - len(result)
* if nslices:
* result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<<
*
* return have_slices or nslices, tuple(result)
*/
__pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 691, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__23);
__Pyx_GIVEREF(__pyx_slice__23);
PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__23);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 691, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":690
*
* nslices = ndim - len(result)
* if nslices: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * nslices)
*
*/
}
/* "View.MemoryView":693
* result.extend([slice(None)] * nslices)
*
* return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<<
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
*/
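/* Python's short-circuiting `or` is expanded below: have_slices is boxed as a
 * bool only when it is true, otherwise nslices is boxed as an int; the chosen
 * object becomes element 0 of the returned 2-tuple and tuple(result) becomes
 * element 1. */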
__Pyx_XDECREF(__pyx_r);
if (!__pyx_v_have_slices) {
} else {
__pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 693, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L14_bool_binop_done;
}
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 693, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_L14_bool_binop_done:;
__pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 693, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 693, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4);
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_r = ((PyObject*)__pyx_t_11);
__pyx_t_11 = 0;
goto __pyx_L0;
/* "View.MemoryView":661
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_11);
__Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_tup);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_XDECREF(__pyx_v_item);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":695
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
*/
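/* assert_direct_dimensions: scans the first ndim suboffsets and raises
 * ValueError("Indirect dimensions not supported") as soon as one is >= 0,
 * i.e. as soon as a dimension requires pointer indirection. */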
static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) {
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
Py_ssize_t *__pyx_t_1;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
__Pyx_RefNannySetupContext("assert_direct_dimensions", 0);
/* "View.MemoryView":696
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<<
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported")
*/
__pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim);
for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) {
__pyx_t_1 = __pyx_t_3;
__pyx_v_suboffset = (__pyx_t_1[0]);
/* "View.MemoryView":697
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* raise ValueError("Indirect dimensions not supported")
*
*/
__pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":698
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__PYX_ERR(1, 698, __pyx_L1_error)
/* "View.MemoryView":697
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* raise ValueError("Indirect dimensions not supported")
*
*/
}
}
/* "View.MemoryView":695
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":705
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
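/* memview_slice: builds a new memoryview from `memview` by applying `indices`
 * dimension by dimension. The source slice is borrowed from an existing
 * _memoryviewslice when possible, otherwise copied out of the buffer with
 * slice_copy(). Each index is handed to slice_memviewslice(), and the
 * resulting dst slice is wrapped with memoryview_fromslice(), preserving the
 * to_object/to_dtype converters when the source was a _memoryviewslice. */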
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) {
int __pyx_v_new_ndim;
int __pyx_v_suboffset_dim;
int __pyx_v_dim;
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
__Pyx_memviewslice *__pyx_v_p_src;
struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0;
__Pyx_memviewslice *__pyx_v_p_dst;
int *__pyx_v_p_suboffset_dim;
Py_ssize_t __pyx_v_start;
Py_ssize_t __pyx_v_stop;
Py_ssize_t __pyx_v_step;
int __pyx_v_have_start;
int __pyx_v_have_stop;
int __pyx_v_have_step;
PyObject *__pyx_v_index = NULL;
struct __pyx_memoryview_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
struct __pyx_memoryview_obj *__pyx_t_4;
char *__pyx_t_5;
int __pyx_t_6;
Py_ssize_t __pyx_t_7;
PyObject *(*__pyx_t_8)(PyObject *);
PyObject *__pyx_t_9 = NULL;
Py_ssize_t __pyx_t_10;
int __pyx_t_11;
Py_ssize_t __pyx_t_12;
__Pyx_RefNannySetupContext("memview_slice", 0);
/* "View.MemoryView":706
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices):
* cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<<
* cdef bint negative_step
* cdef __Pyx_memviewslice src, dst
*/
__pyx_v_new_ndim = 0;
__pyx_v_suboffset_dim = -1;
/* "View.MemoryView":713
*
*
* memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<<
*
* cdef _memoryviewslice memviewsliceobj
*/
(void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst))));
/* "View.MemoryView":717
* cdef _memoryviewslice memviewsliceobj
*
* assert memview.view.ndim > 0 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(1, 717, __pyx_L1_error)
}
}
#endif
/* "View.MemoryView":719
* assert memview.view.ndim > 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":720
*
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview # <<<<<<<<<<<<<<
* p_src = &memviewsliceobj.from_slice
* else:
*/
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 720, __pyx_L1_error)
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":721
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<<
* else:
* slice_copy(memview, &src)
*/
__pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice);
/* "View.MemoryView":719
* assert memview.view.ndim > 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice
*/
goto __pyx_L3;
}
/* "View.MemoryView":723
* p_src = &memviewsliceobj.from_slice
* else:
* slice_copy(memview, &src) # <<<<<<<<<<<<<<
* p_src = &src
*
*/
/*else*/ {
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src));
/* "View.MemoryView":724
* else:
* slice_copy(memview, &src)
* p_src = &src # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_p_src = (&__pyx_v_src);
}
__pyx_L3:;
/* "View.MemoryView":730
*
*
* dst.memview = p_src.memview # <<<<<<<<<<<<<<
* dst.data = p_src.data
*
*/
__pyx_t_4 = __pyx_v_p_src->memview;
__pyx_v_dst.memview = __pyx_t_4;
/* "View.MemoryView":731
*
* dst.memview = p_src.memview
* dst.data = p_src.data # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __pyx_v_p_src->data;
__pyx_v_dst.data = __pyx_t_5;
/* "View.MemoryView":736
*
*
* cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<<
* cdef int *p_suboffset_dim = &suboffset_dim
* cdef Py_ssize_t start, stop, step
*/
__pyx_v_p_dst = (&__pyx_v_dst);
/* "View.MemoryView":737
*
* cdef __Pyx_memviewslice *p_dst = &dst
* cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<<
* cdef Py_ssize_t start, stop, step
* cdef bint have_start, have_stop, have_step
*/
__pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim);
/* "View.MemoryView":741
* cdef bint have_start, have_stop, have_step
*
* for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
* if PyIndex_Check(index):
* slice_memviewslice(
*/
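/* Dispatch per index kind: an integral index (PyIndex_Check) consumes a source
 * dimension without producing one in dst; None inserts a new length-1
 * dimension with stride 0; anything else is treated as a slice whose
 * start/stop/step are unpacked below (missing fields default to 0 with the
 * matching have_* flag cleared). */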
__pyx_t_6 = 0;
if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) {
__pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0;
__pyx_t_8 = NULL;
} else {
__pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 741, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 741, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_8)) {
if (likely(PyList_CheckExact(__pyx_t_3))) {
if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 741, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 741, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
} else {
if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 741, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 741, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
}
} else {
__pyx_t_9 = __pyx_t_8(__pyx_t_3);
if (unlikely(!__pyx_t_9)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 741, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_9);
}
__Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9);
__pyx_t_9 = 0;
__pyx_v_dim = __pyx_t_6;
__pyx_t_6 = (__pyx_t_6 + 1);
/* "View.MemoryView":742
*
* for dim, index in enumerate(indices):
* if PyIndex_Check(index): # <<<<<<<<<<<<<<
* slice_memviewslice(
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
*/
__pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":746
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
* index, 0, 0, # start, stop, step # <<<<<<<<<<<<<<
* 0, 0, 0, # have_{start,stop,step}
* False)
*/
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 746, __pyx_L1_error)
/* "View.MemoryView":743
* for dim, index in enumerate(indices):
* if PyIndex_Check(index):
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 743, __pyx_L1_error)
/* "View.MemoryView":742
*
* for dim, index in enumerate(indices):
* if PyIndex_Check(index): # <<<<<<<<<<<<<<
* slice_memviewslice(
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
*/
goto __pyx_L6;
}
/* "View.MemoryView":749
* 0, 0, 0, # have_{start,stop,step}
* False)
* elif index is None: # <<<<<<<<<<<<<<
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
*/
__pyx_t_2 = (__pyx_v_index == Py_None);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":750
* False)
* elif index is None:
* p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<<
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
*/
(__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1;
/* "View.MemoryView":751
* elif index is None:
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<<
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1
*/
(__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0;
/* "View.MemoryView":752
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<<
* new_ndim += 1
* else:
*/
(__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L;
/* "View.MemoryView":753
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1 # <<<<<<<<<<<<<<
* else:
* start = index.start or 0
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
/* "View.MemoryView":749
* 0, 0, 0, # have_{start,stop,step}
* False)
* elif index is None: # <<<<<<<<<<<<<<
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
*/
goto __pyx_L6;
}
/* "View.MemoryView":755
* new_ndim += 1
* else:
* start = index.start or 0 # <<<<<<<<<<<<<<
* stop = index.stop or 0
* step = index.step or 0
*/
/*else*/ {
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 755, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 755, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 755, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L7_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L7_bool_binop_done:;
__pyx_v_start = __pyx_t_10;
/* "View.MemoryView":756
* else:
* start = index.start or 0
* stop = index.stop or 0 # <<<<<<<<<<<<<<
* step = index.step or 0
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 756, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 756, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 756, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L9_bool_binop_done:;
__pyx_v_stop = __pyx_t_10;
/* "View.MemoryView":757
* start = index.start or 0
* stop = index.stop or 0
* step = index.step or 0 # <<<<<<<<<<<<<<
*
* have_start = index.start is not None
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 757, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 757, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 757, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L11_bool_binop_done:;
__pyx_v_step = __pyx_t_10;
/* "View.MemoryView":759
* step = index.step or 0
*
* have_start = index.start is not None # <<<<<<<<<<<<<<
* have_stop = index.stop is not None
* have_step = index.step is not None
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 759, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_start = __pyx_t_1;
/* "View.MemoryView":760
*
* have_start = index.start is not None
* have_stop = index.stop is not None # <<<<<<<<<<<<<<
* have_step = index.step is not None
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_stop = __pyx_t_1;
/* "View.MemoryView":761
* have_start = index.start is not None
* have_stop = index.stop is not None
* have_step = index.step is not None # <<<<<<<<<<<<<<
*
* slice_memviewslice(
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_step = __pyx_t_1;
/* "View.MemoryView":763
* have_step = index.step is not None
*
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 763, __pyx_L1_error)
/* "View.MemoryView":769
* have_start, have_stop, have_step,
* True)
* new_ndim += 1 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
}
__pyx_L6:;
/* "View.MemoryView":741
* cdef bint have_start, have_stop, have_step
*
* for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
* if PyIndex_Check(index):
* slice_memviewslice(
*/
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":771
* new_ndim += 1
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":772
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":773
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func, # <<<<<<<<<<<<<<
* memviewsliceobj.to_dtype_func,
* memview.dtype_is_object)
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 773, __pyx_L1_error) }
/* "View.MemoryView":774
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
* else:
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 774, __pyx_L1_error) }
/* "View.MemoryView":772
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 772, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 772, __pyx_L1_error)
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":771
* new_ndim += 1
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
*/
}
/* "View.MemoryView":777
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
/*else*/ {
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":778
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL,
* memview.dtype_is_object) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 777, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
/* "View.MemoryView":777
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 777, __pyx_L1_error)
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":705
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":802
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
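/* slice_memviewslice: applies one index or slice to dimension `dim` of the
 * source and fills dimension `new_ndim` of dst. For a plain integer index
 * (is_slice == 0) only a negative start is wrapped and bounds-checked; for a
 * slice, start/stop are clamped to the valid range following Python slice
 * semantics (respecting the step sign), step defaults to 1, and the new
 * stride/shape/suboffset are stored. In both cases dst.data (or the currently
 * indirect dimension's suboffset) is then advanced by start * stride.
 * Returns 0 on success and -1 after a reported _err_dim()/_err() error. */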
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) {
Py_ssize_t __pyx_v_new_shape;
int __pyx_v_negative_step;
int __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
/* "View.MemoryView":822
* cdef bint negative_step
*
* if not is_slice: # <<<<<<<<<<<<<<
*
* if start < 0:
*/
__pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":824
* if not is_slice:
*
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if not 0 <= start < shape:
*/
__pyx_t_1 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":825
*
* if start < 0:
* start += shape # <<<<<<<<<<<<<<
* if not 0 <= start < shape:
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
*/
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
/* "View.MemoryView":824
* if not is_slice:
*
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if not 0 <= start < shape:
*/
}
/* "View.MemoryView":826
* if start < 0:
* start += shape
* if not 0 <= start < shape: # <<<<<<<<<<<<<<
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
* else:
*/
__pyx_t_1 = (0 <= __pyx_v_start);
if (__pyx_t_1) {
__pyx_t_1 = (__pyx_v_start < __pyx_v_shape);
}
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":827
* start += shape
* if not 0 <= start < shape:
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<<
* else:
*
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 827, __pyx_L1_error)
/* "View.MemoryView":826
* if start < 0:
* start += shape
* if not 0 <= start < shape: # <<<<<<<<<<<<<<
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
* else:
*/
}
/* "View.MemoryView":822
* cdef bint negative_step
*
* if not is_slice: # <<<<<<<<<<<<<<
*
* if start < 0:
*/
goto __pyx_L3;
}
/* "View.MemoryView":830
* else:
*
* negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<<
*
* if have_step and step == 0:
*/
/*else*/ {
__pyx_t_1 = ((__pyx_v_have_step != 0) != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step < 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L6_bool_binop_done:;
__pyx_v_negative_step = __pyx_t_2;
/* "View.MemoryView":832
* negative_step = have_step != 0 and step < 0
*
* if have_step and step == 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
*
*/
__pyx_t_1 = (__pyx_v_have_step != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step == 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L9_bool_binop_done:;
if (__pyx_t_2) {
/* "View.MemoryView":833
*
* if have_step and step == 0:
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 833, __pyx_L1_error)
/* "View.MemoryView":832
* negative_step = have_step != 0 and step < 0
*
* if have_step and step == 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
*
*/
}
/* "View.MemoryView":836
*
*
* if have_start: # <<<<<<<<<<<<<<
* if start < 0:
* start += shape
*/
__pyx_t_2 = (__pyx_v_have_start != 0);
if (__pyx_t_2) {
/* "View.MemoryView":837
*
* if have_start:
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if start < 0:
*/
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":838
* if have_start:
* if start < 0:
* start += shape # <<<<<<<<<<<<<<
* if start < 0:
* start = 0
*/
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
/* "View.MemoryView":839
* if start < 0:
* start += shape
* if start < 0: # <<<<<<<<<<<<<<
* start = 0
* elif start >= shape:
*/
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":840
* start += shape
* if start < 0:
* start = 0 # <<<<<<<<<<<<<<
* elif start >= shape:
* if negative_step:
*/
__pyx_v_start = 0;
/* "View.MemoryView":839
* if start < 0:
* start += shape
* if start < 0: # <<<<<<<<<<<<<<
* start = 0
* elif start >= shape:
*/
}
/* "View.MemoryView":837
*
* if have_start:
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if start < 0:
*/
goto __pyx_L12;
}
/* "View.MemoryView":841
* if start < 0:
* start = 0
* elif start >= shape: # <<<<<<<<<<<<<<
* if negative_step:
* start = shape - 1
*/
__pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":842
* start = 0
* elif start >= shape:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":843
* elif start >= shape:
* if negative_step:
* start = shape - 1 # <<<<<<<<<<<<<<
* else:
* start = shape
*/
__pyx_v_start = (__pyx_v_shape - 1);
/* "View.MemoryView":842
* start = 0
* elif start >= shape:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
goto __pyx_L14;
}
/* "View.MemoryView":845
* start = shape - 1
* else:
* start = shape # <<<<<<<<<<<<<<
* else:
* if negative_step:
*/
/*else*/ {
__pyx_v_start = __pyx_v_shape;
}
__pyx_L14:;
/* "View.MemoryView":841
* if start < 0:
* start = 0
* elif start >= shape: # <<<<<<<<<<<<<<
* if negative_step:
* start = shape - 1
*/
}
__pyx_L12:;
/* "View.MemoryView":836
*
*
* if have_start: # <<<<<<<<<<<<<<
* if start < 0:
* start += shape
*/
goto __pyx_L11;
}
/* "View.MemoryView":847
* start = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
/*else*/ {
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":848
* else:
* if negative_step:
* start = shape - 1 # <<<<<<<<<<<<<<
* else:
* start = 0
*/
__pyx_v_start = (__pyx_v_shape - 1);
/* "View.MemoryView":847
* start = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
goto __pyx_L15;
}
/* "View.MemoryView":850
* start = shape - 1
* else:
* start = 0 # <<<<<<<<<<<<<<
*
* if have_stop:
*/
/*else*/ {
__pyx_v_start = 0;
}
__pyx_L15:;
}
__pyx_L11:;
/* "View.MemoryView":852
* start = 0
*
* if have_stop: # <<<<<<<<<<<<<<
* if stop < 0:
* stop += shape
*/
__pyx_t_2 = (__pyx_v_have_stop != 0);
if (__pyx_t_2) {
/* "View.MemoryView":853
*
* if have_stop:
* if stop < 0: # <<<<<<<<<<<<<<
* stop += shape
* if stop < 0:
*/
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":854
* if have_stop:
* if stop < 0:
* stop += shape # <<<<<<<<<<<<<<
* if stop < 0:
* stop = 0
*/
__pyx_v_stop = (__pyx_v_stop + __pyx_v_shape);
/* "View.MemoryView":855
* if stop < 0:
* stop += shape
* if stop < 0: # <<<<<<<<<<<<<<
* stop = 0
* elif stop > shape:
*/
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":856
* stop += shape
* if stop < 0:
* stop = 0 # <<<<<<<<<<<<<<
* elif stop > shape:
* stop = shape
*/
__pyx_v_stop = 0;
/* "View.MemoryView":855
* if stop < 0:
* stop += shape
* if stop < 0: # <<<<<<<<<<<<<<
* stop = 0
* elif stop > shape:
*/
}
/* "View.MemoryView":853
*
* if have_stop:
* if stop < 0: # <<<<<<<<<<<<<<
* stop += shape
* if stop < 0:
*/
goto __pyx_L17;
}
/* "View.MemoryView":857
* if stop < 0:
* stop = 0
* elif stop > shape: # <<<<<<<<<<<<<<
* stop = shape
* else:
*/
__pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":858
* stop = 0
* elif stop > shape:
* stop = shape # <<<<<<<<<<<<<<
* else:
* if negative_step:
*/
__pyx_v_stop = __pyx_v_shape;
/* "View.MemoryView":857
* if stop < 0:
* stop = 0
* elif stop > shape: # <<<<<<<<<<<<<<
* stop = shape
* else:
*/
}
__pyx_L17:;
/* "View.MemoryView":852
* start = 0
*
* if have_stop: # <<<<<<<<<<<<<<
* if stop < 0:
* stop += shape
*/
goto __pyx_L16;
}
/* "View.MemoryView":860
* stop = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* stop = -1
* else:
*/
/*else*/ {
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":861
* else:
* if negative_step:
* stop = -1 # <<<<<<<<<<<<<<
* else:
* stop = shape
*/
__pyx_v_stop = -1L;
/* "View.MemoryView":860
* stop = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* stop = -1
* else:
*/
goto __pyx_L19;
}
/* "View.MemoryView":863
* stop = -1
* else:
* stop = shape # <<<<<<<<<<<<<<
*
* if not have_step:
*/
/*else*/ {
__pyx_v_stop = __pyx_v_shape;
}
__pyx_L19:;
}
__pyx_L16:;
/* "View.MemoryView":865
* stop = shape
*
* if not have_step: # <<<<<<<<<<<<<<
* step = 1
*
*/
__pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":866
*
* if not have_step:
* step = 1 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_step = 1;
/* "View.MemoryView":865
* stop = shape
*
* if not have_step: # <<<<<<<<<<<<<<
* step = 1
*
*/
}
/* "View.MemoryView":870
*
* with cython.cdivision(True):
* new_shape = (stop - start) // step # <<<<<<<<<<<<<<
*
* if (stop - start) - step * new_shape:
*/
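/* With cdivision(True) the quotient truncates toward zero, so the nonzero
 * remainder check below bumps new_shape by one; for a non-empty slice this
 * yields the element count ceil((stop - start) / step). A negative result
 * (empty slice) is clamped to zero afterwards. */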
__pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step);
/* "View.MemoryView":872
* new_shape = (stop - start) // step
*
* if (stop - start) - step * new_shape: # <<<<<<<<<<<<<<
* new_shape += 1
*
*/
__pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":873
*
* if (stop - start) - step * new_shape:
* new_shape += 1 # <<<<<<<<<<<<<<
*
* if new_shape < 0:
*/
__pyx_v_new_shape = (__pyx_v_new_shape + 1);
/* "View.MemoryView":872
* new_shape = (stop - start) // step
*
* if (stop - start) - step * new_shape: # <<<<<<<<<<<<<<
* new_shape += 1
*
*/
}
/* "View.MemoryView":875
* new_shape += 1
*
* if new_shape < 0: # <<<<<<<<<<<<<<
* new_shape = 0
*
*/
__pyx_t_2 = ((__pyx_v_new_shape < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":876
*
* if new_shape < 0:
* new_shape = 0 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_new_shape = 0;
/* "View.MemoryView":875
* new_shape += 1
*
* if new_shape < 0: # <<<<<<<<<<<<<<
* new_shape = 0
*
*/
}
/* "View.MemoryView":879
*
*
* dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<<
* dst.shape[new_ndim] = new_shape
* dst.suboffsets[new_ndim] = suboffset
*/
(__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step);
/* "View.MemoryView":880
*
* dst.strides[new_ndim] = stride * step
* dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<<
* dst.suboffsets[new_ndim] = suboffset
*
*/
(__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape;
/* "View.MemoryView":881
* dst.strides[new_ndim] = stride * step
* dst.shape[new_ndim] = new_shape
* dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<<
*
*
*/
(__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset;
}
__pyx_L3:;
/* "View.MemoryView":884
*
*
* if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
* dst.data += start * stride
* else:
*/
__pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":885
*
* if suboffset_dim[0] < 0:
* dst.data += start * stride # <<<<<<<<<<<<<<
* else:
* dst.suboffsets[suboffset_dim[0]] += start * stride
*/
__pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride));
/* "View.MemoryView":884
*
*
* if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
* dst.data += start * stride
* else:
*/
goto __pyx_L23;
}
/* "View.MemoryView":887
* dst.data += start * stride
* else:
* dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<<
*
* if suboffset >= 0:
*/
/*else*/ {
__pyx_t_3 = (__pyx_v_suboffset_dim[0]);
(__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride));
}
__pyx_L23:;
/* "View.MemoryView":889
* dst.suboffsets[suboffset_dim[0]] += start * stride
*
* if suboffset >= 0: # <<<<<<<<<<<<<<
* if not is_slice:
* if new_ndim == 0:
*/
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":890
*
* if suboffset >= 0:
* if not is_slice: # <<<<<<<<<<<<<<
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset
*/
__pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":891
* if suboffset >= 0:
* if not is_slice:
* if new_ndim == 0: # <<<<<<<<<<<<<<
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
*/
__pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":892
* if not is_slice:
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<<
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d "
*/
__pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset);
/* "View.MemoryView":891
* if suboffset >= 0:
* if not is_slice:
* if new_ndim == 0: # <<<<<<<<<<<<<<
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
*/
goto __pyx_L26;
}
/* "View.MemoryView":894
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<<
* "must be indexed and not sliced", dim)
* else:
*/
/*else*/ {
/* "View.MemoryView":895
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d "
* "must be indexed and not sliced", dim) # <<<<<<<<<<<<<<
* else:
* suboffset_dim[0] = new_ndim
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 894, __pyx_L1_error)
}
__pyx_L26:;
/* "View.MemoryView":890
*
* if suboffset >= 0:
* if not is_slice: # <<<<<<<<<<<<<<
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset
*/
goto __pyx_L25;
}
/* "View.MemoryView":897
* "must be indexed and not sliced", dim)
* else:
* suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<<
*
* return 0
*/
/*else*/ {
(__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim;
}
__pyx_L25:;
/* "View.MemoryView":889
* dst.suboffsets[suboffset_dim[0]] += start * stride
*
* if suboffset >= 0: # <<<<<<<<<<<<<<
* if not is_slice:
* if new_ndim == 0:
*/
}
/* "View.MemoryView":899
* suboffset_dim[0] = new_ndim
*
* return 0 # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":802
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":905
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
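/* pybuffer_index: resolves a single integer index on axis `dim` of a
 * Py_buffer to a char* into the buffer. A 0-dim buffer is treated as a flat
 * array of view.len / itemsize items with stride == itemsize; a negative
 * index is wrapped once and then bounds-checked against the shape
 * (IndexError on failure); if the axis carries a suboffset >= 0 the pointer
 * is dereferenced and the suboffset added (PEP 3118 indirect addressing). */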
static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) {
Py_ssize_t __pyx_v_shape;
Py_ssize_t __pyx_v_stride;
Py_ssize_t __pyx_v_suboffset;
Py_ssize_t __pyx_v_itemsize;
char *__pyx_v_resultp;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
__Pyx_RefNannySetupContext("pybuffer_index", 0);
/* "View.MemoryView":907
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<<
* cdef Py_ssize_t itemsize = view.itemsize
* cdef char *resultp
*/
__pyx_v_suboffset = -1L;
/* "View.MemoryView":908
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
* cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<<
* cdef char *resultp
*
*/
__pyx_t_1 = __pyx_v_view->itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":911
* cdef char *resultp
*
* if view.ndim == 0: # <<<<<<<<<<<<<<
* shape = view.len / itemsize
* stride = itemsize
*/
__pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":912
*
* if view.ndim == 0:
* shape = view.len / itemsize # <<<<<<<<<<<<<<
* stride = itemsize
* else:
*/
if (unlikely(__pyx_v_itemsize == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
__PYX_ERR(1, 912, __pyx_L1_error)
}
else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) {
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
__PYX_ERR(1, 912, __pyx_L1_error)
}
__pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize);
/* "View.MemoryView":913
* if view.ndim == 0:
* shape = view.len / itemsize
* stride = itemsize # <<<<<<<<<<<<<<
* else:
* shape = view.shape[dim]
*/
__pyx_v_stride = __pyx_v_itemsize;
/* "View.MemoryView":911
* cdef char *resultp
*
* if view.ndim == 0: # <<<<<<<<<<<<<<
* shape = view.len / itemsize
* stride = itemsize
*/
goto __pyx_L3;
}
/* "View.MemoryView":915
* stride = itemsize
* else:
* shape = view.shape[dim] # <<<<<<<<<<<<<<
* stride = view.strides[dim]
* if view.suboffsets != NULL:
*/
/*else*/ {
__pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]);
/* "View.MemoryView":916
* else:
* shape = view.shape[dim]
* stride = view.strides[dim] # <<<<<<<<<<<<<<
* if view.suboffsets != NULL:
* suboffset = view.suboffsets[dim]
*/
__pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]);
/* "View.MemoryView":917
* shape = view.shape[dim]
* stride = view.strides[dim]
* if view.suboffsets != NULL: # <<<<<<<<<<<<<<
* suboffset = view.suboffsets[dim]
*
*/
__pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":918
* stride = view.strides[dim]
* if view.suboffsets != NULL:
* suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<<
*
* if index < 0:
*/
__pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]);
/* "View.MemoryView":917
* shape = view.shape[dim]
* stride = view.strides[dim]
* if view.suboffsets != NULL: # <<<<<<<<<<<<<<
* suboffset = view.suboffsets[dim]
*
*/
}
}
__pyx_L3:;
/* "View.MemoryView":920
* suboffset = view.suboffsets[dim]
*
* if index < 0: # <<<<<<<<<<<<<<
* index += view.shape[dim]
* if index < 0:
*/
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":921
*
* if index < 0:
* index += view.shape[dim] # <<<<<<<<<<<<<<
* if index < 0:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*/
__pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim]));
/* "View.MemoryView":922
* if index < 0:
* index += view.shape[dim]
* if index < 0: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":923
* index += view.shape[dim]
* if index < 0:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
*
* if index >= shape:
*/
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 923, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 923, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 923, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 923, __pyx_L1_error)
/* "View.MemoryView":922
* if index < 0:
* index += view.shape[dim]
* if index < 0: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
}
/* "View.MemoryView":920
* suboffset = view.suboffsets[dim]
*
* if index < 0: # <<<<<<<<<<<<<<
* index += view.shape[dim]
* if index < 0:
*/
}
/* "View.MemoryView":925
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* if index >= shape: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
__pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":926
*
* if index >= shape:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
*
* resultp = bufp + index * stride
*/
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 926, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 926, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 926, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 926, __pyx_L1_error)
/* "View.MemoryView":925
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* if index >= shape: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
}
/* "View.MemoryView":928
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* resultp = bufp + index * stride # <<<<<<<<<<<<<<
* if suboffset >= 0:
* resultp = (<char **> resultp)[0] + suboffset
*/
__pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride));
/* "View.MemoryView":929
*
* resultp = bufp + index * stride
* if suboffset >= 0: # <<<<<<<<<<<<<<
* resultp = (<char **> resultp)[0] + suboffset
*
*/
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":930
* resultp = bufp + index * stride
* if suboffset >= 0:
* resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<<
*
* return resultp
*/
__pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset);
/* "View.MemoryView":929
*
* resultp = bufp + index * stride
* if suboffset >= 0: # <<<<<<<<<<<<<<
* resultp = (<char **> resultp)[0] + suboffset
*
*/
}
/* "View.MemoryView":932
* resultp = (<char **> resultp)[0] + suboffset
*
* return resultp # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_resultp;
goto __pyx_L0;
/* "View.MemoryView":905
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":938
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
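/* transpose_memslice: reverses the shape and strides arrays in place by
 * swapping entry i with entry ndim-1-i, which transposes the view without
 * touching the underlying data. A dimension with a suboffset >= 0 cannot be
 * transposed this way, so _err() raises ValueError in that case. Returns 1 on
 * success; 0 is the error value (the function is declared `except 0`). */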
static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) {
int __pyx_v_ndim;
Py_ssize_t *__pyx_v_shape;
Py_ssize_t *__pyx_v_strides;
int __pyx_v_i;
int __pyx_v_j;
int __pyx_r;
int __pyx_t_1;
Py_ssize_t *__pyx_t_2;
long __pyx_t_3;
long __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
/* "View.MemoryView":939
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0:
* cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<<
*
* cdef Py_ssize_t *shape = memslice.shape
*/
__pyx_t_1 = __pyx_v_memslice->memview->view.ndim;
__pyx_v_ndim = __pyx_t_1;
/* "View.MemoryView":941
* cdef int ndim = memslice.memview.view.ndim
*
* cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<<
* cdef Py_ssize_t *strides = memslice.strides
*
*/
__pyx_t_2 = __pyx_v_memslice->shape;
__pyx_v_shape = __pyx_t_2;
/* "View.MemoryView":942
*
* cdef Py_ssize_t *shape = memslice.shape
* cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = __pyx_v_memslice->strides;
__pyx_v_strides = __pyx_t_2;
/* "View.MemoryView":946
*
* cdef int i, j
* for i in range(ndim / 2): # <<<<<<<<<<<<<<
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i]
*/
__pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2);
__pyx_t_4 = __pyx_t_3;
for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":947
* cdef int i, j
* for i in range(ndim / 2):
* j = ndim - 1 - i # <<<<<<<<<<<<<<
* strides[i], strides[j] = strides[j], strides[i]
* shape[i], shape[j] = shape[j], shape[i]
*/
__pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i);
/* "View.MemoryView":948
* for i in range(ndim / 2):
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<<
* shape[i], shape[j] = shape[j], shape[i]
*
*/
__pyx_t_5 = (__pyx_v_strides[__pyx_v_j]);
__pyx_t_6 = (__pyx_v_strides[__pyx_v_i]);
(__pyx_v_strides[__pyx_v_i]) = __pyx_t_5;
(__pyx_v_strides[__pyx_v_j]) = __pyx_t_6;
/* "View.MemoryView":949
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i]
* shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<<
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
*/
__pyx_t_6 = (__pyx_v_shape[__pyx_v_j]);
__pyx_t_5 = (__pyx_v_shape[__pyx_v_i]);
(__pyx_v_shape[__pyx_v_i]) = __pyx_t_6;
(__pyx_v_shape[__pyx_v_j]) = __pyx_t_5;
/* "View.MemoryView":951
* shape[i], shape[j] = shape[j], shape[i]
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<<
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
*/
__pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0);
if (!__pyx_t_8) {
} else {
__pyx_t_7 = __pyx_t_8;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0);
__pyx_t_7 = __pyx_t_8;
__pyx_L6_bool_binop_done:;
if (__pyx_t_7) {
/* "View.MemoryView":952
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<<
*
* return 1
*/
__pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 952, __pyx_L1_error)
/* "View.MemoryView":951
* shape[i], shape[j] = shape[j], shape[i]
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<<
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
*/
}
}
/* "View.MemoryView":954
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
* return 1 # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = 1;
goto __pyx_L0;
/* "View.MemoryView":938
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = 0;
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":971
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
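/* _memoryviewslice.__dealloc__ simply drops the reference this slice object
 * holds on its source via __PYX_XDEC_MEMVIEW(&self.from_slice, 1). */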
/* Python wrapper */
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":972
*
* def __dealloc__(self):
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<<
*
* cdef convert_item_to_object(self, char *itemp):
*/
__PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1);
/* "View.MemoryView":971
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":974
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
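/* convert_item_to_object: if a to_object_func pointer is attached to this
 * slice it is used to turn the raw item bytes at `itemp` into a Python
 * object; otherwise the base memoryview implementation is reused. */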
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":975
*
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL: # <<<<<<<<<<<<<<
* return self.to_object_func(itemp)
* else:
*/
__pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":976
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL:
* return self.to_object_func(itemp) # <<<<<<<<<<<<<<
* else:
* return memoryview.convert_item_to_object(self, itemp)
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 976, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":975
*
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL: # <<<<<<<<<<<<<<
* return self.to_object_func(itemp)
* else:
*/
}
/* "View.MemoryView":978
* return self.to_object_func(itemp)
* else:
* return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<<
*
* cdef assign_item_from_object(self, char *itemp, object value):
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 978, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":974
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
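/* assign_item_from_object: the write-side counterpart; stores a Python value
 * at itemp through to_dtype_func when available, else via the base class. */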
/* "View.MemoryView":980
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":981
*
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL: # <<<<<<<<<<<<<<
* self.to_dtype_func(itemp, value)
* else:
*/
__pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":982
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<<
* else:
* memoryview.assign_item_from_object(self, itemp, value)
*/
__pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 982, __pyx_L1_error)
/* "View.MemoryView":981
*
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL: # <<<<<<<<<<<<<<
* self.to_dtype_func(itemp, value)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":984
* self.to_dtype_func(itemp, value)
* else:
* memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<<
*
* @property
*/
/*else*/ {
__pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 984, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
__pyx_L3:;
/* "View.MemoryView":980
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
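/* The base property returns from_object, i.e. the object the slice was
 * originally created from. */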
/* "View.MemoryView":987
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.from_object
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":988
* @property
* def base(self):
* return self.from_object # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->from_object);
__pyx_r = __pyx_v_self->from_object;
goto __pyx_L0;
/* "View.MemoryView":987
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.from_object
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
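/* Pickling is unsupported for _memoryviewslice: both __reduce_cython__ and
 * __setstate_cython__ simply raise TypeError. */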
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__25, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__26, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
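/* memoryview_fromslice: wrap a C-level __Pyx_memviewslice in a new
 * _memoryviewslice object.  It copies the slice, takes a reference on it,
 * rebuilds the Py_buffer view (buf, ndim, shape, strides, suboffsets, len),
 * chooses writable or read-only buffer flags and records the conversion
 * callbacks before returning the new object. */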
/* "View.MemoryView":994
*
* @cname('__pyx_memoryview_fromslice')
* cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
* int ndim,
* object (*to_object_func)(char *),
*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_TypeInfo *__pyx_t_4;
Py_buffer __pyx_t_5;
Py_ssize_t *__pyx_t_6;
Py_ssize_t *__pyx_t_7;
Py_ssize_t *__pyx_t_8;
Py_ssize_t __pyx_t_9;
__Pyx_RefNannySetupContext("memoryview_fromslice", 0);
/* "View.MemoryView":1002
* cdef _memoryviewslice result
*
* if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
* return None
*
*/
__pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1003
*
* if <PyObject *> memviewslice.memview == Py_None:
* return None # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
/* "View.MemoryView":1002
* cdef _memoryviewslice result
*
* if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
* return None
*
*/
}
/* "View.MemoryView":1008
*
*
* result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<<
*
* result.from_slice = memviewslice
*/
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1008, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1008, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None);
__Pyx_INCREF(__pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1008, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":1010
* result = _memoryviewslice(None, 0, dtype_is_object)
*
* result.from_slice = memviewslice # <<<<<<<<<<<<<<
* __PYX_INC_MEMVIEW(&memviewslice, 1)
*
*/
__pyx_v_result->from_slice = __pyx_v_memviewslice;
/* "View.MemoryView":1011
*
* result.from_slice = memviewslice
* __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<<
*
* result.from_object = (<memoryview> memviewslice.memview).base
*/
__PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1);
/* "View.MemoryView":1013
* __PYX_INC_MEMVIEW(&memviewslice, 1)
*
* result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<<
* result.typeinfo = memviewslice.memview.typeinfo
*
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__Pyx_GOTREF(__pyx_v_result->from_object);
__Pyx_DECREF(__pyx_v_result->from_object);
__pyx_v_result->from_object = __pyx_t_2;
__pyx_t_2 = 0;
/* "View.MemoryView":1014
*
* result.from_object = (<memoryview> memviewslice.memview).base
* result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<<
*
* result.view = memviewslice.memview.view
*/
__pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo;
__pyx_v_result->__pyx_base.typeinfo = __pyx_t_4;
/* "View.MemoryView":1016
* result.typeinfo = memviewslice.memview.typeinfo
*
* result.view = memviewslice.memview.view # <<<<<<<<<<<<<<
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim
*/
__pyx_t_5 = __pyx_v_memviewslice.memview->view;
__pyx_v_result->__pyx_base.view = __pyx_t_5;
/* "View.MemoryView":1017
*
* result.view = memviewslice.memview.view
* result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<<
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None
*/
__pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data);
/* "View.MemoryView":1018
* result.view = memviewslice.memview.view
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &result.view).obj = Py_None
* Py_INCREF(Py_None)
*/
__pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim;
/* "View.MemoryView":1019
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None;
/* "View.MemoryView":1020
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
*/
Py_INCREF(Py_None);
/* "View.MemoryView":1022
* Py_INCREF(Py_None)
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<<
* result.flags = PyBUF_RECORDS
* else:
*/
__pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1023
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
* result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<<
* else:
* result.flags = PyBUF_RECORDS_RO
*/
__pyx_v_result->__pyx_base.flags = PyBUF_RECORDS;
/* "View.MemoryView":1022
* Py_INCREF(Py_None)
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<<
* result.flags = PyBUF_RECORDS
* else:
*/
goto __pyx_L4;
}
/* "View.MemoryView":1025
* result.flags = PyBUF_RECORDS
* else:
* result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<<
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape
*/
/*else*/ {
__pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO;
}
__pyx_L4:;
/* "View.MemoryView":1027
* result.flags = PyBUF_RECORDS_RO
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<<
* result.view.strides = <Py_ssize_t *> result.from_slice.strides
*
*/
__pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape);
/* "View.MemoryView":1028
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape
* result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides);
/* "View.MemoryView":1031
*
*
* result.view.suboffsets = NULL # <<<<<<<<<<<<<<
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0:
*/
__pyx_v_result->__pyx_base.view.suboffsets = NULL;
/* "View.MemoryView":1032
*
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<<
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
*/
__pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_v_suboffset = (__pyx_t_6[0]);
/* "View.MemoryView":1033
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break
*/
__pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1034
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets);
/* "View.MemoryView":1035
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break # <<<<<<<<<<<<<<
*
* result.view.len = result.view.itemsize
*/
goto __pyx_L6_break;
/* "View.MemoryView":1033
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break
*/
}
}
__pyx_L6_break:;
/* "View.MemoryView":1037
* break
*
* result.view.len = result.view.itemsize # <<<<<<<<<<<<<<
* for length in result.view.shape[:ndim]:
* result.view.len *= length
*/
__pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
/* "View.MemoryView":1038
*
* result.view.len = result.view.itemsize
* for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<<
* result.view.len *= length
*
*/
__pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1038, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":1039
* result.view.len = result.view.itemsize
* for length in result.view.shape[:ndim]:
* result.view.len *= length # <<<<<<<<<<<<<<
*
* result.to_object_func = to_object_func
*/
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1039, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1039, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1039, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
}
/* "View.MemoryView":1041
* result.view.len *= length
*
* result.to_object_func = to_object_func # <<<<<<<<<<<<<<
* result.to_dtype_func = to_dtype_func
*
*/
__pyx_v_result->to_object_func = __pyx_v_to_object_func;
/* "View.MemoryView":1042
*
* result.to_object_func = to_object_func
* result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<<
*
* return result
*/
__pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func;
/* "View.MemoryView":1044
* result.to_dtype_func = to_dtype_func
*
* return result # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":994
*
* @cname('__pyx_memoryview_fromslice')
* cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
* int ndim,
* object (*to_object_func)(char *),
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
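/* get_slice_from_memview: obtain a pointer to the slice behind a memoryview.
 * A _memoryviewslice already owns one (&obj.from_slice); a plain memoryview
 * is first copied into the caller-provided mslice via slice_copy. */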
/* "View.MemoryView":1047
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
* cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *mslice):
* cdef _memoryviewslice obj
*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) {
struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0;
__Pyx_memviewslice *__pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("get_slice_from_memview", 0);
/* "View.MemoryView":1050
* __Pyx_memviewslice *mslice):
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* obj = memview
* return &obj.from_slice
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1051
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice):
* obj = memview # <<<<<<<<<<<<<<
* return &obj.from_slice
* else:
*/
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1051, __pyx_L1_error)
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":1052
* if isinstance(memview, _memoryviewslice):
* obj = memview
* return &obj.from_slice # <<<<<<<<<<<<<<
* else:
* slice_copy(memview, mslice)
*/
__pyx_r = (&__pyx_v_obj->from_slice);
goto __pyx_L0;
/* "View.MemoryView":1050
* __Pyx_memviewslice *mslice):
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* obj = memview
* return &obj.from_slice
*/
}
/* "View.MemoryView":1054
* return &obj.from_slice
* else:
* slice_copy(memview, mslice) # <<<<<<<<<<<<<<
* return mslice
*
*/
/*else*/ {
__pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice);
/* "View.MemoryView":1055
* else:
* slice_copy(memview, mslice)
* return mslice # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_slice_copy')
*/
__pyx_r = __pyx_v_mslice;
goto __pyx_L0;
}
/* "View.MemoryView":1047
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
* cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *mslice):
* cdef _memoryviewslice obj
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_WriteUnraisable("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_obj);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
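/* slice_copy: fill a __Pyx_memviewslice from a memoryview's Py_buffer,
 * copying shape, strides and suboffsets per dimension; a missing suboffsets
 * array is recorded as -1 (direct access) in every dimension. */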
/* "View.MemoryView":1058
*
* @cname('__pyx_memoryview_slice_copy')
* cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<<
* cdef int dim
* cdef (Py_ssize_t*) shape, strides, suboffsets
*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) {
int __pyx_v_dim;
Py_ssize_t *__pyx_v_shape;
Py_ssize_t *__pyx_v_strides;
Py_ssize_t *__pyx_v_suboffsets;
__Pyx_RefNannyDeclarations
Py_ssize_t *__pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
Py_ssize_t __pyx_t_5;
__Pyx_RefNannySetupContext("slice_copy", 0);
/* "View.MemoryView":1062
* cdef (Py_ssize_t*) shape, strides, suboffsets
*
* shape = memview.view.shape # <<<<<<<<<<<<<<
* strides = memview.view.strides
* suboffsets = memview.view.suboffsets
*/
__pyx_t_1 = __pyx_v_memview->view.shape;
__pyx_v_shape = __pyx_t_1;
/* "View.MemoryView":1063
*
* shape = memview.view.shape
* strides = memview.view.strides # <<<<<<<<<<<<<<
* suboffsets = memview.view.suboffsets
*
*/
__pyx_t_1 = __pyx_v_memview->view.strides;
__pyx_v_strides = __pyx_t_1;
/* "View.MemoryView":1064
* shape = memview.view.shape
* strides = memview.view.strides
* suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<<
*
* dst.memview = <__pyx_memoryview *> memview
*/
__pyx_t_1 = __pyx_v_memview->view.suboffsets;
__pyx_v_suboffsets = __pyx_t_1;
/* "View.MemoryView":1066
* suboffsets = memview.view.suboffsets
*
* dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<<
* dst.data = <char *> memview.view.buf
*
*/
__pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview);
/* "View.MemoryView":1067
*
* dst.memview = <__pyx_memoryview *> memview
* dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<<
*
* for dim in range(memview.view.ndim):
*/
__pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf);
/* "View.MemoryView":1069
* dst.data = <char *> memview.view.buf
*
* for dim in range(memview.view.ndim): # <<<<<<<<<<<<<<
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim]
*/
__pyx_t_2 = __pyx_v_memview->view.ndim;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_dim = __pyx_t_4;
/* "View.MemoryView":1070
*
* for dim in range(memview.view.ndim):
* dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<<
* dst.strides[dim] = strides[dim]
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
*/
(__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]);
/* "View.MemoryView":1071
* for dim in range(memview.view.ndim):
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<<
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
*
*/
(__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]);
/* "View.MemoryView":1072
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim]
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_object')
*/
if ((__pyx_v_suboffsets != 0)) {
__pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]);
} else {
__pyx_t_5 = -1L;
}
(__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5;
}
/* "View.MemoryView":1058
*
* @cname('__pyx_memoryview_slice_copy')
* cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<<
* cdef int dim
* cdef (Py_ssize_t*) shape, strides, suboffsets
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
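/* memoryview_copy: snapshot the memoryview into a temporary slice and hand
 * it to memoryview_copy_from_slice. */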
/* "View.MemoryView":1075
*
* @cname('__pyx_memoryview_copy_object')
* cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) {
__Pyx_memviewslice __pyx_v_memviewslice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("memoryview_copy", 0);
/* "View.MemoryView":1078
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
* slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<<
* return memoryview_copy_from_slice(memview, &memviewslice)
*
*/
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice));
/* "View.MemoryView":1079
* cdef __Pyx_memviewslice memviewslice
* slice_copy(memview, &memviewslice)
* return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_object_from_slice')
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1079, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":1075
*
* @cname('__pyx_memoryview_copy_object')
* cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
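/* memoryview_copy_from_slice: build a fresh memoryview object for a slice,
 * propagating the dtype conversion callbacks when the source is itself a
 * _memoryviewslice (NULL otherwise). */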
/* "View.MemoryView":1082
*
* @cname('__pyx_memoryview_copy_object_from_slice')
* cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
* """
* Create a new memoryview object from a given memoryview object and slice.
*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) {
PyObject *(*__pyx_v_to_object_func)(char *);
int (*__pyx_v_to_dtype_func)(char *, PyObject *);
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *(*__pyx_t_3)(char *);
int (*__pyx_t_4)(char *, PyObject *);
PyObject *__pyx_t_5 = NULL;
__Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0);
/* "View.MemoryView":1089
* cdef int (*to_dtype_func)(char *, object) except 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1090
*
* if isinstance(memview, _memoryviewslice):
* to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<<
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
* else:
*/
__pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func;
__pyx_v_to_object_func = __pyx_t_3;
/* "View.MemoryView":1091
* if isinstance(memview, _memoryviewslice):
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<<
* else:
* to_object_func = NULL
*/
__pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func;
__pyx_v_to_dtype_func = __pyx_t_4;
/* "View.MemoryView":1089
* cdef int (*to_dtype_func)(char *, object) except 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
*/
goto __pyx_L3;
}
/* "View.MemoryView":1093
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
* else:
* to_object_func = NULL # <<<<<<<<<<<<<<
* to_dtype_func = NULL
*
*/
/*else*/ {
__pyx_v_to_object_func = NULL;
/* "View.MemoryView":1094
* else:
* to_object_func = NULL
* to_dtype_func = NULL # <<<<<<<<<<<<<<
*
* return memoryview_fromslice(memviewslice[0], memview.view.ndim,
*/
__pyx_v_to_dtype_func = NULL;
}
__pyx_L3:;
/* "View.MemoryView":1096
* to_dtype_func = NULL
*
* return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<<
* to_object_func, to_dtype_func,
* memview.dtype_is_object)
*/
__Pyx_XDECREF(__pyx_r);
/* "View.MemoryView":1098
* return memoryview_fromslice(memviewslice[0], memview.view.ndim,
* to_object_func, to_dtype_func,
* memview.dtype_is_object) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1096, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":1082
*
* @cname('__pyx_memoryview_copy_object_from_slice')
* cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
* """
* Create a new memoryview object from a given memoryview object and slice.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
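/* abs_py_ssize_t: nogil absolute-value helper used when comparing stride
 * magnitudes below. */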
/* "View.MemoryView":1104
*
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<<
* if arg < 0:
* return -arg
*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) {
Py_ssize_t __pyx_r;
int __pyx_t_1;
/* "View.MemoryView":1105
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0: # <<<<<<<<<<<<<<
* return -arg
* else:
*/
__pyx_t_1 = ((__pyx_v_arg < 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1106
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0:
* return -arg # <<<<<<<<<<<<<<
* else:
* return arg
*/
__pyx_r = (-__pyx_v_arg);
goto __pyx_L0;
/* "View.MemoryView":1105
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0: # <<<<<<<<<<<<<<
* return -arg
* else:
*/
}
/* "View.MemoryView":1108
* return -arg
* else:
* return arg # <<<<<<<<<<<<<<
*
* @cname('__pyx_get_best_slice_order')
*/
/*else*/ {
__pyx_r = __pyx_v_arg;
goto __pyx_L0;
}
/* "View.MemoryView":1104
*
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<<
* if arg < 0:
* return -arg
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
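/* get_best_order: pick 'C' or 'F' as the preferred copy order by comparing
 * the stride of the last non-trivial dimension (C candidate) with that of
 * the first (Fortran candidate); ties go to 'C'. */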
/* "View.MemoryView":1111
*
* @cname('__pyx_get_best_slice_order')
* cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<<
* """
* Figure out the best memory access order for a given slice.
*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) {
int __pyx_v_i;
Py_ssize_t __pyx_v_c_stride;
Py_ssize_t __pyx_v_f_stride;
char __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
/* "View.MemoryView":1116
* """
* cdef int i
* cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<<
* cdef Py_ssize_t f_stride = 0
*
*/
__pyx_v_c_stride = 0;
/* "View.MemoryView":1117
* cdef int i
* cdef Py_ssize_t c_stride = 0
* cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<<
*
* for i in range(ndim - 1, -1, -1):
*/
__pyx_v_f_stride = 0;
/* "View.MemoryView":1119
* cdef Py_ssize_t f_stride = 0
*
* for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i]
*/
for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":1120
*
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* c_stride = mslice.strides[i]
* break
*/
__pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1121
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i] # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1122
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i]
* break # <<<<<<<<<<<<<<
*
* for i in range(ndim):
*/
goto __pyx_L4_break;
/* "View.MemoryView":1120
*
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* c_stride = mslice.strides[i]
* break
*/
}
}
__pyx_L4_break:;
/* "View.MemoryView":1124
* break
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i]
*/
__pyx_t_1 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_1;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1125
*
* for i in range(ndim):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* f_stride = mslice.strides[i]
* break
*/
__pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1126
* for i in range(ndim):
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i] # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1127
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i]
* break # <<<<<<<<<<<<<<
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):
*/
goto __pyx_L7_break;
/* "View.MemoryView":1125
*
* for i in range(ndim):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* f_stride = mslice.strides[i]
* break
*/
}
}
__pyx_L7_break:;
/* "View.MemoryView":1129
* break
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<<
* return 'C'
* else:
*/
__pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1130
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):
* return 'C' # <<<<<<<<<<<<<<
* else:
* return 'F'
*/
__pyx_r = 'C';
goto __pyx_L0;
/* "View.MemoryView":1129
* break
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<<
* return 'C'
* else:
*/
}
/* "View.MemoryView":1132
* return 'C'
* else:
* return 'F' # <<<<<<<<<<<<<<
*
* @cython.cdivision(True)
*/
/*else*/ {
__pyx_r = 'F';
goto __pyx_L0;
}
/* "View.MemoryView":1111
*
* @cname('__pyx_get_best_slice_order')
* cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<<
* """
* Figure out the best memory access order for a given slice.
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
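/* _copy_strided_to_strided: recursive element copy between two strided
 * buffers of equal shape.  The 1-D base case collapses to a single memcpy
 * when both strides equal the item size, otherwise it copies item by item;
 * higher dimensions recurse with the leading dimension peeled off. */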
/* "View.MemoryView":1135
*
* @cython.cdivision(True)
* cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
* char *dst_data, Py_ssize_t *dst_strides,
* Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
*/
static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent;
Py_ssize_t __pyx_v_dst_extent;
Py_ssize_t __pyx_v_src_stride;
Py_ssize_t __pyx_v_dst_stride;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
Py_ssize_t __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
/* "View.MemoryView":1142
*
* cdef Py_ssize_t i
* cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0]
*/
__pyx_v_src_extent = (__pyx_v_src_shape[0]);
/* "View.MemoryView":1143
* cdef Py_ssize_t i
* cdef Py_ssize_t src_extent = src_shape[0]
* cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t src_stride = src_strides[0]
* cdef Py_ssize_t dst_stride = dst_strides[0]
*/
__pyx_v_dst_extent = (__pyx_v_dst_shape[0]);
/* "View.MemoryView":1144
* cdef Py_ssize_t src_extent = src_shape[0]
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
*/
__pyx_v_src_stride = (__pyx_v_src_strides[0]);
/* "View.MemoryView":1145
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0]
* cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<<
*
* if ndim == 1:
*/
__pyx_v_dst_stride = (__pyx_v_dst_strides[0]);
/* "View.MemoryView":1147
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
*/
__pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1148
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
__pyx_t_2 = ((__pyx_v_src_stride > 0) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L5_bool_binop_done;
}
__pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L5_bool_binop_done;
}
/* "View.MemoryView":1149
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<<
* memcpy(dst_data, src_data, itemsize * dst_extent)
* else:
*/
__pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize);
if (__pyx_t_2) {
__pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride));
}
__pyx_t_3 = (__pyx_t_2 != 0);
__pyx_t_1 = __pyx_t_3;
__pyx_L5_bool_binop_done:;
/* "View.MemoryView":1148
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
if (__pyx_t_1) {
/* "View.MemoryView":1150
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<<
* else:
* for i in range(dst_extent):
*/
(void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent)));
/* "View.MemoryView":1148
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
goto __pyx_L4;
}
/* "View.MemoryView":1152
* memcpy(dst_data, src_data, itemsize * dst_extent)
* else:
* for i in range(dst_extent): # <<<<<<<<<<<<<<
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride
*/
/*else*/ {
__pyx_t_4 = __pyx_v_dst_extent;
__pyx_t_5 = __pyx_t_4;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1153
* else:
* for i in range(dst_extent):
* memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<<
* src_data += src_stride
* dst_data += dst_stride
*/
(void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize));
/* "View.MemoryView":1154
* for i in range(dst_extent):
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride # <<<<<<<<<<<<<<
* dst_data += dst_stride
* else:
*/
__pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
/* "View.MemoryView":1155
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride
* dst_data += dst_stride # <<<<<<<<<<<<<<
* else:
* for i in range(dst_extent):
*/
__pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
}
}
__pyx_L4:;
/* "View.MemoryView":1147
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
*/
goto __pyx_L3;
}
/* "View.MemoryView":1157
* dst_data += dst_stride
* else:
* for i in range(dst_extent): # <<<<<<<<<<<<<<
* _copy_strided_to_strided(src_data, src_strides + 1,
* dst_data, dst_strides + 1,
*/
/*else*/ {
__pyx_t_4 = __pyx_v_dst_extent;
__pyx_t_5 = __pyx_t_4;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1158
* else:
* for i in range(dst_extent):
* _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<<
* dst_data, dst_strides + 1,
* src_shape + 1, dst_shape + 1,
*/
_copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize);
/* "View.MemoryView":1162
* src_shape + 1, dst_shape + 1,
* ndim - 1, itemsize)
* src_data += src_stride # <<<<<<<<<<<<<<
* dst_data += dst_stride
*
*/
__pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
/* "View.MemoryView":1163
* ndim - 1, itemsize)
* src_data += src_stride
* dst_data += dst_stride # <<<<<<<<<<<<<<
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src,
*/
__pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
}
}
__pyx_L3:;
/* "View.MemoryView":1135
*
* @cython.cdivision(True)
* cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
* char *dst_data, Py_ssize_t *dst_strides,
* Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
*/
/* function exit code */
}
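/* copy_strided_to_strided: thin wrapper that feeds the data/stride/shape
 * fields of two slices into the recursive helper above. */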
/* "View.MemoryView":1165
* dst_data += dst_stride
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
*/
static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
/* "View.MemoryView":1168
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
* _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<<
* src.shape, dst.shape, ndim, itemsize)
*
*/
_copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize);
/* "View.MemoryView":1165
* dst_data += dst_stride
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
*/
/* function exit code */
}
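/* slice_get_size: total number of bytes covered by the slice, i.e. the item
 * size multiplied by every extent. */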
/* "View.MemoryView":1172
*
* @cname('__pyx_memoryview_slice_get_size')
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<<
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef int i
*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) {
int __pyx_v_i;
Py_ssize_t __pyx_v_size;
Py_ssize_t __pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
/* "View.MemoryView":1175
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef int i
* cdef Py_ssize_t size = src.memview.view.itemsize # <<<<<<<<<<<<<<
*
* for i in range(ndim):
*/
__pyx_t_1 = __pyx_v_src->memview->view.itemsize;
__pyx_v_size = __pyx_t_1;
/* "View.MemoryView":1177
* cdef Py_ssize_t size = src.memview.view.itemsize
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* size *= src.shape[i]
*
*/
__pyx_t_2 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1178
*
* for i in range(ndim):
* size *= src.shape[i] # <<<<<<<<<<<<<<
*
* return size
*/
__pyx_v_size = (__pyx_v_size * (__pyx_v_src->shape[__pyx_v_i]));
}
/* "View.MemoryView":1180
* size *= src.shape[i]
*
* return size # <<<<<<<<<<<<<<
*
* @cname('__pyx_fill_contig_strides_array')
*/
__pyx_r = __pyx_v_size;
goto __pyx_L0;
/* "View.MemoryView":1172
*
* @cname('__pyx_memoryview_slice_get_size')
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<<
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef int i
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
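/* fill_contig_strides_array: write contiguous strides for the given shape in
 * 'C' or 'F' order, starting from `stride` (normally the item size), and
 * return the total byte step past the whole block. */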
/* "View.MemoryView":1183
*
* @cname('__pyx_fill_contig_strides_array')
* cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<<
* Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
* int ndim, char order) nogil:
*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) {
int __pyx_v_idx;
Py_ssize_t __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
/* "View.MemoryView":1192
* cdef int idx
*
* if order == 'F': # <<<<<<<<<<<<<<
* for idx in range(ndim):
* strides[idx] = stride
*/
__pyx_t_1 = ((__pyx_v_order == 'F') != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1193
*
* if order == 'F':
* for idx in range(ndim): # <<<<<<<<<<<<<<
* strides[idx] = stride
* stride = stride * shape[idx]
*/
__pyx_t_2 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_idx = __pyx_t_4;
/* "View.MemoryView":1194
* if order == 'F':
* for idx in range(ndim):
* strides[idx] = stride # <<<<<<<<<<<<<<
* stride = stride * shape[idx]
* else:
*/
(__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
/* "View.MemoryView":1195
* for idx in range(ndim):
* strides[idx] = stride
* stride = stride * shape[idx] # <<<<<<<<<<<<<<
* else:
* for idx in range(ndim - 1, -1, -1):
*/
__pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
}
/* "View.MemoryView":1192
* cdef int idx
*
* if order == 'F': # <<<<<<<<<<<<<<
* for idx in range(ndim):
* strides[idx] = stride
*/
goto __pyx_L3;
}
/* "View.MemoryView":1197
* stride = stride * shape[idx]
* else:
* for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* strides[idx] = stride
* stride = stride * shape[idx]
*/
/*else*/ {
for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) {
__pyx_v_idx = __pyx_t_2;
/* "View.MemoryView":1198
* else:
* for idx in range(ndim - 1, -1, -1):
* strides[idx] = stride # <<<<<<<<<<<<<<
* stride = stride * shape[idx]
*
*/
(__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
/* "View.MemoryView":1199
* for idx in range(ndim - 1, -1, -1):
* strides[idx] = stride
* stride = stride * shape[idx] # <<<<<<<<<<<<<<
*
* return stride
*/
__pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
}
}
__pyx_L3:;
/* "View.MemoryView":1201
* stride = stride * shape[idx]
*
* return stride # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_data_to_temp')
*/
__pyx_r = __pyx_v_stride;
goto __pyx_L0;
/* "View.MemoryView":1183
*
* @cname('__pyx_fill_contig_strides_array')
* cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<<
* Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
* int ndim, char order) nogil:
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
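/* copy_data_to_temp: allocate a contiguous scratch buffer for the slice,
 * describe it in tmpslice (contiguous strides in the requested order,
 * suboffsets cleared to -1, length-1 dimensions given stride 0) and copy the
 * data into it, using a single memcpy when the source is already contiguous
 * in that order. */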
/* "View.MemoryView":1204
*
* @cname('__pyx_memoryview_copy_data_to_temp')
* cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *tmpslice,
* char order,
*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) {
int __pyx_v_i;
void *__pyx_v_result;
size_t __pyx_v_itemsize;
size_t __pyx_v_size;
void *__pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
struct __pyx_memoryview_obj *__pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
/* "View.MemoryView":1215
* cdef void *result
*
* cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<<
* cdef size_t size = slice_get_size(src, ndim)
*
*/
__pyx_t_1 = __pyx_v_src->memview->view.itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":1216
*
* cdef size_t itemsize = src.memview.view.itemsize
* cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<<
*
* result = malloc(size)
*/
__pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim);
/* "View.MemoryView":1218
* cdef size_t size = slice_get_size(src, ndim)
*
* result = malloc(size) # <<<<<<<<<<<<<<
* if not result:
* _err(MemoryError, NULL)
*/
__pyx_v_result = malloc(__pyx_v_size);
/* "View.MemoryView":1219
*
* result = malloc(size)
* if not result: # <<<<<<<<<<<<<<
* _err(MemoryError, NULL)
*
*/
__pyx_t_2 = ((!(__pyx_v_result != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1220
* result = malloc(size)
* if not result:
* _err(MemoryError, NULL) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1220, __pyx_L1_error)
/* "View.MemoryView":1219
*
* result = malloc(size)
* if not result: # <<<<<<<<<<<<<<
* _err(MemoryError, NULL)
*
*/
}
/* "View.MemoryView":1223
*
*
* tmpslice.data = <char *> result # <<<<<<<<<<<<<<
* tmpslice.memview = src.memview
* for i in range(ndim):
*/
__pyx_v_tmpslice->data = ((char *)__pyx_v_result);
/* "View.MemoryView":1224
*
* tmpslice.data = <char *> result
* tmpslice.memview = src.memview # <<<<<<<<<<<<<<
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i]
*/
__pyx_t_4 = __pyx_v_src->memview;
__pyx_v_tmpslice->memview = __pyx_t_4;
/* "View.MemoryView":1225
* tmpslice.data = <char *> result
* tmpslice.memview = src.memview
* for i in range(ndim): # <<<<<<<<<<<<<<
* tmpslice.shape[i] = src.shape[i]
* tmpslice.suboffsets[i] = -1
*/
__pyx_t_3 = __pyx_v_ndim;
__pyx_t_5 = __pyx_t_3;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1226
* tmpslice.memview = src.memview
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<<
* tmpslice.suboffsets[i] = -1
*
*/
(__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]);
/* "View.MemoryView":1227
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i]
* tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<<
*
* fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize,
*/
(__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L;
}
/* "View.MemoryView":1229
* tmpslice.suboffsets[i] = -1
*
* fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<<
* ndim, order)
*
*/
(void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order));
/* "View.MemoryView":1233
*
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if tmpslice.shape[i] == 1:
* tmpslice.strides[i] = 0
*/
__pyx_t_3 = __pyx_v_ndim;
__pyx_t_5 = __pyx_t_3;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1234
*
* for i in range(ndim):
* if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<<
* tmpslice.strides[i] = 0
*
*/
__pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1235
* for i in range(ndim):
* if tmpslice.shape[i] == 1:
* tmpslice.strides[i] = 0 # <<<<<<<<<<<<<<
*
* if slice_is_contig(src[0], order, ndim):
*/
(__pyx_v_tmpslice->strides[__pyx_v_i]) = 0;
/* "View.MemoryView":1234
*
* for i in range(ndim):
* if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<<
* tmpslice.strides[i] = 0
*
*/
}
}
/* "View.MemoryView":1237
* tmpslice.strides[i] = 0
*
* if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<<
* memcpy(result, src.data, size)
* else:
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1238
*
* if slice_is_contig(src[0], order, ndim):
* memcpy(result, src.data, size) # <<<<<<<<<<<<<<
* else:
* copy_strided_to_strided(src, tmpslice, ndim, itemsize)
*/
(void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size));
/* "View.MemoryView":1237
* tmpslice.strides[i] = 0
*
* if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<<
* memcpy(result, src.data, size)
* else:
*/
goto __pyx_L9;
}
/* "View.MemoryView":1240
* memcpy(result, src.data, size)
* else:
* copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<<
*
* return result
*/
/*else*/ {
copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize);
}
__pyx_L9:;
/* "View.MemoryView":1242
* copy_strided_to_strided(src, tmpslice, ndim, itemsize)
*
* return result # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "View.MemoryView":1204
*
* @cname('__pyx_memoryview_copy_data_to_temp')
* cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *tmpslice,
* char order,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = NULL;
__pyx_L0:;
return __pyx_r;
}
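/* _err_extents and _err_dim (below): helpers that re-acquire the GIL so the
 * nogil copy routines can raise Python exceptions; both report failure
 * through a -1 return value. */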
/* "View.MemoryView":1247
*
* @cname('__pyx_memoryview_err_extents')
* cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
*/
static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err_extents", 0);
/* "View.MemoryView":1250
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
* (i, extent1, extent2)) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_err_dim')
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1250, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1250, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1250, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1250, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_3 = 0;
/* "View.MemoryView":1249
* cdef int _err_extents(int i, Py_ssize_t extent1,
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<<
* (i, extent1, extent2))
*
*/
__pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__PYX_ERR(1, 1249, __pyx_L1_error)
/* "View.MemoryView":1247
*
* @cname('__pyx_memoryview_err_extents')
* cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
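/* Error helper generated for _err_dim: with the GIL held, it decodes the
 * ASCII message, formats it with the dimension index, raises the supplied
 * exception type, and always returns -1. */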
/* "View.MemoryView":1253
*
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii') % dim)
*
*/
static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err_dim", 0);
__Pyx_INCREF(__pyx_v_error);
/* "View.MemoryView":1254
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil:
* raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_err')
*/
__pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_INCREF(__pyx_v_error);
__pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_2)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
if (!__pyx_t_2) {
__pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_GOTREF(__pyx_t_1);
} else {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_3)) {
PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_4};
__pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) {
PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_4};
__pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
} else
#endif
{
__pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __pyx_t_2 = NULL;
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 1254, __pyx_L1_error)
/* "View.MemoryView":1253
*
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii') % dim)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_XDECREF(__pyx_v_error);
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
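/* Error helper generated for _err: with the GIL held, it raises
 * error(msg.decode('ascii')) when a message is supplied, otherwise it
 * re-raises the error object itself; the return value is always -1. */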
/* "View.MemoryView":1257
*
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
* if msg != NULL:
* raise error(msg.decode('ascii'))
*/
static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err", 0);
__Pyx_INCREF(__pyx_v_error);
/* "View.MemoryView":1258
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii'))
* else:
*/
__pyx_t_1 = ((__pyx_v_msg != NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":1259
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL:
* raise error(msg.decode('ascii')) # <<<<<<<<<<<<<<
* else:
* raise error
*/
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1259, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_error);
__pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_4, function);
}
}
if (!__pyx_t_5) {
__pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1259, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_GOTREF(__pyx_t_2);
} else {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_4)) {
PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_3};
__pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1259, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) {
PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_3};
__pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1259, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else
#endif
{
__pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 1259, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = NULL;
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1259, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
}
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 1259, __pyx_L1_error)
/* "View.MemoryView":1258
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii'))
* else:
*/
}
/* "View.MemoryView":1261
* raise error(msg.decode('ascii'))
* else:
* raise error # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_contents')
*/
/*else*/ {
__Pyx_Raise(__pyx_v_error, 0, 0, 0);
__PYX_ERR(1, 1261, __pyx_L1_error)
}
/* "View.MemoryView":1257
*
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
* if msg != NULL:
* raise error(msg.decode('ascii'))
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_XDECREF(__pyx_v_error);
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
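/* memoryview_copy_contents is the main element-copy routine. It broadcasts
 * leading dimensions so src and dst end up with the same ndim, verifies the
 * remaining extents match (an extent of 1 in src broadcasts with stride 0),
 * rejects indirect (suboffset) dimensions, routes overlapping sources
 * through a contiguous temporary buffer, performs a single memcpy when both
 * slices are C- or F-contiguous, and otherwise falls back to
 * copy_strided_to_strided. Object refcounts are adjusted around the raw
 * copies via refcount_copying. Returns 0 on success and -1 on error. */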
/* "View.MemoryView":1264
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) {
void *__pyx_v_tmpdata;
size_t __pyx_v_itemsize;
int __pyx_v_i;
char __pyx_v_order;
int __pyx_v_broadcasting;
int __pyx_v_direct_copy;
__Pyx_memviewslice __pyx_v_tmp;
int __pyx_v_ndim;
int __pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
void *__pyx_t_7;
int __pyx_t_8;
/* "View.MemoryView":1272
* Check for overlapping memory and verify the shapes.
* """
* cdef void *tmpdata = NULL # <<<<<<<<<<<<<<
* cdef size_t itemsize = src.memview.view.itemsize
* cdef int i
*/
__pyx_v_tmpdata = NULL;
/* "View.MemoryView":1273
* """
* cdef void *tmpdata = NULL
* cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<<
* cdef int i
* cdef char order = get_best_order(&src, src_ndim)
*/
__pyx_t_1 = __pyx_v_src.memview->view.itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":1275
* cdef size_t itemsize = src.memview.view.itemsize
* cdef int i
* cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<<
* cdef bint broadcasting = False
* cdef bint direct_copy = False
*/
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim);
/* "View.MemoryView":1276
* cdef int i
* cdef char order = get_best_order(&src, src_ndim)
* cdef bint broadcasting = False # <<<<<<<<<<<<<<
* cdef bint direct_copy = False
* cdef __Pyx_memviewslice tmp
*/
__pyx_v_broadcasting = 0;
/* "View.MemoryView":1277
* cdef char order = get_best_order(&src, src_ndim)
* cdef bint broadcasting = False
* cdef bint direct_copy = False # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice tmp
*
*/
__pyx_v_direct_copy = 0;
/* "View.MemoryView":1280
* cdef __Pyx_memviewslice tmp
*
* if src_ndim < dst_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
*/
__pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1281
*
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<<
* elif dst_ndim < src_ndim:
* broadcast_leading(&dst, dst_ndim, src_ndim)
*/
__pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim);
/* "View.MemoryView":1280
* cdef __Pyx_memviewslice tmp
*
* if src_ndim < dst_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
*/
goto __pyx_L3;
}
/* "View.MemoryView":1282
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
*/
__pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1283
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
* broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<<
*
* cdef int ndim = max(src_ndim, dst_ndim)
*/
__pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim);
/* "View.MemoryView":1282
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
*/
}
__pyx_L3:;
/* "View.MemoryView":1285
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
* cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<<
*
* for i in range(ndim):
*/
__pyx_t_3 = __pyx_v_dst_ndim;
__pyx_t_4 = __pyx_v_src_ndim;
if (((__pyx_t_3 > __pyx_t_4) != 0)) {
__pyx_t_5 = __pyx_t_3;
} else {
__pyx_t_5 = __pyx_t_4;
}
__pyx_v_ndim = __pyx_t_5;
/* "View.MemoryView":1287
* cdef int ndim = max(src_ndim, dst_ndim)
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1:
*/
__pyx_t_5 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_5;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1288
*
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<<
* if src.shape[i] == 1:
* broadcasting = True
*/
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1289
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1: # <<<<<<<<<<<<<<
* broadcasting = True
* src.strides[i] = 0
*/
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1290
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1:
* broadcasting = True # <<<<<<<<<<<<<<
* src.strides[i] = 0
* else:
*/
__pyx_v_broadcasting = 1;
/* "View.MemoryView":1291
* if src.shape[i] == 1:
* broadcasting = True
* src.strides[i] = 0 # <<<<<<<<<<<<<<
* else:
* _err_extents(i, dst.shape[i], src.shape[i])
*/
(__pyx_v_src.strides[__pyx_v_i]) = 0;
/* "View.MemoryView":1289
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1: # <<<<<<<<<<<<<<
* broadcasting = True
* src.strides[i] = 0
*/
goto __pyx_L7;
}
/* "View.MemoryView":1293
* src.strides[i] = 0
* else:
* _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<<
*
* if src.suboffsets[i] >= 0:
*/
/*else*/ {
__pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1293, __pyx_L1_error)
}
__pyx_L7:;
/* "View.MemoryView":1288
*
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<<
* if src.shape[i] == 1:
* broadcasting = True
*/
}
/* "View.MemoryView":1295
* _err_extents(i, dst.shape[i], src.shape[i])
*
* if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
*/
__pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1296
*
* if src.suboffsets[i] >= 0:
* _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<<
*
* if slices_overlap(&src, &dst, ndim, itemsize):
*/
__pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1296, __pyx_L1_error)
/* "View.MemoryView":1295
* _err_extents(i, dst.shape[i], src.shape[i])
*
* if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
*/
}
}
/* "View.MemoryView":1298
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
* if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<<
*
* if not slice_is_contig(src, order, ndim):
*/
__pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1300
* if slices_overlap(&src, &dst, ndim, itemsize):
*
* if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<<
* order = get_best_order(&dst, ndim)
*
*/
__pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1301
*
* if not slice_is_contig(src, order, ndim):
* order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<<
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
*/
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim);
/* "View.MemoryView":1300
* if slices_overlap(&src, &dst, ndim, itemsize):
*
* if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<<
* order = get_best_order(&dst, ndim)
*
*/
}
/* "View.MemoryView":1303
* order = get_best_order(&dst, ndim)
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<<
* src = tmp
*
*/
__pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(1, 1303, __pyx_L1_error)
__pyx_v_tmpdata = __pyx_t_7;
/* "View.MemoryView":1304
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
* src = tmp # <<<<<<<<<<<<<<
*
* if not broadcasting:
*/
__pyx_v_src = __pyx_v_tmp;
/* "View.MemoryView":1298
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
* if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<<
*
* if not slice_is_contig(src, order, ndim):
*/
}
/* "View.MemoryView":1306
* src = tmp
*
* if not broadcasting: # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1309
*
*
* if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1310
*
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<<
* elif slice_is_contig(src, 'F', ndim):
* direct_copy = slice_is_contig(dst, 'F', ndim)
*/
__pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim);
/* "View.MemoryView":1309
*
*
* if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
*/
goto __pyx_L12;
}
/* "View.MemoryView":1311
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1312
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
* direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<<
*
* if direct_copy:
*/
__pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim);
/* "View.MemoryView":1311
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
*/
}
__pyx_L12:;
/* "View.MemoryView":1314
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
* if direct_copy: # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
__pyx_t_2 = (__pyx_v_direct_copy != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1316
* if direct_copy:
*
* refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1317
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
* memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<<
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata)
*/
(void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim)));
/* "View.MemoryView":1318
* refcount_copying(&dst, dtype_is_object, ndim, False)
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
* free(tmpdata)
* return 0
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1319
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata) # <<<<<<<<<<<<<<
* return 0
*
*/
free(__pyx_v_tmpdata);
/* "View.MemoryView":1320
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata)
* return 0 # <<<<<<<<<<<<<<
*
* if order == 'F' == get_best_order(&dst, ndim):
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":1314
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
* if direct_copy: # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
}
/* "View.MemoryView":1306
* src = tmp
*
* if not broadcasting: # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":1322
* return 0
*
* if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = (__pyx_v_order == 'F');
if (__pyx_t_2) {
__pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim));
}
__pyx_t_8 = (__pyx_t_2 != 0);
if (__pyx_t_8) {
/* "View.MemoryView":1325
*
*
* transpose_memslice(&src) # <<<<<<<<<<<<<<
* transpose_memslice(&dst)
*
*/
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1325, __pyx_L1_error)
/* "View.MemoryView":1326
*
* transpose_memslice(&src)
* transpose_memslice(&dst) # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1326, __pyx_L1_error)
/* "View.MemoryView":1322
* return 0
*
* if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":1328
* transpose_memslice(&dst)
*
* refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* copy_strided_to_strided(&src, &dst, ndim, itemsize)
* refcount_copying(&dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1329
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
* copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<<
* refcount_copying(&dst, dtype_is_object, ndim, True)
*
*/
copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize);
/* "View.MemoryView":1330
* refcount_copying(&dst, dtype_is_object, ndim, False)
* copy_strided_to_strided(&src, &dst, ndim, itemsize)
* refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
*
* free(tmpdata)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1332
* refcount_copying(&dst, dtype_is_object, ndim, True)
*
* free(tmpdata) # <<<<<<<<<<<<<<
* return 0
*
*/
free(__pyx_v_tmpdata);
/* "View.MemoryView":1333
*
* free(tmpdata)
* return 0 # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_broadcast_leading')
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":1264
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
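/* broadcast_leading shifts the slice's shape/strides/suboffsets towards the
 * trailing dimensions and fills the new leading dimensions with extent 1, a
 * copy of the original first stride (harmless because the extent is 1), and
 * suboffset -1, so a lower-dimensional slice can be broadcast against a
 * higher-dimensional destination. */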
/* "View.MemoryView":1336
*
* @cname('__pyx_memoryview_broadcast_leading')
* cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<<
* int ndim,
* int ndim_other) nogil:
*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) {
int __pyx_v_i;
int __pyx_v_offset;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
/* "View.MemoryView":1340
* int ndim_other) nogil:
* cdef int i
* cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<<
*
* for i in range(ndim - 1, -1, -1):
*/
__pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim);
/* "View.MemoryView":1342
* cdef int offset = ndim_other - ndim
*
* for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i]
*/
for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":1343
*
* for i in range(ndim - 1, -1, -1):
* mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<<
* mslice.strides[i + offset] = mslice.strides[i]
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*/
(__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]);
/* "View.MemoryView":1344
* for i in range(ndim - 1, -1, -1):
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<<
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*
*/
(__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1345
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i]
* mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<<
*
* for i in range(offset):
*/
(__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]);
}
/* "View.MemoryView":1347
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*
* for i in range(offset): # <<<<<<<<<<<<<<
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0]
*/
__pyx_t_1 = __pyx_v_offset;
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1348
*
* for i in range(offset):
* mslice.shape[i] = 1 # <<<<<<<<<<<<<<
* mslice.strides[i] = mslice.strides[0]
* mslice.suboffsets[i] = -1
*/
(__pyx_v_mslice->shape[__pyx_v_i]) = 1;
/* "View.MemoryView":1349
* for i in range(offset):
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<<
* mslice.suboffsets[i] = -1
*
*/
(__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]);
/* "View.MemoryView":1350
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0]
* mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<<
*
*
*/
(__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L;
}
/* "View.MemoryView":1336
*
* @cname('__pyx_memoryview_broadcast_leading')
* cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<<
* int ndim,
* int ndim_other) nogil:
*/
/* function exit code */
}
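/* refcount_copying: when the element dtype is Python objects, this takes the
 * GIL (via the _with_gil variant below) and increfs or decrefs every object
 * in dst before and after a raw memory copy; for non-object dtypes it is a
 * no-op. */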
/* "View.MemoryView":1358
*
* @cname('__pyx_memoryview_refcount_copying')
* cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<<
* int ndim, bint inc) nogil:
*
*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) {
int __pyx_t_1;
/* "View.MemoryView":1362
*
*
* if dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice_with_gil(dst.data, dst.shape,
* dst.strides, ndim, inc)
*/
__pyx_t_1 = (__pyx_v_dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1363
*
* if dtype_is_object:
* refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<<
* dst.strides, ndim, inc)
*
*/
__pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc);
/* "View.MemoryView":1362
*
*
* if dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice_with_gil(dst.data, dst.shape,
* dst.strides, ndim, inc)
*/
}
/* "View.MemoryView":1358
*
* @cname('__pyx_memoryview_refcount_copying')
* cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<<
* int ndim, bint inc) nogil:
*
*/
/* function exit code */
}
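/* refcount_objects_in_slice_with_gil: GIL-acquiring shim around
 * refcount_objects_in_slice so it can be invoked from nogil code paths. */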
/* "View.MemoryView":1367
*
* @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
* cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
__Pyx_RefNannyDeclarations
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0);
/* "View.MemoryView":1370
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
* refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc);
/* "View.MemoryView":1367
*
* @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
* cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
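/* refcount_objects_in_slice walks the slice recursively; at ndim == 1 it
 * Py_INCREFs or Py_DECREFs the PyObject* stored in each element, stepping
 * through memory by strides[0] at every level of the recursion. */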
/* "View.MemoryView":1373
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
* cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim, bint inc):
* cdef Py_ssize_t i
*/
static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
int __pyx_t_4;
__Pyx_RefNannySetupContext("refcount_objects_in_slice", 0);
/* "View.MemoryView":1377
* cdef Py_ssize_t i
*
* for i in range(shape[0]): # <<<<<<<<<<<<<<
* if ndim == 1:
* if inc:
*/
__pyx_t_1 = (__pyx_v_shape[0]);
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1378
*
* for i in range(shape[0]):
* if ndim == 1: # <<<<<<<<<<<<<<
* if inc:
* Py_INCREF((<PyObject **> data)[0])
*/
__pyx_t_4 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":1379
* for i in range(shape[0]):
* if ndim == 1:
* if inc: # <<<<<<<<<<<<<<
* Py_INCREF((<PyObject **> data)[0])
* else:
*/
__pyx_t_4 = (__pyx_v_inc != 0);
if (__pyx_t_4) {
/* "View.MemoryView":1380
* if ndim == 1:
* if inc:
* Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<<
* else:
* Py_DECREF((<PyObject **> data)[0])
*/
Py_INCREF((((PyObject **)__pyx_v_data)[0]));
/* "View.MemoryView":1379
* for i in range(shape[0]):
* if ndim == 1:
* if inc: # <<<<<<<<<<<<<<
* Py_INCREF((<PyObject **> data)[0])
* else:
*/
goto __pyx_L6;
}
/* "View.MemoryView":1382
* Py_INCREF((<PyObject **> data)[0])
* else:
* Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<<
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1,
*/
/*else*/ {
Py_DECREF((((PyObject **)__pyx_v_data)[0]));
}
__pyx_L6:;
/* "View.MemoryView":1378
*
* for i in range(shape[0]):
* if ndim == 1: # <<<<<<<<<<<<<<
* if inc:
* Py_INCREF((<PyObject **> data)[0])
*/
goto __pyx_L5;
}
/* "View.MemoryView":1384
* Py_DECREF((<PyObject **> data)[0])
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<<
* ndim - 1, inc)
*
*/
/*else*/ {
/* "View.MemoryView":1385
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1,
* ndim - 1, inc) # <<<<<<<<<<<<<<
*
* data += strides[0]
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc);
}
__pyx_L5:;
/* "View.MemoryView":1387
* ndim - 1, inc)
*
* data += strides[0] # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0]));
}
/* "View.MemoryView":1373
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
* cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim, bint inc):
* cdef Py_ssize_t i
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
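/* slice_assign_scalar broadcasts a single item into every element of dst.
 * Object refcounts in dst are dropped first and re-established afterwards
 * via refcount_copying, with the raw stores performed by
 * _slice_assign_scalar below. */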
/* "View.MemoryView":1393
*
* @cname('__pyx_memoryview_slice_assign_scalar')
* cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) {
/* "View.MemoryView":1396
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
* refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
* itemsize, item)
*/
__pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1397
* bint dtype_is_object) nogil:
* refcount_copying(dst, dtype_is_object, ndim, False)
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<<
* itemsize, item)
* refcount_copying(dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item);
/* "View.MemoryView":1399
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
* itemsize, item)
* refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
*
*
*/
__pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1393
*
* @cname('__pyx_memoryview_slice_assign_scalar')
* cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
*/
/* function exit code */
}
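/* _slice_assign_scalar: recursive worker that memcpy()s itemsize bytes of
 * `item` into each element of the slice, advancing by strides[0] along the
 * current dimension and recursing into the inner dimensions. */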
/* "View.MemoryView":1403
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_stride;
Py_ssize_t __pyx_v_extent;
int __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
Py_ssize_t __pyx_t_4;
/* "View.MemoryView":1407
* size_t itemsize, void *item) nogil:
* cdef Py_ssize_t i
* cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t extent = shape[0]
*
*/
__pyx_v_stride = (__pyx_v_strides[0]);
/* "View.MemoryView":1408
* cdef Py_ssize_t i
* cdef Py_ssize_t stride = strides[0]
* cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<<
*
* if ndim == 1:
*/
__pyx_v_extent = (__pyx_v_shape[0]);
/* "View.MemoryView":1410
* cdef Py_ssize_t extent = shape[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* for i in range(extent):
* memcpy(data, item, itemsize)
*/
__pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1411
*
* if ndim == 1:
* for i in range(extent): # <<<<<<<<<<<<<<
* memcpy(data, item, itemsize)
* data += stride
*/
__pyx_t_2 = __pyx_v_extent;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1412
* if ndim == 1:
* for i in range(extent):
* memcpy(data, item, itemsize) # <<<<<<<<<<<<<<
* data += stride
* else:
*/
(void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize));
/* "View.MemoryView":1413
* for i in range(extent):
* memcpy(data, item, itemsize)
* data += stride # <<<<<<<<<<<<<<
* else:
* for i in range(extent):
*/
__pyx_v_data = (__pyx_v_data + __pyx_v_stride);
}
/* "View.MemoryView":1410
* cdef Py_ssize_t extent = shape[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* for i in range(extent):
* memcpy(data, item, itemsize)
*/
goto __pyx_L3;
}
/* "View.MemoryView":1415
* data += stride
* else:
* for i in range(extent): # <<<<<<<<<<<<<<
* _slice_assign_scalar(data, shape + 1, strides + 1,
* ndim - 1, itemsize, item)
*/
/*else*/ {
__pyx_t_2 = __pyx_v_extent;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1416
* else:
* for i in range(extent):
* _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<<
* ndim - 1, itemsize, item)
* data += stride
*/
__pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item);
/* "View.MemoryView":1418
* _slice_assign_scalar(data, shape + 1, strides + 1,
* ndim - 1, itemsize, item)
* data += stride # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_data = (__pyx_v_data + __pyx_v_stride);
}
}
__pyx_L3:;
/* "View.MemoryView":1403
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
/* function exit code */
}
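/* __pyx_unpickle_Enum: module-level function registered for pickle support
 * of the Enum sentinel type. The wrapper below unpacks (__pyx_type,
 * __pyx_checksum, __pyx_state); the implementation rejects a mismatched
 * checksum with PickleError, creates the instance via Enum.__new__, and
 * restores its state. */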
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* if __pyx_checksum != 0xb068931:
* from pickle import PickleError as __pyx_PickleError
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v___pyx_type = 0;
long __pyx_v___pyx_checksum;
PyObject *__pyx_v___pyx_state = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
__pyx_v___pyx_type = values[0];
__pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_v___pyx_state = values[2];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_v___pyx_PickleError = NULL;
PyObject *__pyx_v___pyx_result = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
int __pyx_t_7;
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0);
/* "(tree fragment)":2
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state):
* if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
*/
__pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0);
if (__pyx_t_1) {
/* "(tree fragment)":3
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state):
* if __pyx_checksum != 0xb068931:
* from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<<
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
*/
__pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 3, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_n_s_PickleError);
__Pyx_GIVEREF(__pyx_n_s_PickleError);
PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError);
__pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 3, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 3, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_t_2);
__pyx_v___pyx_PickleError = __pyx_t_2;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":4
* if __pyx_checksum != 0xb068931:
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<<
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None:
*/
__pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_INCREF(__pyx_v___pyx_PickleError);
__pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
if (!__pyx_t_5) {
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_GOTREF(__pyx_t_3);
} else {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_2)) {
PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_4};
__pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) {
PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_4};
__pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
} else
#endif
{
__pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = NULL;
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
}
}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":2
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state):
* if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
*/
}
/* "(tree fragment)":5
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<<
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_6 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_6 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_6)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_6);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
if (!__pyx_t_6) {
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
} else {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_2)) {
PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_v___pyx_type};
__pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_GOTREF(__pyx_t_3);
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) {
PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_v___pyx_type};
__pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_GOTREF(__pyx_t_3);
} else
#endif
{
__pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_6); __pyx_t_6 = NULL;
__Pyx_INCREF(__pyx_v___pyx_type);
__Pyx_GIVEREF(__pyx_v___pyx_type);
PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_v___pyx_type);
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
}
}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v___pyx_result = __pyx_t_3;
__pyx_t_3 = 0;
/* "(tree fragment)":6
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
*/
__pyx_t_1 = (__pyx_v___pyx_state != Py_None);
__pyx_t_7 = (__pyx_t_1 != 0);
if (__pyx_t_7) {
/* "(tree fragment)":7
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<<
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 7, __pyx_L1_error)
__pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":6
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
*/
}
/* "(tree fragment)":8
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result # <<<<<<<<<<<<<<
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v___pyx_result);
__pyx_r = __pyx_v___pyx_result;
goto __pyx_L0;
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* if __pyx_checksum != 0xb068931:
* from pickle import PickleError as __pyx_PickleError
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v___pyx_PickleError);
__Pyx_XDECREF(__pyx_v___pyx_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
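/* __pyx_unpickle_Enum__set_state restores the pickled state: it assigns
 * __pyx_result.name from __pyx_state[0] and, when the tuple has a second
 * item and the object exposes __dict__, updates that dict from
 * __pyx_state[1]. */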
/* "(tree fragment)":9
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
Py_ssize_t __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0);
/* "(tree fragment)":10
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<<
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[1])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 10, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 10, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v___pyx_result->name);
__Pyx_DECREF(__pyx_v___pyx_result->name);
__pyx_v___pyx_result->name = __pyx_t_1;
__pyx_t_1 = 0;
/* "(tree fragment)":11
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[1])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 11, __pyx_L1_error)
}
__pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 11, __pyx_L1_error)
__pyx_t_4 = ((__pyx_t_3 > 1) != 0);
if (__pyx_t_4) {
} else {
__pyx_t_2 = __pyx_t_4;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 11, __pyx_L1_error)
__pyx_t_5 = (__pyx_t_4 != 0);
__pyx_t_2 = __pyx_t_5;
__pyx_L4_bool_binop_done:;
if (__pyx_t_2) {
/* "(tree fragment)":12
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<<
*/
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
__pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7);
if (likely(__pyx_t_8)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
__Pyx_INCREF(__pyx_t_8);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_7, function);
}
}
if (!__pyx_t_8) {
__pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_GOTREF(__pyx_t_1);
} else {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_7)) {
PyObject *__pyx_temp[2] = {__pyx_t_8, __pyx_t_6};
__pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) {
PyObject *__pyx_temp[2] = {__pyx_t_8, __pyx_t_6};
__pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
{
__pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_8); __pyx_t_8 = NULL;
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_9, 0+1, __pyx_t_6);
__pyx_t_6 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
}
}
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":11
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[1])
*/
}
/* "(tree fragment)":9
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
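/* Type-object machinery for the internal cython.view.array helper type:
 * tp_new/tp_dealloc implementations, sequence/mapping/buffer slot tables,
 * method and getset tables, and the static PyTypeObject definition. */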
static struct __pyx_vtabstruct_array __pyx_vtable_array;
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_array_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_array_obj *)o);
p->__pyx_vtab = __pyx_vtabptr_array;
p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None);
p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None);
if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad;
return o;
bad:
Py_DECREF(o); o = 0;
return NULL;
}
static void __pyx_tp_dealloc_array(PyObject *o) {
struct __pyx_array_obj *p = (struct __pyx_array_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
++Py_REFCNT(o);
__pyx_array___dealloc__(o);
--Py_REFCNT(o);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->mode);
Py_CLEAR(p->_format);
(*Py_TYPE(o)->tp_free)(o);
}
static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) {
PyObject *r;
PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;
r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
Py_DECREF(x);
return r;
}
static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) {
if (v) {
return __pyx_array___setitem__(o, i, v);
}
else {
PyErr_Format(PyExc_NotImplementedError,
"Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
return -1;
}
}
static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) {
PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n);
if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
v = __pyx_array___getattr__(o, n);
}
return v;
}
static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o);
}
static PyMethodDef __pyx_methods_array[] = {
{"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0},
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets_array[] = {
{(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PySequenceMethods __pyx_tp_as_sequence_array = {
__pyx_array___len__, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
__pyx_sq_item_array, /*sq_item*/
0, /*sq_slice*/
0, /*sq_ass_item*/
0, /*sq_ass_slice*/
0, /*sq_contains*/
0, /*sq_inplace_concat*/
0, /*sq_inplace_repeat*/
};
static PyMappingMethods __pyx_tp_as_mapping_array = {
__pyx_array___len__, /*mp_length*/
__pyx_array___getitem__, /*mp_subscript*/
__pyx_mp_ass_subscript_array, /*mp_ass_subscript*/
};
static PyBufferProcs __pyx_tp_as_buffer_array = {
#if PY_MAJOR_VERSION < 3
0, /*bf_getreadbuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getwritebuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getsegcount*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getcharbuffer*/
#endif
__pyx_array_getbuffer, /*bf_getbuffer*/
0, /*bf_releasebuffer*/
};
static PyTypeObject __pyx_type___pyx_array = {
PyVarObject_HEAD_INIT(0, 0)
"mnnpy._utils.array", /*tp_name*/
sizeof(struct __pyx_array_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_array, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
0, /*tp_repr*/
0, /*tp_as_number*/
&__pyx_tp_as_sequence_array, /*tp_as_sequence*/
&__pyx_tp_as_mapping_array, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
__pyx_tp_getattro_array, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_array, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/
0, /*tp_doc*/
0, /*tp_traverse*/
0, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_array, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_array, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_array, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
};
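/* Machinery for the internal memoryview Enum sentinel type (used to tag
 * memory layouts such as "<strided and direct>"): GC-aware allocation,
 * deallocation, traverse/clear, and the static PyTypeObject definition. */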
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
struct __pyx_MemviewEnum_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_MemviewEnum_obj *)o);
p->name = Py_None; Py_INCREF(Py_None);
return o;
}
static void __pyx_tp_dealloc_Enum(PyObject *o) {
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
Py_CLEAR(p->name);
(*Py_TYPE(o)->tp_free)(o);
}
static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
if (p->name) {
e = (*v)(p->name, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear_Enum(PyObject *o) {
PyObject* tmp;
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
tmp = ((PyObject*)p->name);
p->name = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
return 0;
}
static PyMethodDef __pyx_methods_Enum[] = {
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static PyTypeObject __pyx_type___pyx_MemviewEnum = {
PyVarObject_HEAD_INIT(0, 0)
"mnnpy._utils.Enum", /*tp_name*/
sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_Enum, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
__pyx_MemviewEnum___repr__, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_Enum, /*tp_traverse*/
__pyx_tp_clear_Enum, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_Enum, /*tp_methods*/
0, /*tp_members*/
0, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
__pyx_MemviewEnum___init__, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_Enum, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
};
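/* Machinery for the internal typed-memoryview type: tp_new installs the
 * vtable and initialises the wrapped-object and buffer fields, tp_dealloc
 * releases them, and traverse/clear let the type cooperate with the
 * cyclic garbage collector. */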
static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview;
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_memoryview_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_memoryview_obj *)o);
p->__pyx_vtab = __pyx_vtabptr_memoryview;
p->obj = Py_None; Py_INCREF(Py_None);
p->_size = Py_None; Py_INCREF(Py_None);
p->_array_interface = Py_None; Py_INCREF(Py_None);
p->view.obj = NULL;
if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad;
return o;
bad:
Py_DECREF(o); o = 0;
return NULL;
}
static void __pyx_tp_dealloc_memoryview(PyObject *o) {
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
++Py_REFCNT(o);
__pyx_memoryview___dealloc__(o);
--Py_REFCNT(o);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->obj);
Py_CLEAR(p->_size);
Py_CLEAR(p->_array_interface);
(*Py_TYPE(o)->tp_free)(o);
}
static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
if (p->obj) {
e = (*v)(p->obj, a); if (e) return e;
}
if (p->_size) {
e = (*v)(p->_size, a); if (e) return e;
}
if (p->_array_interface) {
e = (*v)(p->_array_interface, a); if (e) return e;
}
if (p->view.obj) {
e = (*v)(p->view.obj, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear_memoryview(PyObject *o) {
PyObject* tmp;
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
tmp = ((PyObject*)p->obj);
p->obj = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
tmp = ((PyObject*)p->_size);
p->_size = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
tmp = ((PyObject*)p->_array_interface);
p->_array_interface = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
Py_CLEAR(p->view.obj);
return 0;
}
static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) {
PyObject *r;
PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;
r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
Py_DECREF(x);
return r;
}
static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) {
if (v) {
return __pyx_memoryview___setitem__(o, i, v);
}
else {
PyErr_Format(PyExc_NotImplementedError,
"Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
return -1;
}
}
static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o);
}
static PyMethodDef __pyx_methods_memoryview[] = {
{"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0},
{"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0},
{"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0},
{"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0},
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets_memoryview[] = {
{(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0},
{(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0},
{(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0},
{(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0},
{(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0},
{(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0},
{(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0},
{(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0},
{(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PySequenceMethods __pyx_tp_as_sequence_memoryview = {
__pyx_memoryview___len__, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
__pyx_sq_item_memoryview, /*sq_item*/
0, /*sq_slice*/
0, /*sq_ass_item*/
0, /*sq_ass_slice*/
0, /*sq_contains*/
0, /*sq_inplace_concat*/
0, /*sq_inplace_repeat*/
};
static PyMappingMethods __pyx_tp_as_mapping_memoryview = {
__pyx_memoryview___len__, /*mp_length*/
__pyx_memoryview___getitem__, /*mp_subscript*/
__pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/
};
static PyBufferProcs __pyx_tp_as_buffer_memoryview = {
#if PY_MAJOR_VERSION < 3
0, /*bf_getreadbuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getwritebuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getsegcount*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getcharbuffer*/
#endif
__pyx_memoryview_getbuffer, /*bf_getbuffer*/
0, /*bf_releasebuffer*/
};
static PyTypeObject __pyx_type___pyx_memoryview = {
PyVarObject_HEAD_INIT(0, 0)
"mnnpy._utils.memoryview", /*tp_name*/
sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_memoryview, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
__pyx_memoryview___repr__, /*tp_repr*/
0, /*tp_as_number*/
&__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/
&__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
__pyx_memoryview___str__, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_memoryview, /*tp_traverse*/
__pyx_tp_clear_memoryview, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_memoryview, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_memoryview, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_memoryview, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
};
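/* Machinery for _memoryviewslice, the memoryview subclass that owns a
 * concrete slice: it reuses the base type's tp_new and extends
 * dealloc/traverse/clear to also manage from_object and from_slice. */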
static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice;
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_memoryviewslice_obj *p;
PyObject *o = __pyx_tp_new_memoryview(t, a, k);
if (unlikely(!o)) return 0;
p = ((struct __pyx_memoryviewslice_obj *)o);
p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice;
p->from_object = Py_None; Py_INCREF(Py_None);
p->from_slice.memview = NULL;
return o;
}
static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) {
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
++Py_REFCNT(o);
__pyx_memoryviewslice___dealloc__(o);
--Py_REFCNT(o);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->from_object);
PyObject_GC_Track(o);
__pyx_tp_dealloc_memoryview(o);
}
static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e;
if (p->from_object) {
e = (*v)(p->from_object, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear__memoryviewslice(PyObject *o) {
PyObject* tmp;
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
__pyx_tp_clear_memoryview(o);
tmp = ((PyObject*)p->from_object);
p->from_object = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
__PYX_XDEC_MEMVIEW(&p->from_slice, 1);
return 0;
}
static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o);
}
static PyMethodDef __pyx_methods__memoryviewslice[] = {
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = {
{(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PyTypeObject __pyx_type___pyx_memoryviewslice = {
PyVarObject_HEAD_INIT(0, 0)
"mnnpy._utils._memoryviewslice", /*tp_name*/
sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
#if CYTHON_COMPILING_IN_PYPY
__pyx_memoryview___repr__, /*tp_repr*/
#else
0, /*tp_repr*/
#endif
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
#if CYTHON_COMPILING_IN_PYPY
__pyx_memoryview___str__, /*tp_str*/
#else
0, /*tp_str*/
#endif
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
"Internal class for passing memoryview slices to Python", /*tp_doc*/
__pyx_tp_traverse__memoryviewslice, /*tp_traverse*/
__pyx_tp_clear__memoryviewslice, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods__memoryviewslice, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets__memoryviewslice, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new__memoryviewslice, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
};
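/* Module-level method table; the only entry registered here is the
 * Python-visible _adjust_shift_variance wrapper. */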
static PyMethodDef __pyx_methods[] = {
{"_adjust_shift_variance", (PyCFunction)__pyx_pw_5mnnpy_6_utils_1_adjust_shift_variance, METH_VARARGS|METH_KEYWORDS, 0},
{0, 0, 0, 0}
};
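/* PyModuleDef for Python 3, with optional PEP 489 multi-phase
 * initialisation slots when CYTHON_PEP489_MULTI_PHASE_INIT is enabled. */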
#if PY_MAJOR_VERSION >= 3
#if CYTHON_PEP489_MULTI_PHASE_INIT
static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
static int __pyx_pymod_exec__utils(PyObject* module); /*proto*/
static PyModuleDef_Slot __pyx_moduledef_slots[] = {
{Py_mod_create, (void*)__pyx_pymod_create},
{Py_mod_exec, (void*)__pyx_pymod_exec__utils},
{0, NULL}
};
#endif
static struct PyModuleDef __pyx_moduledef = {
PyModuleDef_HEAD_INIT,
"_utils",
0, /* m_doc */
#if CYTHON_PEP489_MULTI_PHASE_INIT
0, /* m_size */
#else
-1, /* m_size */
#endif
__pyx_methods /* m_methods */,
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_moduledef_slots, /* m_slots */
#else
NULL, /* m_reload */
#endif
NULL, /* m_traverse */
NULL, /* m_clear */
NULL /* m_free */
};
#endif
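/* Interned string constants used throughout the module; the table is
 * materialised into Python string objects by __Pyx_InitStrings() at
 * import time. */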
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1},
{&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0},
{&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0},
{&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1},
{&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0},
{&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0},
{&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1},
{&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0},
{&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0},
{&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0},
{&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1},
{&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0},
{&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0},
{&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1},
{&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0},
{&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1},
{&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1},
{&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0},
{&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
{&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1},
{&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1},
{&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1},
{&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1},
{&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1},
{&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1},
{&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
{&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0},
{&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0},
{&__pyx_n_s_correction, __pyx_k_correction, sizeof(__pyx_k_correction), 0, 0, 1, 1},
{&__pyx_n_s_data1, __pyx_k_data1, sizeof(__pyx_k_data1), 0, 0, 1, 1},
{&__pyx_n_s_data2, __pyx_k_data2, sizeof(__pyx_k_data2), 0, 0, 1, 1},
{&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1},
{&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1},
{&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1},
{&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1},
{&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1},
{&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1},
{&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1},
{&__pyx_n_s_float32, __pyx_k_float32, sizeof(__pyx_k_float32), 0, 0, 1, 1},
{&__pyx_n_s_fmax, __pyx_k_fmax, sizeof(__pyx_k_fmax), 0, 0, 1, 1},
{&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1},
{&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1},
{&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1},
{&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1},
{&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0},
{&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1},
{&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
{&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1},
{&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0},
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
{&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1},
{&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1},
{&__pyx_n_s_n_jobs, __pyx_k_n_jobs, sizeof(__pyx_k_n_jobs), 0, 0, 1, 1},
{&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
{&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1},
{&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1},
{&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1},
{&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0},
{&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1},
{&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
{&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1},
{&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1},
{&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1},
{&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1},
{&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1},
{&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1},
{&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1},
{&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1},
{&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1},
{&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1},
{&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1},
{&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
{&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1},
{&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1},
{&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1},
{&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1},
{&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1},
{&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1},
{&__pyx_n_s_sigma, __pyx_k_sigma, sizeof(__pyx_k_sigma), 0, 0, 1, 1},
{&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1},
{&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1},
{&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1},
{&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1},
{&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0},
{&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0},
{&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0},
{&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0},
{&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1},
{&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
{&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0},
{&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0},
{&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1},
{&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1},
{&__pyx_n_s_var_subset, __pyx_k_var_subset, sizeof(__pyx_k_var_subset), 0, 0, 1, 1},
{&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1},
{0, 0, 0, 0, 0, 0, 0}
};
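/* Cache the builtins referenced by the generated code (range, ValueError,
 * MemoryError, enumerate, TypeError, Ellipsis, id, IndexError) so they are
 * looked up once instead of on every use. */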
static int __Pyx_InitCachedBuiltins(void) {
__pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 21, __pyx_L1_error)
__pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 132, __pyx_L1_error)
__pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 147, __pyx_L1_error)
__pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 150, __pyx_L1_error)
__pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error)
__pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 399, __pyx_L1_error)
__pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 608, __pyx_L1_error)
__pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 827, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
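/* Pre-build constant Python objects needed at runtime: the slice objects
 * behind expressions such as correction[:, var_subset], the argument tuples
 * for the various raise statements, and the code object used to expose
 * __pyx_unpickle_Enum. */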
static int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
/* "mnnpy/_utils.pyx":91
* cpdef _adjust_shift_variance(data1, data2, correction, sigma, n_jobs, var_subset=None):
* if var_subset is not None:
* vect = correction[:, var_subset] # <<<<<<<<<<<<<<
* data1 = data1[:, var_subset]
* data2 = data2[:, var_subset]
*/
__pyx_slice_ = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice_)) __PYX_ERR(0, 91, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice_);
__Pyx_GIVEREF(__pyx_slice_);
/* "mnnpy/_utils.pyx":92
* if var_subset is not None:
* vect = correction[:, var_subset]
* data1 = data1[:, var_subset] # <<<<<<<<<<<<<<
* data2 = data2[:, var_subset]
* else:
*/
__pyx_slice__2 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__2)) __PYX_ERR(0, 92, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice__2);
__Pyx_GIVEREF(__pyx_slice__2);
/* "mnnpy/_utils.pyx":93
* vect = correction[:, var_subset]
* data1 = data1[:, var_subset]
* data2 = data2[:, var_subset] # <<<<<<<<<<<<<<
* else:
* vect = correction
*/
__pyx_slice__3 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__3)) __PYX_ERR(0, 93, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice__3);
__Pyx_GIVEREF(__pyx_slice__3);
/* "mnnpy/_utils.pyx":108
* s1[i] = _adjust_s_variance(da1, da2, da2[i], vec[i], s2)
* scaling = np.fmax(scaling, 1)
* return correction * scaling[:, None] # <<<<<<<<<<<<<<
*/
__pyx_slice__4 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__4)) __PYX_ERR(0, 108, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice__4);
__Pyx_GIVEREF(__pyx_slice__4);
__pyx_tuple__5 = PyTuple_Pack(2, __pyx_slice__4, Py_None); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(0, 108, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__5);
__Pyx_GIVEREF(__pyx_tuple__5);
/* "View.MemoryView":132
*
* if not self.ndim:
* raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
*
* if itemsize <= 0:
*/
__pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 132, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__6);
__Pyx_GIVEREF(__pyx_tuple__6);
/* "View.MemoryView":135
*
* if itemsize <= 0:
* raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
*
* if not isinstance(format, bytes):
*/
__pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 135, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__7);
__Pyx_GIVEREF(__pyx_tuple__7);
/* "View.MemoryView":138
*
* if not isinstance(format, bytes):
* format = format.encode('ASCII') # <<<<<<<<<<<<<<
* self._format = format # keep a reference to the byte string
* self.format = self._format
*/
__pyx_tuple__8 = PyTuple_Pack(1, __pyx_n_s_ASCII); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 138, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__8);
__Pyx_GIVEREF(__pyx_tuple__8);
/* "View.MemoryView":147
*
* if not self._shape:
* raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 147, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__9);
__Pyx_GIVEREF(__pyx_tuple__9);
/* "View.MemoryView":175
* self.data = <char *>malloc(self.len)
* if not self.data:
* raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 175, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__10);
__Pyx_GIVEREF(__pyx_tuple__10);
/* "View.MemoryView":191
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
* info.buf = self.data
* info.len = self.len
*/
__pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 191, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__11);
__Pyx_GIVEREF(__pyx_tuple__11);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__12);
__Pyx_GIVEREF(__pyx_tuple__12);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__13);
__Pyx_GIVEREF(__pyx_tuple__13);
/* "View.MemoryView":413
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<<
*
* have_slices, index = _unellipsify(index, self.view.ndim)
*/
__pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 413, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__14);
__Pyx_GIVEREF(__pyx_tuple__14);
/* "View.MemoryView":490
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
__pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(1, 490, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__15);
__Pyx_GIVEREF(__pyx_tuple__15);
/* "View.MemoryView":515
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<<
*
* if flags & PyBUF_STRIDES:
*/
__pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(1, 515, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__16);
__Pyx_GIVEREF(__pyx_tuple__16);
/* "View.MemoryView":565
* if self.view.strides == NULL:
*
* raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]])
*/
__pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 565, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__17);
__Pyx_GIVEREF(__pyx_tuple__17);
/* "View.MemoryView":572
* def suboffsets(self):
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
*/
__pyx_tuple__18 = PyTuple_New(1); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__18);
__Pyx_INCREF(__pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_int_neg_1);
PyTuple_SET_ITEM(__pyx_tuple__18, 0, __pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_tuple__18);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__19);
__Pyx_GIVEREF(__pyx_tuple__19);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__20);
__Pyx_GIVEREF(__pyx_tuple__20);
/* "View.MemoryView":677
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
__pyx_slice__21 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__21)) __PYX_ERR(1, 677, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice__21);
__Pyx_GIVEREF(__pyx_slice__21);
/* "View.MemoryView":680
* seen_ellipsis = True
* else:
* result.append(slice(None)) # <<<<<<<<<<<<<<
* have_slices = True
* else:
*/
__pyx_slice__22 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__22)) __PYX_ERR(1, 680, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice__22);
__Pyx_GIVEREF(__pyx_slice__22);
/* "View.MemoryView":691
* nslices = ndim - len(result)
* if nslices:
* result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<<
*
* return have_slices or nslices, tuple(result)
*/
__pyx_slice__23 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__23)) __PYX_ERR(1, 691, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice__23);
__Pyx_GIVEREF(__pyx_slice__23);
/* "View.MemoryView":698
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__24);
__Pyx_GIVEREF(__pyx_tuple__24);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__25 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__25);
__Pyx_GIVEREF(__pyx_tuple__25);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__26 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__26);
__Pyx_GIVEREF(__pyx_tuple__26);
/* "View.MemoryView":285
* return self.name
*
* cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>")
*/
__pyx_tuple__27 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__27)) __PYX_ERR(1, 285, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__27);
__Pyx_GIVEREF(__pyx_tuple__27);
/* "View.MemoryView":286
*
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
* cdef indirect = Enum("<strided and indirect>")
*
*/
__pyx_tuple__28 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__28)) __PYX_ERR(1, 286, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__28);
__Pyx_GIVEREF(__pyx_tuple__28);
/* "View.MemoryView":287
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__29 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__29)) __PYX_ERR(1, 287, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__29);
__Pyx_GIVEREF(__pyx_tuple__29);
/* "View.MemoryView":290
*
*
* cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
* cdef indirect_contiguous = Enum("<contiguous and indirect>")
*
*/
__pyx_tuple__30 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__30)) __PYX_ERR(1, 290, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__30);
__Pyx_GIVEREF(__pyx_tuple__30);
/* "View.MemoryView":291
*
* cdef contiguous = Enum("<contiguous and direct>")
* cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__31 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__31)) __PYX_ERR(1, 291, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__31);
__Pyx_GIVEREF(__pyx_tuple__31);
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* if __pyx_checksum != 0xb068931:
* from pickle import PickleError as __pyx_PickleError
*/
__pyx_tuple__32 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__32)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__32);
__Pyx_GIVEREF(__pyx_tuple__32);
__pyx_codeobj__33 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__32, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__33)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
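/* Initialise interned strings and the small integer constants (0, 1, -1 and
 * the pickle checksum 184977713) used by the module. */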
static int __Pyx_InitGlobals(void) {
/* InitThreads.init */
#ifdef WITH_THREAD
PyEval_InitThreads();
#endif
if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
__pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
static int __Pyx_modinit_global_init_code(void); /*proto*/
static int __Pyx_modinit_variable_export_code(void); /*proto*/
static int __Pyx_modinit_function_export_code(void); /*proto*/
static int __Pyx_modinit_type_init_code(void); /*proto*/
static int __Pyx_modinit_type_import_code(void); /*proto*/
static int __Pyx_modinit_variable_import_code(void); /*proto*/
static int __Pyx_modinit_function_import_code(void); /*proto*/
static int __Pyx_modinit_global_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
/*--- Global init code ---*/
generic = Py_None; Py_INCREF(Py_None);
strided = Py_None; Py_INCREF(Py_None);
indirect = Py_None; Py_INCREF(Py_None);
contiguous = Py_None; Py_INCREF(Py_None);
indirect_contiguous = Py_None; Py_INCREF(Py_None);
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_variable_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
/*--- Variable export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
/*--- Function export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
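/* Finalise the extension types declared above: install their vtables, run
 * PyType_Ready, wire up __reduce__/__setstate__ support, and publish the
 * resulting type pointers for use by the rest of the module. */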
static int __Pyx_modinit_type_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
/*--- Type init code ---*/
__pyx_vtabptr_array = &__pyx_vtable_array;
__pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview;
if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 104, __pyx_L1_error)
__pyx_type___pyx_array.tp_print = 0;
if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 104, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 104, __pyx_L1_error)
__pyx_array_type = &__pyx_type___pyx_array;
if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 278, __pyx_L1_error)
__pyx_type___pyx_MemviewEnum.tp_print = 0;
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 278, __pyx_L1_error)
__pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum;
__pyx_vtabptr_memoryview = &__pyx_vtable_memoryview;
__pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer;
__pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice;
__pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment;
__pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar;
__pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed;
__pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object;
__pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object;
if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 329, __pyx_L1_error)
__pyx_type___pyx_memoryview.tp_print = 0;
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 329, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 329, __pyx_L1_error)
__pyx_memoryview_type = &__pyx_type___pyx_memoryview;
__pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice;
__pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview;
__pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object;
__pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object;
__pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type;
if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 960, __pyx_L1_error)
__pyx_type___pyx_memoryviewslice.tp_print = 0;
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 960, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 960, __pyx_L1_error)
__pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice;
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
static int __Pyx_modinit_type_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
/*--- Type import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_variable_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
/*--- Variable import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
/*--- Function import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
#if PY_MAJOR_VERSION < 3
#ifdef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC void
#else
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#endif
#else
#ifdef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC PyObject *
#else
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#endif
#endif
#ifndef CYTHON_SMALL_CODE
#if defined(__clang__)
#define CYTHON_SMALL_CODE
#elif defined(__GNUC__) && (!(defined(__cplusplus)) || (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 4)))
#define CYTHON_SMALL_CODE __attribute__((cold))
#else
#define CYTHON_SMALL_CODE
#endif
#endif
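/* Module initialisation entry points: init_utils for Python 2 (single-phase)
 * and PyInit__utils for Python 3; under PEP 489 multi-phase initialisation
 * the real work is performed by __pyx_pymod_exec__utils via the
 * module-definition slots declared above. */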
#if PY_MAJOR_VERSION < 3
__Pyx_PyMODINIT_FUNC init_utils(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC init_utils(void)
#else
__Pyx_PyMODINIT_FUNC PyInit__utils(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC PyInit__utils(void)
#if CYTHON_PEP489_MULTI_PHASE_INIT
{
return PyModuleDef_Init(&__pyx_moduledef);
}
static int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name) {
PyObject *value = PyObject_GetAttrString(spec, from_name);
int result = 0;
if (likely(value)) {
result = PyDict_SetItemString(moddict, to_name, value);
Py_DECREF(value);
} else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
} else {
result = -1;
}
return result;
}
static PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
PyObject *module = NULL, *moddict, *modname;
if (__pyx_m)
return __Pyx_NewRef(__pyx_m);
modname = PyObject_GetAttrString(spec, "name");
if (unlikely(!modname)) goto bad;
module = PyModule_NewObject(modname);
Py_DECREF(modname);
if (unlikely(!module)) goto bad;
moddict = PyModule_GetDict(module);
if (unlikely(!moddict)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__") < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__") < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__") < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__") < 0)) goto bad;
return module;
bad:
Py_XDECREF(module);
return NULL;
}
static int __pyx_pymod_exec__utils(PyObject *__pyx_pyinit_module)
#endif
#endif
{
PyObject *__pyx_t_1 = NULL;
static PyThread_type_lock __pyx_t_2[8];
__Pyx_RefNannyDeclarations
#if CYTHON_PEP489_MULTI_PHASE_INIT
if (__pyx_m && __pyx_m == __pyx_pyinit_module) return 0;
#elif PY_MAJOR_VERSION >= 3
if (__pyx_m) return __Pyx_NewRef(__pyx_m);
#endif
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit__utils(void)", 0);
if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pyx_CyFunction_USED
if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Coroutine_USED
if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_AsyncGen_USED
if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_StopAsyncIteration_USED
if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
#ifdef WITH_THREAD /* Python build with threading support? */
PyEval_InitThreads();
#endif
#endif
/*--- Module creation code ---*/
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_m = __pyx_pyinit_module;
Py_INCREF(__pyx_m);
#else
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4("_utils", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
#if CYTHON_COMPILING_IN_PYPY
Py_INCREF(__pyx_b);
#endif
if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
/*--- Initialize various global constants etc. ---*/
if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
if (__pyx_module_is_main_mnnpy___utils) {
if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
}
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
if (!PyDict_GetItemString(modules, "mnnpy._utils")) {
if (unlikely(PyDict_SetItemString(modules, "mnnpy._utils", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
}
}
#endif
/*--- Builtin init code ---*/
if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Constants init code ---*/
if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Global type/function init code ---*/
(void)__Pyx_modinit_global_init_code();
(void)__Pyx_modinit_variable_export_code();
(void)__Pyx_modinit_function_export_code();
if (unlikely(__Pyx_modinit_type_init_code() != 0)) goto __pyx_L1_error;
(void)__Pyx_modinit_type_import_code();
(void)__Pyx_modinit_variable_import_code();
(void)__Pyx_modinit_function_import_code();
/*--- Execution code ---*/
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/* "mnnpy/_utils.pyx":5
* from libc.stdlib cimport malloc, free, qsort
* from libc.math cimport exp, sqrt
* import numpy as np # <<<<<<<<<<<<<<
*
* cdef extern from "_utils.h":
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 5, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "mnnpy/_utils.pyx":1
* cimport cython # <<<<<<<<<<<<<<
* from cython.parallel import prange
* from libc.stdlib cimport malloc, free, qsort
*/
__pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":208
* info.obj = self
*
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
* def __dealloc__(array self):
*/
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 208, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 208, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_array_type);
/* "View.MemoryView":285
* return self.name
*
* cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>")
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__27, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 285, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(generic);
__Pyx_DECREF_SET(generic, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":286
*
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
* cdef indirect = Enum("<strided and indirect>")
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__28, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 286, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(strided);
__Pyx_DECREF_SET(strided, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":287
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__29, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 287, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(indirect);
__Pyx_DECREF_SET(indirect, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":290
*
*
* cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
* cdef indirect_contiguous = Enum("<contiguous and indirect>")
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__30, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 290, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(contiguous);
__Pyx_DECREF_SET(contiguous, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":291
*
* cdef contiguous = Enum("<contiguous and direct>")
* cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__31, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 291, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(indirect_contiguous);
__Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":315
*
* DEF THREAD_LOCKS_PREALLOCATED = 8
* cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<<
* cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [
* PyThread_allocate_lock(),
*/
__pyx_memoryview_thread_locks_used = 0;
/* "View.MemoryView":316
* DEF THREAD_LOCKS_PREALLOCATED = 8
* cdef int __pyx_memoryview_thread_locks_used = 0
* cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<<
* PyThread_allocate_lock(),
* PyThread_allocate_lock(),
*/
__pyx_t_2[0] = PyThread_allocate_lock();
__pyx_t_2[1] = PyThread_allocate_lock();
__pyx_t_2[2] = PyThread_allocate_lock();
__pyx_t_2[3] = PyThread_allocate_lock();
__pyx_t_2[4] = PyThread_allocate_lock();
__pyx_t_2[5] = PyThread_allocate_lock();
__pyx_t_2[6] = PyThread_allocate_lock();
__pyx_t_2[7] = PyThread_allocate_lock();
memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_2, sizeof(__pyx_memoryview_thread_locks[0]) * (8));
/* "View.MemoryView":544
* info.obj = self
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 544, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 544, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_memoryview_type);
/* "View.MemoryView":990
* return self.from_object
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 990, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 990, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_memoryviewslice_type);
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* if __pyx_checksum != 0xb068931:
* from pickle import PickleError as __pyx_PickleError
*/
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":9
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
/*--- Wrapped vars code ---*/
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
if (__pyx_m) {
if (__pyx_d) {
__Pyx_AddTraceback("init mnnpy._utils", 0, __pyx_lineno, __pyx_filename);
}
Py_DECREF(__pyx_m); __pyx_m = 0;
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init mnnpy._utils");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if CYTHON_PEP489_MULTI_PHASE_INIT
return (__pyx_m != NULL) ? 0 : -1;
#elif PY_MAJOR_VERSION >= 3
return __pyx_m;
#else
return;
#endif
}
/* --- Runtime support code --- */
/* Refnanny */
#if CYTHON_REFNANNY
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
PyObject *m = NULL, *p = NULL;
void *r = NULL;
m = PyImport_ImportModule((char *)modname);
if (!m) goto end;
p = PyObject_GetAttrString(m, (char *)"RefNannyAPI");
if (!p) goto end;
r = PyLong_AsVoidPtr(p);
end:
Py_XDECREF(p);
Py_XDECREF(m);
return (__Pyx_RefNannyAPIStruct *)r;
}
#endif
/* PyObjectGetAttrStr */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro))
return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
if (likely(tp->tp_getattr))
return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
return PyObject_GetAttr(obj, attr_name);
}
#endif
/* GetBuiltinName */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
if (unlikely(!result)) {
PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
"name '%U' is not defined", name);
#else
"name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
}
return result;
}
/* PyErrFetchRestore */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
tmp_type = tstate->curexc_type;
tmp_value = tstate->curexc_value;
tmp_tb = tstate->curexc_traceback;
tstate->curexc_type = type;
tstate->curexc_value = value;
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
*type = tstate->curexc_type;
*value = tstate->curexc_value;
*tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
}
#endif
/* WriteUnraisableException */
static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno,
CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename,
int full_traceback, CYTHON_UNUSED int nogil) {
PyObject *old_exc, *old_val, *old_tb;
PyObject *ctx;
__Pyx_PyThreadState_declare
#ifdef WITH_THREAD
PyGILState_STATE state;
if (nogil)
state = PyGILState_Ensure();
#ifdef _MSC_VER
else state = (PyGILState_STATE)-1;
#endif
#endif
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&old_exc, &old_val, &old_tb);
if (full_traceback) {
Py_XINCREF(old_exc);
Py_XINCREF(old_val);
Py_XINCREF(old_tb);
__Pyx_ErrRestore(old_exc, old_val, old_tb);
PyErr_PrintEx(1);
}
#if PY_MAJOR_VERSION < 3
ctx = PyString_FromString(name);
#else
ctx = PyUnicode_FromString(name);
#endif
__Pyx_ErrRestore(old_exc, old_val, old_tb);
if (!ctx) {
PyErr_WriteUnraisable(Py_None);
} else {
PyErr_WriteUnraisable(ctx);
Py_DECREF(ctx);
}
#ifdef WITH_THREAD
if (nogil)
PyGILState_Release(state);
#endif
}
/* GetItemInt */
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
PyObject *r;
if (!j) return NULL;
r = PyObject_GetItem(o, j);
Py_DECREF(j);
return r;
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyList_GET_SIZE(o);
}
if ((!boundscheck) || likely((0 <= wrapped_i) & (wrapped_i < PyList_GET_SIZE(o)))) {
PyObject *r = PyList_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyTuple_GET_SIZE(o);
}
if ((!boundscheck) || likely((0 <= wrapped_i) & (wrapped_i < PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
if (is_list || PyList_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) {
PyObject *r = PyList_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
}
else if (PyTuple_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
} else {
PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
if (likely(m && m->sq_item)) {
if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
Py_ssize_t l = m->sq_length(o);
if (likely(l >= 0)) {
i += l;
} else {
if (!PyErr_ExceptionMatches(PyExc_OverflowError))
return NULL;
PyErr_Clear();
}
}
return m->sq_item(o, i);
}
}
#else
if (is_list || PySequence_Check(o)) {
return PySequence_GetItem(o, i);
}
#endif
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
/* ObjectGetItem */
#if CYTHON_USE_TYPE_SLOTS
static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) {
PyObject *runerr;
Py_ssize_t key_value;
PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence;
if (unlikely(!(m && m->sq_item))) {
PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name);
return NULL;
}
key_value = __Pyx_PyIndex_AsSsize_t(index);
if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) {
return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1);
}
if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) {
PyErr_Clear();
PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name);
}
return NULL;
}
static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) {
PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping;
if (likely(m && m->mp_subscript)) {
return m->mp_subscript(obj, key);
}
return __Pyx_PyObject_GetIndex(obj, key);
}
#endif
/* GetModuleGlobalName */
static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) {
PyObject *result;
#if !CYTHON_AVOID_BORROWED_REFS
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash);
if (likely(result)) {
Py_INCREF(result);
} else if (unlikely(PyErr_Occurred())) {
result = NULL;
} else {
#else
result = PyDict_GetItem(__pyx_d, name);
if (likely(result)) {
Py_INCREF(result);
} else {
#endif
#else
result = PyObject_GetItem(__pyx_d, name);
if (!result) {
PyErr_Clear();
#endif
result = __Pyx_GetBuiltinName(name);
}
return result;
}
/* PyObjectCall */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
PyObject *result;
ternaryfunc call = func->ob_type->tp_call;
if (unlikely(!call))
return PyObject_Call(func, arg, kw);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = (*call)(func, arg, kw);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyIntBinop */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED int inplace) {
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(op1))) {
const long b = intval;
long x;
long a = PyInt_AS_LONG(op1);
x = (long)((unsigned long)a + b);
if (likely((x^a) >= 0 || (x^b) >= 0))
return PyInt_FromLong(x);
return PyLong_Type.tp_as_number->nb_add(op1, op2);
}
#endif
#if CYTHON_USE_PYLONG_INTERNALS
if (likely(PyLong_CheckExact(op1))) {
const long b = intval;
long a, x;
#ifdef HAVE_LONG_LONG
const PY_LONG_LONG llb = intval;
PY_LONG_LONG lla, llx;
#endif
const digit* digits = ((PyLongObject*)op1)->ob_digit;
const Py_ssize_t size = Py_SIZE(op1);
if (likely(__Pyx_sst_abs(size) <= 1)) {
a = likely(size) ? digits[0] : 0;
if (size == -1) a = -a;
} else {
switch (size) {
case -2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case -3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case -4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
default: return PyLong_Type.tp_as_number->nb_add(op1, op2);
}
}
x = a + b;
return PyLong_FromLong(x);
#ifdef HAVE_LONG_LONG
long_long:
llx = lla + llb;
return PyLong_FromLongLong(llx);
#endif
}
#endif
if (PyFloat_CheckExact(op1)) {
const long b = intval;
double a = PyFloat_AS_DOUBLE(op1);
double result;
PyFPE_START_PROTECT("add", return NULL)
result = ((double)a) + (double)b;
PyFPE_END_PROTECT(result)
return PyFloat_FromDouble(result);
}
return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2);
}
#endif
/* MemviewSliceInit */
static int
__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference)
{
__Pyx_RefNannyDeclarations
int i, retval=-1;
Py_buffer *buf = &memview->view;
__Pyx_RefNannySetupContext("init_memviewslice", 0);
if (!buf) {
PyErr_SetString(PyExc_ValueError,
"buf is NULL.");
goto fail;
} else if (memviewslice->memview || memviewslice->data) {
PyErr_SetString(PyExc_ValueError,
"memviewslice is already initialized!");
goto fail;
}
if (buf->strides) {
for (i = 0; i < ndim; i++) {
memviewslice->strides[i] = buf->strides[i];
}
} else {
Py_ssize_t stride = buf->itemsize;
for (i = ndim - 1; i >= 0; i--) {
memviewslice->strides[i] = stride;
stride *= buf->shape[i];
}
}
for (i = 0; i < ndim; i++) {
memviewslice->shape[i] = buf->shape[i];
if (buf->suboffsets) {
memviewslice->suboffsets[i] = buf->suboffsets[i];
} else {
memviewslice->suboffsets[i] = -1;
}
}
memviewslice->memview = memview;
memviewslice->data = (char *)buf->buf;
if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) {
Py_INCREF(memview);
}
retval = 0;
goto no_fail;
fail:
memviewslice->memview = 0;
memviewslice->data = 0;
retval = -1;
no_fail:
__Pyx_RefNannyFinishContext();
return retval;
}
#ifndef Py_NO_RETURN
#define Py_NO_RETURN
#endif
static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN {
va_list vargs;
char msg[200];
#ifdef HAVE_STDARG_PROTOTYPES
va_start(vargs, fmt);
#else
va_start(vargs);
#endif
vsnprintf(msg, 200, fmt, vargs);
va_end(vargs);
Py_FatalError(msg);
}
static CYTHON_INLINE int
__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock)
{
int result;
PyThread_acquire_lock(lock, 1);
result = (*acquisition_count)++;
PyThread_release_lock(lock);
return result;
}
static CYTHON_INLINE int
__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock)
{
int result;
PyThread_acquire_lock(lock, 1);
result = (*acquisition_count)--;
PyThread_release_lock(lock);
return result;
}
static CYTHON_INLINE void
__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno)
{
int first_time;
struct __pyx_memoryview_obj *memview = memslice->memview;
if (!memview || (PyObject *) memview == Py_None)
return;
if (__pyx_get_slice_count(memview) < 0)
__pyx_fatalerror("Acquisition count is %d (line %d)",
__pyx_get_slice_count(memview), lineno);
first_time = __pyx_add_acquisition_count(memview) == 0;
if (first_time) {
if (have_gil) {
Py_INCREF((PyObject *) memview);
} else {
PyGILState_STATE _gilstate = PyGILState_Ensure();
Py_INCREF((PyObject *) memview);
PyGILState_Release(_gilstate);
}
}
}
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice,
int have_gil, int lineno) {
int last_time;
struct __pyx_memoryview_obj *memview = memslice->memview;
    if (!memview) {
return;
} else if ((PyObject *) memview == Py_None) {
memslice->memview = NULL;
return;
}
if (__pyx_get_slice_count(memview) <= 0)
__pyx_fatalerror("Acquisition count is %d (line %d)",
__pyx_get_slice_count(memview), lineno);
last_time = __pyx_sub_acquisition_count(memview) == 1;
memslice->data = NULL;
if (last_time) {
if (have_gil) {
Py_CLEAR(memslice->memview);
} else {
PyGILState_STATE _gilstate = PyGILState_Ensure();
Py_CLEAR(memslice->memview);
PyGILState_Release(_gilstate);
}
} else {
memslice->memview = NULL;
}
}
/* PyFunctionFastCall */
#if CYTHON_FAST_PYCALL
#include "frameobject.h"
static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
PyObject *globals) {
PyFrameObject *f;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject **fastlocals;
Py_ssize_t i;
PyObject *result;
assert(globals != NULL);
/* XXX Perhaps we should create a specialized
PyFrame_New() that doesn't take locals, but does
take builtins without sanity checking them.
*/
assert(tstate != NULL);
f = PyFrame_New(tstate, co, globals, NULL);
if (f == NULL) {
return NULL;
}
fastlocals = f->f_localsplus;
for (i = 0; i < na; i++) {
Py_INCREF(*args);
fastlocals[i] = *args++;
}
result = PyEval_EvalFrameEx(f,0);
++tstate->recursion_depth;
Py_DECREF(f);
--tstate->recursion_depth;
return result;
}
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs) {
PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
PyObject *globals = PyFunction_GET_GLOBALS(func);
PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
PyObject *closure;
#if PY_MAJOR_VERSION >= 3
PyObject *kwdefs;
#endif
PyObject *kwtuple, **k;
PyObject **d;
Py_ssize_t nd;
Py_ssize_t nk;
PyObject *result;
assert(kwargs == NULL || PyDict_Check(kwargs));
nk = kwargs ? PyDict_Size(kwargs) : 0;
if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
return NULL;
}
if (
#if PY_MAJOR_VERSION >= 3
co->co_kwonlyargcount == 0 &&
#endif
likely(kwargs == NULL || nk == 0) &&
co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
if (argdefs == NULL && co->co_argcount == nargs) {
result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
goto done;
}
else if (nargs == 0 && argdefs != NULL
&& co->co_argcount == Py_SIZE(argdefs)) {
            /* function called with no arguments, but all parameters have
               a default value: use default values as arguments. */
args = &PyTuple_GET_ITEM(argdefs, 0);
result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
goto done;
}
}
if (kwargs != NULL) {
Py_ssize_t pos, i;
kwtuple = PyTuple_New(2 * nk);
if (kwtuple == NULL) {
result = NULL;
goto done;
}
k = &PyTuple_GET_ITEM(kwtuple, 0);
pos = i = 0;
while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
Py_INCREF(k[i]);
Py_INCREF(k[i+1]);
i += 2;
}
nk = i / 2;
}
else {
kwtuple = NULL;
k = NULL;
}
closure = PyFunction_GET_CLOSURE(func);
#if PY_MAJOR_VERSION >= 3
kwdefs = PyFunction_GET_KW_DEFAULTS(func);
#endif
if (argdefs != NULL) {
d = &PyTuple_GET_ITEM(argdefs, 0);
nd = Py_SIZE(argdefs);
}
else {
d = NULL;
nd = 0;
}
#if PY_MAJOR_VERSION >= 3
result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
args, nargs,
k, (int)nk,
d, (int)nd, kwdefs, closure);
#else
result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
args, nargs,
k, (int)nk,
d, (int)nd, closure);
#endif
Py_XDECREF(kwtuple);
done:
Py_LeaveRecursiveCall();
return result;
}
#endif
#endif
/* PyCFunctionFastCall */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
PyCFunction meth = PyCFunction_GET_FUNCTION(func);
PyObject *self = PyCFunction_GET_SELF(func);
int flags = PyCFunction_GET_FLAGS(func);
assert(PyCFunction_Check(func));
assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS)));
assert(nargs >= 0);
assert(nargs == 0 || args != NULL);
/* _PyCFunction_FastCallDict() must not be called with an exception set,
because it may clear it (directly or indirectly) and so the
caller loses its exception */
assert(!PyErr_Occurred());
if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {
return (*((__Pyx_PyCFunctionFastWithKeywords)meth)) (self, args, nargs, NULL);
} else {
return (*((__Pyx_PyCFunctionFast)meth)) (self, args, nargs);
}
}
#endif
/* RaiseArgTupleInvalid */
static void __Pyx_RaiseArgtupleInvalid(
const char* func_name,
int exact,
Py_ssize_t num_min,
Py_ssize_t num_max,
Py_ssize_t num_found)
{
Py_ssize_t num_expected;
const char *more_or_less;
if (num_found < num_min) {
num_expected = num_min;
more_or_less = "at least";
} else {
num_expected = num_max;
more_or_less = "at most";
}
if (exact) {
more_or_less = "exactly";
}
PyErr_Format(PyExc_TypeError,
"%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
func_name, more_or_less, num_expected,
(num_expected == 1) ? "" : "s", num_found);
}
/* RaiseDoubleKeywords */
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name,
PyObject* kw_name)
{
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
"%s() got multiple values for keyword argument '%s'", func_name,
PyString_AsString(kw_name));
#endif
}
/* ParseKeywords */
static int __Pyx_ParseOptionalKeywords(
PyObject *kwds,
PyObject **argnames[],
PyObject *kwds2,
PyObject *values[],
Py_ssize_t num_pos_args,
const char* function_name)
{
PyObject *key = 0, *value = 0;
Py_ssize_t pos = 0;
PyObject*** name;
PyObject*** first_kw_arg = argnames + num_pos_args;
while (PyDict_Next(kwds, &pos, &key, &value)) {
name = first_kw_arg;
while (*name && (**name != key)) name++;
if (*name) {
values[name-argnames] = value;
continue;
}
name = first_kw_arg;
#if PY_MAJOR_VERSION < 3
if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) {
while (*name) {
if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
&& _PyString_Eq(**name, key)) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
if ((**argname == key) || (
(CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
&& _PyString_Eq(**argname, key))) {
goto arg_passed_twice;
}
argname++;
}
}
} else
#endif
if (likely(PyUnicode_Check(key))) {
while (*name) {
int cmp = (**name == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
PyUnicode_Compare(**name, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
int cmp = (**argname == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
PyUnicode_Compare(**argname, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) goto arg_passed_twice;
argname++;
}
}
} else
goto invalid_keyword_type;
if (kwds2) {
if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
} else {
goto invalid_keyword;
}
}
return 0;
arg_passed_twice:
__Pyx_RaiseDoubleKeywordsError(function_name, key);
goto bad;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%.200s() keywords must be strings", function_name);
goto bad;
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%.200s() got an unexpected keyword argument '%.200s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
bad:
return -1;
}
/* ArgTypeTest */
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact)
{
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
else if (exact) {
#if PY_MAJOR_VERSION == 2
if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
#endif
}
else {
if (likely(__Pyx_TypeCheck(obj, type))) return 1;
}
PyErr_Format(PyExc_TypeError,
"Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
name, type->tp_name, Py_TYPE(obj)->tp_name);
return 0;
}
/* RaiseException */
#if PY_MAJOR_VERSION < 3
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
CYTHON_UNUSED PyObject *cause) {
__Pyx_PyThreadState_declare
Py_XINCREF(type);
if (!value || value == Py_None)
value = NULL;
else
Py_INCREF(value);
if (!tb || tb == Py_None)
tb = NULL;
else {
Py_INCREF(tb);
if (!PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto raise_error;
}
}
if (PyType_Check(type)) {
#if CYTHON_COMPILING_IN_PYPY
if (!value) {
Py_INCREF(Py_None);
value = Py_None;
}
#endif
PyErr_NormalizeException(&type, &value, &tb);
} else {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto raise_error;
}
value = type;
type = (PyObject*) Py_TYPE(type);
Py_INCREF(type);
if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto raise_error;
}
}
__Pyx_PyThreadState_assign
__Pyx_ErrRestore(type, value, tb);
return;
raise_error:
Py_XDECREF(value);
Py_XDECREF(type);
Py_XDECREF(tb);
return;
}
#else
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
PyObject* owned_instance = NULL;
if (tb == Py_None) {
tb = 0;
} else if (tb && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto bad;
}
if (value == Py_None)
value = 0;
if (PyExceptionInstance_Check(type)) {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto bad;
}
value = type;
type = (PyObject*) Py_TYPE(value);
} else if (PyExceptionClass_Check(type)) {
PyObject *instance_class = NULL;
if (value && PyExceptionInstance_Check(value)) {
instance_class = (PyObject*) Py_TYPE(value);
if (instance_class != type) {
int is_subclass = PyObject_IsSubclass(instance_class, type);
if (!is_subclass) {
instance_class = NULL;
} else if (unlikely(is_subclass == -1)) {
goto bad;
} else {
type = instance_class;
}
}
}
if (!instance_class) {
PyObject *args;
if (!value)
args = PyTuple_New(0);
else if (PyTuple_Check(value)) {
Py_INCREF(value);
args = value;
} else
args = PyTuple_Pack(1, value);
if (!args)
goto bad;
owned_instance = PyObject_Call(type, args, NULL);
Py_DECREF(args);
if (!owned_instance)
goto bad;
value = owned_instance;
if (!PyExceptionInstance_Check(value)) {
PyErr_Format(PyExc_TypeError,
"calling %R should have returned an instance of "
"BaseException, not %R",
type, Py_TYPE(value));
goto bad;
}
}
} else {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto bad;
}
if (cause) {
PyObject *fixed_cause;
if (cause == Py_None) {
fixed_cause = NULL;
} else if (PyExceptionClass_Check(cause)) {
fixed_cause = PyObject_CallObject(cause, NULL);
if (fixed_cause == NULL)
goto bad;
} else if (PyExceptionInstance_Check(cause)) {
fixed_cause = cause;
Py_INCREF(fixed_cause);
} else {
PyErr_SetString(PyExc_TypeError,
"exception causes must derive from "
"BaseException");
goto bad;
}
PyException_SetCause(value, fixed_cause);
}
PyErr_SetObject(type, value);
if (tb) {
#if CYTHON_COMPILING_IN_PYPY
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
Py_INCREF(tb);
PyErr_Restore(tmp_type, tmp_value, tb);
Py_XDECREF(tmp_tb);
#else
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject* tmp_tb = tstate->curexc_traceback;
if (tb != tmp_tb) {
Py_INCREF(tb);
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_tb);
}
#endif
}
bad:
Py_XDECREF(owned_instance);
return;
}
#endif
/* PyObjectCallMethO */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
PyObject *self, *result;
PyCFunction cfunc;
cfunc = PyCFunction_GET_FUNCTION(func);
self = PyCFunction_GET_SELF(func);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = cfunc(self, arg);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyObjectCallOneArg */
#if CYTHON_COMPILING_IN_CPYTHON
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_New(1);
if (unlikely(!args)) return NULL;
Py_INCREF(arg);
PyTuple_SET_ITEM(args, 0, arg);
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(func)) {
return __Pyx_PyFunction_FastCall(func, &arg, 1);
}
#endif
if (likely(PyCFunction_Check(func))) {
if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
return __Pyx_PyObject_CallMethO(func, arg);
#if CYTHON_FAST_PYCCALL
} else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) {
return __Pyx_PyCFunction_FastCall(func, &arg, 1);
#endif
}
}
return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_Pack(1, arg);
if (unlikely(!args)) return NULL;
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
#endif
/* BytesEquals */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
return PyObject_RichCompareBool(s1, s2, equals);
#else
if (s1 == s2) {
return (equals == Py_EQ);
} else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) {
const char *ps1, *ps2;
Py_ssize_t length = PyBytes_GET_SIZE(s1);
if (length != PyBytes_GET_SIZE(s2))
return (equals == Py_NE);
ps1 = PyBytes_AS_STRING(s1);
ps2 = PyBytes_AS_STRING(s2);
if (ps1[0] != ps2[0]) {
return (equals == Py_NE);
} else if (length == 1) {
return (equals == Py_EQ);
} else {
int result;
#if CYTHON_USE_UNICODE_INTERNALS
Py_hash_t hash1, hash2;
hash1 = ((PyBytesObject*)s1)->ob_shash;
hash2 = ((PyBytesObject*)s2)->ob_shash;
if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
return (equals == Py_NE);
}
#endif
result = memcmp(ps1, ps2, (size_t)length);
return (equals == Py_EQ) ? (result == 0) : (result != 0);
}
} else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) {
return (equals == Py_NE);
} else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) {
return (equals == Py_NE);
} else {
int result;
PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
if (!py_result)
return -1;
result = __Pyx_PyObject_IsTrue(py_result);
Py_DECREF(py_result);
return result;
}
#endif
}
/* UnicodeEquals */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
return PyObject_RichCompareBool(s1, s2, equals);
#else
#if PY_MAJOR_VERSION < 3
PyObject* owned_ref = NULL;
#endif
int s1_is_unicode, s2_is_unicode;
if (s1 == s2) {
goto return_eq;
}
s1_is_unicode = PyUnicode_CheckExact(s1);
s2_is_unicode = PyUnicode_CheckExact(s2);
#if PY_MAJOR_VERSION < 3
if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) {
owned_ref = PyUnicode_FromObject(s2);
if (unlikely(!owned_ref))
return -1;
s2 = owned_ref;
s2_is_unicode = 1;
} else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) {
owned_ref = PyUnicode_FromObject(s1);
if (unlikely(!owned_ref))
return -1;
s1 = owned_ref;
s1_is_unicode = 1;
} else if (((!s2_is_unicode) & (!s1_is_unicode))) {
return __Pyx_PyBytes_Equals(s1, s2, equals);
}
#endif
if (s1_is_unicode & s2_is_unicode) {
Py_ssize_t length;
int kind;
void *data1, *data2;
if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0))
return -1;
length = __Pyx_PyUnicode_GET_LENGTH(s1);
if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) {
goto return_ne;
}
#if CYTHON_USE_UNICODE_INTERNALS
{
Py_hash_t hash1, hash2;
#if CYTHON_PEP393_ENABLED
hash1 = ((PyASCIIObject*)s1)->hash;
hash2 = ((PyASCIIObject*)s2)->hash;
#else
hash1 = ((PyUnicodeObject*)s1)->hash;
hash2 = ((PyUnicodeObject*)s2)->hash;
#endif
if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
goto return_ne;
}
}
#endif
kind = __Pyx_PyUnicode_KIND(s1);
if (kind != __Pyx_PyUnicode_KIND(s2)) {
goto return_ne;
}
data1 = __Pyx_PyUnicode_DATA(s1);
data2 = __Pyx_PyUnicode_DATA(s2);
if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) {
goto return_ne;
} else if (length == 1) {
goto return_eq;
} else {
int result = memcmp(data1, data2, (size_t)(length * kind));
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_EQ) ? (result == 0) : (result != 0);
}
} else if ((s1 == Py_None) & s2_is_unicode) {
goto return_ne;
} else if ((s2 == Py_None) & s1_is_unicode) {
goto return_ne;
} else {
int result;
PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
if (!py_result)
return -1;
result = __Pyx_PyObject_IsTrue(py_result);
Py_DECREF(py_result);
return result;
}
return_eq:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_EQ);
return_ne:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_NE);
#endif
}
/* None */
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) {
Py_ssize_t q = a / b;
Py_ssize_t r = a - q*b;
q -= ((r != 0) & ((r ^ b) < 0));
return q;
}
/* GetAttr */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) {
#if CYTHON_USE_TYPE_SLOTS
#if PY_MAJOR_VERSION >= 3
if (likely(PyUnicode_Check(n)))
#else
if (likely(PyString_Check(n)))
#endif
return __Pyx_PyObject_GetAttrStr(o, n);
#endif
return PyObject_GetAttr(o, n);
}
/* decode_c_string */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
Py_ssize_t length;
if (unlikely((start < 0) | (stop < 0))) {
size_t slen = strlen(cstring);
if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) {
PyErr_SetString(PyExc_OverflowError,
"c-string too long to convert to Python");
return NULL;
}
length = (Py_ssize_t) slen;
if (start < 0) {
start += length;
if (start < 0)
start = 0;
}
if (stop < 0)
stop += length;
}
length = stop - start;
if (unlikely(length <= 0))
return PyUnicode_FromUnicode(NULL, 0);
cstring += start;
if (decode_func) {
return decode_func(cstring, length, errors);
} else {
return PyUnicode_Decode(cstring, length, encoding, errors);
}
}
/* PyErrExceptionMatches */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
PyObject *exc_type = tstate->curexc_type;
if (exc_type == err) return 1;
if (unlikely(!exc_type)) return 0;
if (unlikely(PyTuple_Check(err)))
return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
}
#endif
/* GetAttr3 */
static PyObject *__Pyx_GetAttr3Default(PyObject *d) {
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
return NULL;
__Pyx_PyErr_Clear();
Py_INCREF(d);
return d;
}
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) {
PyObject *r = __Pyx_GetAttr(o, n);
return (likely(r)) ? r : __Pyx_GetAttr3Default(d);
}
/* RaiseTooManyValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
PyErr_Format(PyExc_ValueError,
"too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
/* RaiseNeedMoreValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
PyErr_Format(PyExc_ValueError,
"need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
index, (index == 1) ? "" : "s");
}
/* RaiseNoneIterError */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* ExtTypeTest */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
if (likely(__Pyx_TypeCheck(obj, type)))
return 1;
PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
Py_TYPE(obj)->tp_name, type->tp_name);
return 0;
}
/* SaveResetException */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#if PY_VERSION_HEX >= 0x030700A3
*type = tstate->exc_state.exc_type;
*value = tstate->exc_state.exc_value;
*tb = tstate->exc_state.exc_traceback;
#else
*type = tstate->exc_type;
*value = tstate->exc_value;
*tb = tstate->exc_traceback;
#endif
Py_XINCREF(*type);
Py_XINCREF(*value);
Py_XINCREF(*tb);
}
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
#if PY_VERSION_HEX >= 0x030700A3
tmp_type = tstate->exc_state.exc_type;
tmp_value = tstate->exc_state.exc_value;
tmp_tb = tstate->exc_state.exc_traceback;
tstate->exc_state.exc_type = type;
tstate->exc_state.exc_value = value;
tstate->exc_state.exc_traceback = tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = type;
tstate->exc_value = value;
tstate->exc_traceback = tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
#endif
/* GetException */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) {
#endif
PyObject *local_type, *local_value, *local_tb;
#if CYTHON_FAST_THREAD_STATE
PyObject *tmp_type, *tmp_value, *tmp_tb;
local_type = tstate->curexc_type;
local_value = tstate->curexc_value;
local_tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
#else
PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
PyErr_NormalizeException(&local_type, &local_value, &local_tb);
#if CYTHON_FAST_THREAD_STATE
if (unlikely(tstate->curexc_type))
#else
if (unlikely(PyErr_Occurred()))
#endif
goto bad;
#if PY_MAJOR_VERSION >= 3
if (local_tb) {
if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
goto bad;
}
#endif
Py_XINCREF(local_tb);
Py_XINCREF(local_type);
Py_XINCREF(local_value);
*type = local_type;
*value = local_value;
*tb = local_tb;
#if CYTHON_FAST_THREAD_STATE
#if PY_VERSION_HEX >= 0x030700A3
tmp_type = tstate->exc_state.exc_type;
tmp_value = tstate->exc_state.exc_value;
tmp_tb = tstate->exc_state.exc_traceback;
tstate->exc_state.exc_type = local_type;
tstate->exc_state.exc_value = local_value;
tstate->exc_state.exc_traceback = local_tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = local_type;
tstate->exc_value = local_value;
tstate->exc_traceback = local_tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
#else
PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
return 0;
bad:
*type = 0;
*value = 0;
*tb = 0;
Py_XDECREF(local_type);
Py_XDECREF(local_value);
Py_XDECREF(local_tb);
return -1;
}
/* SwapException */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
#if PY_VERSION_HEX >= 0x030700A3
tmp_type = tstate->exc_state.exc_type;
tmp_value = tstate->exc_state.exc_value;
tmp_tb = tstate->exc_state.exc_traceback;
tstate->exc_state.exc_type = *type;
tstate->exc_state.exc_value = *value;
tstate->exc_state.exc_traceback = *tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = *type;
tstate->exc_value = *value;
tstate->exc_traceback = *tb;
#endif
*type = tmp_type;
*value = tmp_value;
*tb = tmp_tb;
}
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb);
PyErr_SetExcInfo(*type, *value, *tb);
*type = tmp_type;
*value = tmp_value;
*tb = tmp_tb;
}
#endif
/* Import */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
PyObject *empty_list = 0;
PyObject *module = 0;
PyObject *global_dict = 0;
PyObject *empty_dict = 0;
PyObject *list;
#if PY_MAJOR_VERSION < 3
PyObject *py_import;
py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
if (!py_import)
goto bad;
#endif
if (from_list)
list = from_list;
else {
empty_list = PyList_New(0);
if (!empty_list)
goto bad;
list = empty_list;
}
global_dict = PyModule_GetDict(__pyx_m);
if (!global_dict)
goto bad;
empty_dict = PyDict_New();
if (!empty_dict)
goto bad;
{
#if PY_MAJOR_VERSION >= 3
if (level == -1) {
if (strchr(__Pyx_MODULE_NAME, '.')) {
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, 1);
if (!module) {
if (!PyErr_ExceptionMatches(PyExc_ImportError))
goto bad;
PyErr_Clear();
}
}
level = 0;
}
#endif
if (!module) {
#if PY_MAJOR_VERSION < 3
PyObject *py_level = PyInt_FromLong(level);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, NULL);
Py_DECREF(py_level);
#else
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, level);
#endif
}
}
bad:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(py_import);
#endif
Py_XDECREF(empty_list);
Py_XDECREF(empty_dict);
return module;
}
/* FastTypeChecks */
#if CYTHON_COMPILING_IN_CPYTHON
static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
while (a) {
a = a->tp_base;
if (a == b)
return 1;
}
return b == &PyBaseObject_Type;
}
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
PyObject *mro;
if (a == b) return 1;
mro = a->tp_mro;
if (likely(mro)) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(mro);
for (i = 0; i < n; i++) {
if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
return 1;
}
return 0;
}
return __Pyx_InBases(a, b);
}
#if PY_MAJOR_VERSION == 2
static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
PyObject *exception, *value, *tb;
int res;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&exception, &value, &tb);
res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
if (!res) {
res = PyObject_IsSubclass(err, exc_type2);
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
}
__Pyx_ErrRestore(exception, value, tb);
return res;
}
#else
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
if (!res) {
res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
}
return res;
}
#endif
static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
assert(PyExceptionClass_Check(exc_type));
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
PyObject *t = PyTuple_GET_ITEM(tuple, i);
#if PY_MAJOR_VERSION < 3
if (likely(exc_type == t)) return 1;
#endif
if (likely(PyExceptionClass_Check(t))) {
if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
} else {
}
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
if (likely(err == exc_type)) return 1;
if (likely(PyExceptionClass_Check(err))) {
if (likely(PyExceptionClass_Check(exc_type))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
} else if (likely(PyTuple_Check(exc_type))) {
return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
} else {
}
}
return PyErr_GivenExceptionMatches(err, exc_type);
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
assert(PyExceptionClass_Check(exc_type1));
assert(PyExceptionClass_Check(exc_type2));
if (likely(err == exc_type1 || err == exc_type2)) return 1;
if (likely(PyExceptionClass_Check(err))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
}
return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
}
#endif
/* None */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}
/* None */
static CYTHON_INLINE long __Pyx_div_long(long a, long b) {
long q = a / b;
long r = a - q*b;
q -= ((r != 0) & ((r ^ b) < 0));
return q;
}
/* ImportFrom */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Format(PyExc_ImportError,
#if PY_MAJOR_VERSION < 3
"cannot import name %.230s", PyString_AS_STRING(name));
#else
"cannot import name %S", name);
#endif
}
return value;
}
/* HasAttr */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) {
PyObject *r;
if (unlikely(!__Pyx_PyBaseString_Check(n))) {
PyErr_SetString(PyExc_TypeError,
"hasattr(): attribute name must be string");
return -1;
}
r = __Pyx_GetAttr(o, n);
if (unlikely(!r)) {
PyErr_Clear();
return 0;
} else {
Py_DECREF(r);
return 1;
}
}
/* PyObject_GenericGetAttrNoDict */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) {
PyErr_Format(PyExc_AttributeError,
#if PY_MAJOR_VERSION >= 3
"'%.50s' object has no attribute '%U'",
tp->tp_name, attr_name);
#else
"'%.50s' object has no attribute '%.400s'",
tp->tp_name, PyString_AS_STRING(attr_name));
#endif
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) {
PyObject *descr;
PyTypeObject *tp = Py_TYPE(obj);
if (unlikely(!PyString_Check(attr_name))) {
return PyObject_GenericGetAttr(obj, attr_name);
}
assert(!tp->tp_dictoffset);
descr = _PyType_Lookup(tp, attr_name);
if (unlikely(!descr)) {
return __Pyx_RaiseGenericGetAttributeError(tp, attr_name);
}
Py_INCREF(descr);
#if PY_MAJOR_VERSION < 3
if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS)))
#endif
{
descrgetfunc f = Py_TYPE(descr)->tp_descr_get;
if (unlikely(f)) {
PyObject *res = f(descr, obj, (PyObject *)tp);
Py_DECREF(descr);
return res;
}
}
return descr;
}
#endif
/* PyObject_GenericGetAttr */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) {
if (unlikely(Py_TYPE(obj)->tp_dictoffset)) {
return PyObject_GenericGetAttr(obj, attr_name);
}
return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name);
}
#endif
/* SetVTable */
static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
#if PY_VERSION_HEX >= 0x02070000
PyObject *ob = PyCapsule_New(vtable, 0, 0);
#else
PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
#endif
if (!ob)
goto bad;
if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0)
goto bad;
Py_DECREF(ob);
return 0;
bad:
Py_XDECREF(ob);
return -1;
}
/* SetupReduce */
static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) {
int ret;
PyObject *name_attr;
name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2);
if (likely(name_attr)) {
ret = PyObject_RichCompareBool(name_attr, name, Py_EQ);
} else {
ret = -1;
}
if (unlikely(ret < 0)) {
PyErr_Clear();
ret = 0;
}
Py_XDECREF(name_attr);
return ret;
}
static int __Pyx_setup_reduce(PyObject* type_obj) {
int ret = 0;
PyObject *object_reduce = NULL;
PyObject *object_reduce_ex = NULL;
PyObject *reduce = NULL;
PyObject *reduce_ex = NULL;
PyObject *reduce_cython = NULL;
PyObject *setstate = NULL;
PyObject *setstate_cython = NULL;
#if CYTHON_USE_PYTYPE_LOOKUP
if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto GOOD;
#else
if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto GOOD;
#endif
#if CYTHON_USE_PYTYPE_LOOKUP
object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto BAD;
#else
object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto BAD;
#endif
reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto BAD;
if (reduce_ex == object_reduce_ex) {
#if CYTHON_USE_PYTYPE_LOOKUP
object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD;
#else
object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD;
#endif
reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto BAD;
if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) {
reduce_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_cython); if (unlikely(!reduce_cython)) goto BAD;
ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto BAD;
ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto BAD;
setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate);
if (!setstate) PyErr_Clear();
if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) {
setstate_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate_cython); if (unlikely(!setstate_cython)) goto BAD;
ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto BAD;
ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto BAD;
}
PyType_Modified((PyTypeObject*)type_obj);
}
}
goto GOOD;
BAD:
if (!PyErr_Occurred())
PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name);
ret = -1;
GOOD:
#if !CYTHON_USE_PYTYPE_LOOKUP
Py_XDECREF(object_reduce);
Py_XDECREF(object_reduce_ex);
#endif
Py_XDECREF(reduce);
Py_XDECREF(reduce_ex);
Py_XDECREF(reduce_cython);
Py_XDECREF(setstate);
Py_XDECREF(setstate_cython);
return ret;
}
/* CLineInTraceback */
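/* Decide whether C source line numbers should appear in Python tracebacks.
 * The switch is read from cython_runtime.cline_in_traceback; when the
 * attribute is missing or falsy the C line is suppressed (0 is returned),
 * and any pending exception is preserved around the lookup. */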
#ifndef CYTHON_CLINE_IN_TRACEBACK
static int __Pyx_CLineForTraceback(CYTHON_UNUSED PyThreadState *tstate, int c_line) {
PyObject *use_cline;
PyObject *ptype, *pvalue, *ptraceback;
#if CYTHON_COMPILING_IN_CPYTHON
PyObject **cython_runtime_dict;
#endif
if (unlikely(!__pyx_cython_runtime)) {
return c_line;
}
__Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
#if CYTHON_COMPILING_IN_CPYTHON
cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
if (likely(cython_runtime_dict)) {
use_cline = __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback);
} else
#endif
{
PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
if (use_cline_obj) {
use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
Py_DECREF(use_cline_obj);
} else {
PyErr_Clear();
use_cline = NULL;
}
}
if (!use_cline) {
c_line = 0;
PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
}
else if (PyObject_Not(use_cline) != 0) {
c_line = 0;
}
__Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
return c_line;
}
#endif
/* CodeObjectCache */
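/* Small sorted-array cache of the synthetic code objects created for
 * tracebacks, keyed on the line number and maintained with binary search,
 * so repeated tracebacks through the same line reuse one PyCodeObject. */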
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
int start = 0, mid = 0, end = count - 1;
if (end >= 0 && code_line > entries[end].code_line) {
return count;
}
while (start < end) {
mid = start + (end - start) / 2;
if (code_line < entries[mid].code_line) {
end = mid;
} else if (code_line > entries[mid].code_line) {
start = mid + 1;
} else {
return mid;
}
}
if (code_line <= entries[mid].code_line) {
return mid;
} else {
return mid + 1;
}
}
static PyCodeObject *__pyx_find_code_object(int code_line) {
PyCodeObject* code_object;
int pos;
if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
return NULL;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
return NULL;
}
code_object = __pyx_code_cache.entries[pos].code_object;
Py_INCREF(code_object);
return code_object;
}
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
int pos, i;
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
if (unlikely(!code_line)) {
return;
}
if (unlikely(!entries)) {
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
if (likely(entries)) {
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = 64;
__pyx_code_cache.count = 1;
entries[0].code_line = code_line;
entries[0].code_object = code_object;
Py_INCREF(code_object);
}
return;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
PyCodeObject* tmp = entries[pos].code_object;
entries[pos].code_object = code_object;
Py_DECREF(tmp);
return;
}
if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
int new_max = __pyx_code_cache.max_count + 64;
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
__pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry));
if (unlikely(!entries)) {
return;
}
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = new_max;
}
for (i=__pyx_code_cache.count; i>pos; i--) {
entries[i] = entries[i-1];
}
entries[pos].code_line = code_line;
entries[pos].code_object = code_object;
__pyx_code_cache.count++;
Py_INCREF(code_object);
}
/* AddTraceback */
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyObject *py_srcfile = 0;
PyObject *py_funcname = 0;
#if PY_MAJOR_VERSION < 3
py_srcfile = PyString_FromString(filename);
#else
py_srcfile = PyUnicode_FromString(filename);
#endif
if (!py_srcfile) goto bad;
if (c_line) {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#else
py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#endif
}
else {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromString(funcname);
#else
py_funcname = PyUnicode_FromString(funcname);
#endif
}
if (!py_funcname) goto bad;
py_code = __Pyx_PyCode_New(
0,
0,
0,
0,
0,
__pyx_empty_bytes, /*PyObject *code,*/
__pyx_empty_tuple, /*PyObject *consts,*/
__pyx_empty_tuple, /*PyObject *names,*/
__pyx_empty_tuple, /*PyObject *varnames,*/
__pyx_empty_tuple, /*PyObject *freevars,*/
__pyx_empty_tuple, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
py_line,
__pyx_empty_bytes /*PyObject *lnotab*/
);
Py_DECREF(py_srcfile);
Py_DECREF(py_funcname);
return py_code;
bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
return NULL;
}
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyFrameObject *py_frame = 0;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
if (c_line) {
c_line = __Pyx_CLineForTraceback(tstate, c_line);
}
py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
if (!py_code) {
py_code = __Pyx_CreateCodeObjectForTraceback(
funcname, c_line, py_line, filename);
if (!py_code) goto bad;
__pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
}
py_frame = PyFrame_New(
tstate, /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
__pyx_d, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
__Pyx_PyFrame_SetLineNumber(py_frame, py_line);
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags);
if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags);
PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
return -1;
}
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
PyObject *obj = view->obj;
if (!obj) return;
if (PyObject_CheckBuffer(obj)) {
PyBuffer_Release(view);
return;
}
if ((0)) {}
view->obj = NULL;
Py_DECREF(obj);
}
#endif
/* MemviewSliceIsContig */
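/* A slice is contiguous in the requested order if, walking the axes
 * innermost-first ('F': axis 0 first, otherwise the last axis first), every
 * stride equals the running product of the item size and the extents seen
 * so far, and no axis is indirect (suboffset >= 0). */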
static int
__pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim)
{
int i, index, step, start;
Py_ssize_t itemsize = mvs.memview->view.itemsize;
if (order == 'F') {
step = 1;
start = 0;
} else {
step = -1;
start = ndim - 1;
}
for (i = 0; i < ndim; i++) {
index = start + step * i;
if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize)
return 0;
itemsize *= mvs.shape[index];
}
return 1;
}
/* OverlappingSlices */
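/* Compute the half-open [start, end) byte range covered by each slice
 * (walking negative strides backwards from the data pointer) and report an
 * overlap iff the two intervals intersect. */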
static void
__pyx_get_array_memory_extents(__Pyx_memviewslice *slice,
void **out_start, void **out_end,
int ndim, size_t itemsize)
{
char *start, *end;
int i;
start = end = slice->data;
for (i = 0; i < ndim; i++) {
Py_ssize_t stride = slice->strides[i];
Py_ssize_t extent = slice->shape[i];
if (extent == 0) {
*out_start = *out_end = start;
return;
} else {
if (stride > 0)
end += stride * (extent - 1);
else
start += stride * (extent - 1);
}
}
*out_start = start;
*out_end = end + itemsize;
}
static int
__pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize)
{
void *start1, *end1, *start2, *end2;
__pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize);
__pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize);
return (start1 < end2) && (start2 < end1);
}
/* Capsule */
static CYTHON_INLINE PyObject *
__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig)
{
PyObject *cobj;
#if PY_VERSION_HEX >= 0x02070000
cobj = PyCapsule_New(p, sig, NULL);
#else
cobj = PyCObject_FromVoidPtr(p, NULL);
#endif
return cobj;
}
/* CIntToPy */
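/* Convert a C long to a Python int.  The sizeof() comparisons collapse to
 * compile-time constants, so the cheapest constructor that provably fits is
 * selected; anything wider falls back to _PyLong_FromByteArray() with the
 * host byte order detected at run time. */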
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
const long neg_one = (long) -1, const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(long) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(long) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(long) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(long),
little, !is_unsigned);
}
}
/* CIntFromPyVerify */
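/* Helper macro for the CIntFromPy conversions below: convert through a wider
 * intermediate type, verify that the value survives a round trip through the
 * target type (and is non-negative for unsigned targets), and jump to the
 * overflow handlers otherwise. */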
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
{\
func_type value = func_value;\
if (sizeof(target_type) < sizeof(func_type)) {\
if (unlikely(value != (func_type) (target_type) value)) {\
func_type zero = 0;\
if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
return (target_type) -1;\
if (is_unsigned && unlikely(value < zero))\
goto raise_neg_overflow;\
else\
goto raise_overflow;\
}\
}\
return (target_type) value;\
}
/* MemviewSliceCopyTemplate */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object)
{
__Pyx_RefNannyDeclarations
int i;
__Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } };
struct __pyx_memoryview_obj *from_memview = from_mvs->memview;
Py_buffer *buf = &from_memview->view;
PyObject *shape_tuple = NULL;
PyObject *temp_int = NULL;
struct __pyx_array_obj *array_obj = NULL;
struct __pyx_memoryview_obj *memview_obj = NULL;
__Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0);
for (i = 0; i < ndim; i++) {
if (from_mvs->suboffsets[i] >= 0) {
PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with "
"indirect dimensions (axis %d)", i);
goto fail;
}
}
shape_tuple = PyTuple_New(ndim);
if (unlikely(!shape_tuple)) {
goto fail;
}
__Pyx_GOTREF(shape_tuple);
for(i = 0; i < ndim; i++) {
temp_int = PyInt_FromSsize_t(from_mvs->shape[i]);
if(unlikely(!temp_int)) {
goto fail;
} else {
PyTuple_SET_ITEM(shape_tuple, i, temp_int);
temp_int = NULL;
}
}
array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL);
if (unlikely(!array_obj)) {
goto fail;
}
__Pyx_GOTREF(array_obj);
memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
(PyObject *) array_obj, contig_flag,
dtype_is_object,
from_mvs->memview->typeinfo);
if (unlikely(!memview_obj))
goto fail;
if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0))
goto fail;
if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim,
dtype_is_object) < 0))
goto fail;
goto no_fail;
fail:
__Pyx_XDECREF(new_mvs.memview);
new_mvs.memview = NULL;
new_mvs.data = NULL;
no_fail:
__Pyx_XDECREF(shape_tuple);
__Pyx_XDECREF(temp_int);
__Pyx_XDECREF(array_obj);
__Pyx_RefNannyFinishContext();
return new_mvs;
}
/* CIntFromPy */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
const int neg_one = (int) -1, const_zero = (int) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(int) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (int) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (int) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(int) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0])
case -2:
if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
}
#endif
if (sizeof(int) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
int val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (int) -1;
}
} else {
int val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (int) -1;
val = __Pyx_PyInt_As_int(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to int");
return (int) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to int");
return (int) -1;
}
/* CIntFromPy */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
const long neg_one = (long) -1, const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(long) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (long) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (long) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(long) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
case -2:
if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
}
#endif
if (sizeof(long) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
long val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (long) -1;
}
} else {
long val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (long) -1;
val = __Pyx_PyInt_As_long(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to long");
return (long) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long) -1;
}
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
const int neg_one = (int) -1, const_zero = (int) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(int) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(int) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(int) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(int),
little, !is_unsigned);
}
}
/* CIntFromPy */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) {
const char neg_one = (char) -1, const_zero = (char) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(char) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (char) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (char) 0;
case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0])
case 2:
if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) {
return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) {
return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) {
return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (char) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(char) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (char) 0;
case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0])
case -2:
if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
}
#endif
if (sizeof(char) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(char) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
char val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (char) -1;
}
} else {
char val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (char) -1;
val = __Pyx_PyInt_As_char(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to char");
return (char) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to char");
return (char) -1;
}
/* IsLittleEndian */
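/* Byte-order probe: store 0x01020304 in a 32-bit word and inspect its first
 * byte -- on a little-endian host the least significant byte (0x04) is
 * stored first. */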
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void)
{
union {
uint32_t u32;
uint8_t u8[4];
} S;
S.u32 = 0x01020304;
return S.u8[0] == 4;
}
/* BufferFormatCheck */
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type) {
stack[0].field = &ctx->root;
stack[0].parent_offset = 0;
ctx->root.type = type;
ctx->root.name = "buffer dtype";
ctx->root.offset = 0;
ctx->head = stack;
ctx->head->field = &ctx->root;
ctx->fmt_offset = 0;
ctx->head->parent_offset = 0;
ctx->new_packmode = '@';
ctx->enc_packmode = '@';
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->is_complex = 0;
ctx->is_valid_array = 0;
ctx->struct_alignment = 0;
while (type->typegroup == 'S') {
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = 0;
type = type->fields->type;
}
}
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
int count;
const char* t = *ts;
if (*t < '0' || *t > '9') {
return -1;
} else {
count = *t++ - '0';
        while (*t >= '0' && *t <= '9') {
count *= 10;
count += *t++ - '0';
}
}
*ts = t;
return count;
}
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
int number = __Pyx_BufFmt_ParseNumber(ts);
if (number == -1)
        PyErr_Format(PyExc_ValueError,
                     "Does not understand character buffer dtype format string ('%c')", **ts);
return number;
}
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
PyErr_Format(PyExc_ValueError,
"Unexpected format string character: '%c'", ch);
}
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
switch (ch) {
case 'c': return "'char'";
case 'b': return "'signed char'";
case 'B': return "'unsigned char'";
case 'h': return "'short'";
case 'H': return "'unsigned short'";
case 'i': return "'int'";
case 'I': return "'unsigned int'";
case 'l': return "'long'";
case 'L': return "'unsigned long'";
case 'q': return "'long long'";
case 'Q': return "'unsigned long long'";
case 'f': return (is_complex ? "'complex float'" : "'float'");
case 'd': return (is_complex ? "'complex double'" : "'double'");
case 'g': return (is_complex ? "'complex long double'" : "'long double'");
case 'T': return "a struct";
case 'O': return "Python object";
case 'P': return "a pointer";
case 's': case 'p': return "a string";
case 0: return "end";
default: return "unparseable format string";
}
}
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return 2;
case 'i': case 'I': case 'l': case 'L': return 4;
case 'q': case 'Q': return 8;
case 'f': return (is_complex ? 8 : 4);
case 'd': return (is_complex ? 16 : 8);
case 'g': {
PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')..");
return 0;
}
case 'O': case 'P': return sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
switch (ch) {
case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(short);
case 'i': case 'I': return sizeof(int);
case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(float) * (is_complex ? 2 : 1);
case 'd': return sizeof(double) * (is_complex ? 2 : 1);
case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
case 'O': case 'P': return sizeof(void*);
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
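/* The __Pyx_st_* helper structs measure alignment: for a given type T,
 * sizeof(struct { char c; T x; }) - sizeof(T) equals the padding inserted
 * after the leading char, i.e. T's alignment requirement on this ABI, which
 * is what the '@' native packing mode needs. */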
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
/* These are for computing the padding at the end of the struct to align
   on the first member of the struct. This will probably be the same as above,
   but we don't have any guarantees.
 */
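/* For example, on a typical LP64 x86-64 ABI sizeof(__Pyx_pad_double) is 16
 * while sizeof(double) is 8, so the trailing padding reported for 'd' is 8. */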
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
switch (ch) {
case 'c':
return 'H';
case 'b': case 'h': case 'i':
case 'l': case 'q': case 's': case 'p':
return 'I';
case 'B': case 'H': case 'I': case 'L': case 'Q':
return 'U';
case 'f': case 'd': case 'g':
return (is_complex ? 'C' : 'R');
case 'O':
return 'O';
case 'P':
return 'P';
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
if (ctx->head == NULL || ctx->head->field == &ctx->root) {
const char* expected;
const char* quote;
if (ctx->head == NULL) {
expected = "end";
quote = "";
} else {
expected = ctx->head->field->type->name;
quote = "'";
}
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected %s%s%s but got %s",
quote, expected, quote,
__Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
} else {
__Pyx_StructField* field = ctx->head->field;
__Pyx_StructField* parent = (ctx->head - 1)->field;
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
parent->type->name, field->name);
}
}
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
char group;
size_t size, offset, arraysize = 1;
if (ctx->enc_type == 0) return 0;
if (ctx->head->field->type->arraysize[0]) {
int i, ndim = 0;
if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
ctx->is_valid_array = ctx->head->field->type->ndim == 1;
ndim = 1;
if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %zu",
ctx->head->field->type->arraysize[0], ctx->enc_count);
return -1;
}
}
if (!ctx->is_valid_array) {
PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
ctx->head->field->type->ndim, ndim);
return -1;
}
for (i = 0; i < ctx->head->field->type->ndim; i++) {
arraysize *= ctx->head->field->type->arraysize[i];
}
ctx->is_valid_array = 0;
ctx->enc_count = 1;
}
group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
do {
__Pyx_StructField* field = ctx->head->field;
__Pyx_TypeInfo* type = field->type;
if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
} else {
size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
}
if (ctx->enc_packmode == '@') {
size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
size_t align_mod_offset;
if (align_at == 0) return -1;
align_mod_offset = ctx->fmt_offset % align_at;
if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
if (ctx->struct_alignment == 0)
ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
ctx->is_complex);
}
if (type->size != size || type->typegroup != group) {
if (type->typegroup == 'C' && type->fields != NULL) {
size_t parent_offset = ctx->head->parent_offset + field->offset;
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = parent_offset;
continue;
}
if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
} else {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
}
offset = ctx->head->parent_offset + field->offset;
if (ctx->fmt_offset != offset) {
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
(Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
return -1;
}
ctx->fmt_offset += size;
if (arraysize)
ctx->fmt_offset += (arraysize - 1) * size;
--ctx->enc_count;
while (1) {
if (field == &ctx->root) {
ctx->head = NULL;
if (ctx->enc_count != 0) {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
break;
}
ctx->head->field = ++field;
if (field->type == NULL) {
--ctx->head;
field = ctx->head->field;
continue;
} else if (field->type->typegroup == 'S') {
size_t parent_offset = ctx->head->parent_offset + field->offset;
if (field->type->fields->type == NULL) continue;
field = field->type->fields;
++ctx->head;
ctx->head->field = field;
ctx->head->parent_offset = parent_offset;
break;
} else {
break;
}
}
} while (ctx->enc_count);
ctx->enc_type = 0;
ctx->is_complex = 0;
return 0;
}
static PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
const char *ts = *tsp;
int i = 0, number;
int ndim = ctx->head->field->type->ndim;
++ts;
if (ctx->new_count != 1) {
PyErr_SetString(PyExc_ValueError,
"Cannot handle repeated arrays in format string");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
while (*ts && *ts != ')') {
switch (*ts) {
      case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': ++ts; continue;
default: break;
}
number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
return PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %d",
ctx->head->field->type->arraysize[i], number);
if (*ts != ',' && *ts != ')')
return PyErr_Format(PyExc_ValueError,
"Expected a comma in format string, got '%c'", *ts);
if (*ts == ',') ts++;
i++;
}
if (i != ndim)
return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
ctx->head->field->type->ndim, i);
if (!*ts) {
PyErr_SetString(PyExc_ValueError,
"Unexpected end of format string, expected ')'");
return NULL;
}
ctx->is_valid_array = 1;
ctx->new_count = 1;
*tsp = ++ts;
return Py_None;
}
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
int got_Z = 0;
while (1) {
switch(*ts) {
case 0:
if (ctx->enc_type != 0 && ctx->head == NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
if (ctx->head != NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
return ts;
case ' ':
case '\r':
case '\n':
++ts;
break;
case '<':
if (!__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '>':
case '!':
if (__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '=':
case '@':
case '^':
ctx->new_packmode = *ts++;
break;
case 'T':
{
const char* ts_after_sub;
size_t i, struct_count = ctx->new_count;
size_t struct_alignment = ctx->struct_alignment;
ctx->new_count = 1;
++ts;
if (*ts != '{') {
PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
ctx->enc_count = 0;
ctx->struct_alignment = 0;
++ts;
ts_after_sub = ts;
for (i = 0; i != struct_count; ++i) {
ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
if (!ts_after_sub) return NULL;
}
ts = ts_after_sub;
if (struct_alignment) ctx->struct_alignment = struct_alignment;
}
break;
case '}':
{
size_t alignment = ctx->struct_alignment;
++ts;
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
if (alignment && ctx->fmt_offset % alignment) {
ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
}
}
return ts;
case 'x':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->fmt_offset += ctx->new_count;
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->enc_packmode = ctx->new_packmode;
++ts;
break;
case 'Z':
got_Z = 1;
++ts;
if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
__Pyx_BufFmt_RaiseUnexpectedChar('Z');
return NULL;
}
CYTHON_FALLTHROUGH;
case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
case 'l': case 'L': case 'q': case 'Q':
case 'f': case 'd': case 'g':
case 'O': case 'p':
if (ctx->enc_type == *ts && got_Z == ctx->is_complex &&
ctx->enc_packmode == ctx->new_packmode) {
ctx->enc_count += ctx->new_count;
ctx->new_count = 1;
got_Z = 0;
++ts;
break;
}
CYTHON_FALLTHROUGH;
case 's':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_count = ctx->new_count;
ctx->enc_packmode = ctx->new_packmode;
ctx->enc_type = *ts;
ctx->is_complex = got_Z;
++ts;
ctx->new_count = 1;
got_Z = 0;
break;
case ':':
++ts;
while(*ts != ':') ++ts;
++ts;
break;
case '(':
if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
break;
default:
{
int number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
ctx->new_count = (size_t)number;
}
}
}
}
/* TypeInfoCompare */
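/* Structural equality for __Pyx_TypeInfo: size, type group, signedness,
 * dimensionality, array extents and (recursively) struct fields must all
 * match; the 'H' (bytes) group is compared by size alone. */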
static int
__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b)
{
int i;
if (!a || !b)
return 0;
if (a == b)
return 1;
if (a->size != b->size || a->typegroup != b->typegroup ||
a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) {
if (a->typegroup == 'H' || b->typegroup == 'H') {
return a->size == b->size;
} else {
return 0;
}
}
if (a->ndim) {
for (i = 0; i < a->ndim; i++)
if (a->arraysize[i] != b->arraysize[i])
return 0;
}
if (a->typegroup == 'S') {
if (a->flags != b->flags)
return 0;
if (a->fields || b->fields) {
if (!(a->fields && b->fields))
return 0;
for (i = 0; a->fields[i].type && b->fields[i].type; i++) {
__Pyx_StructField *field_a = a->fields + i;
__Pyx_StructField *field_b = b->fields + i;
if (field_a->offset != field_b->offset ||
!__pyx_typeinfo_cmp(field_a->type, field_b->type))
return 0;
}
return !a->fields[i].type && !b->fields[i].type;
}
}
return 1;
}
/* MemviewSliceValidateAndInit */
static int
__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec)
{
if (buf->shape[dim] <= 1)
return 1;
if (buf->strides) {
if (spec & __Pyx_MEMVIEW_CONTIG) {
if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) {
if (buf->strides[dim] != sizeof(void *)) {
PyErr_Format(PyExc_ValueError,
"Buffer is not indirectly contiguous "
"in dimension %d.", dim);
goto fail;
}
} else if (buf->strides[dim] != buf->itemsize) {
PyErr_SetString(PyExc_ValueError,
"Buffer and memoryview are not contiguous "
"in the same dimension.");
goto fail;
}
}
if (spec & __Pyx_MEMVIEW_FOLLOW) {
Py_ssize_t stride = buf->strides[dim];
if (stride < 0)
stride = -stride;
if (stride < buf->itemsize) {
PyErr_SetString(PyExc_ValueError,
"Buffer and memoryview are not contiguous "
"in the same dimension.");
goto fail;
}
}
} else {
if (spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1) {
PyErr_Format(PyExc_ValueError,
"C-contiguous buffer is not contiguous in "
"dimension %d", dim);
goto fail;
} else if (spec & (__Pyx_MEMVIEW_PTR)) {
PyErr_Format(PyExc_ValueError,
"C-contiguous buffer is not indirect in "
"dimension %d", dim);
goto fail;
} else if (buf->suboffsets) {
PyErr_SetString(PyExc_ValueError,
"Buffer exposes suboffsets but no strides");
goto fail;
}
}
return 1;
fail:
return 0;
}
static int
__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec)
{
if (spec & __Pyx_MEMVIEW_DIRECT) {
if (buf->suboffsets && buf->suboffsets[dim] >= 0) {
PyErr_Format(PyExc_ValueError,
"Buffer not compatible with direct access "
"in dimension %d.", dim);
goto fail;
}
}
if (spec & __Pyx_MEMVIEW_PTR) {
if (!buf->suboffsets || (buf->suboffsets && buf->suboffsets[dim] < 0)) {
PyErr_Format(PyExc_ValueError,
"Buffer is not indirectly accessible "
"in dimension %d.", dim);
goto fail;
}
}
return 1;
fail:
return 0;
}
static int
__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag)
{
int i;
if (c_or_f_flag & __Pyx_IS_F_CONTIG) {
Py_ssize_t stride = 1;
for (i = 0; i < ndim; i++) {
if (stride * buf->itemsize != buf->strides[i] &&
buf->shape[i] > 1)
{
PyErr_SetString(PyExc_ValueError,
"Buffer not fortran contiguous.");
goto fail;
}
stride = stride * buf->shape[i];
}
} else if (c_or_f_flag & __Pyx_IS_C_CONTIG) {
Py_ssize_t stride = 1;
        for (i = ndim - 1; i > -1; i--) {
if (stride * buf->itemsize != buf->strides[i] &&
buf->shape[i] > 1) {
PyErr_SetString(PyExc_ValueError,
"Buffer not C contiguous.");
goto fail;
}
stride = stride * buf->shape[i];
}
}
return 1;
fail:
return 0;
}
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj)
{
struct __pyx_memoryview_obj *memview, *new_memview;
__Pyx_RefNannyDeclarations
Py_buffer *buf;
int i, spec = 0, retval = -1;
__Pyx_BufFmt_Context ctx;
int from_memoryview = __pyx_memoryview_check(original_obj);
__Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0);
if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *)
original_obj)->typeinfo)) {
memview = (struct __pyx_memoryview_obj *) original_obj;
new_memview = NULL;
} else {
memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
original_obj, buf_flags, 0, dtype);
new_memview = memview;
if (unlikely(!memview))
goto fail;
}
buf = &memview->view;
if (buf->ndim != ndim) {
PyErr_Format(PyExc_ValueError,
"Buffer has wrong number of dimensions (expected %d, got %d)",
ndim, buf->ndim);
goto fail;
}
if (new_memview) {
__Pyx_BufFmt_Init(&ctx, stack, dtype);
if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
}
if ((unsigned) buf->itemsize != dtype->size) {
PyErr_Format(PyExc_ValueError,
"Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) "
"does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)",
buf->itemsize,
(buf->itemsize > 1) ? "s" : "",
dtype->name,
dtype->size,
(dtype->size > 1) ? "s" : "");
goto fail;
}
for (i = 0; i < ndim; i++) {
spec = axes_specs[i];
if (!__pyx_check_strides(buf, i, ndim, spec))
goto fail;
if (!__pyx_check_suboffsets(buf, i, ndim, spec))
goto fail;
}
if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))
goto fail;
if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice,
new_memview != NULL) == -1)) {
goto fail;
}
retval = 0;
goto no_fail;
fail:
Py_XDECREF(new_memview);
retval = -1;
no_fail:
__Pyx_RefNannyFinishContext();
return retval;
}
/* ObjectToMemviewSlice */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_float(PyObject *obj, int writable_flag) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0,
PyBUF_RECORDS_RO | writable_flag, 2,
&__Pyx_TypeInfo_float, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
/* ObjectToMemviewSlice */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_float(PyObject *obj, int writable_flag) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0,
PyBUF_RECORDS_RO | writable_flag, 1,
&__Pyx_TypeInfo_float, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
/* CheckBinaryVersion */
static int __Pyx_check_binary_version(void) {
char ctversion[4], rtversion[4];
PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
char message[200];
PyOS_snprintf(message, sizeof(message),
"compiletime version %s of module '%.100s' "
"does not match runtime version %s",
ctversion, __Pyx_MODULE_NAME, rtversion);
return PyErr_WarnEx(NULL, message, 1);
}
return 0;
}
/* InitStrings */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
if (PyObject_Hash(*t->p) == -1)
return -1;
++t;
}
return 0;
}
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
}
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
Py_ssize_t ignore;
return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
#if !CYTHON_PEP393_ENABLED
static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
char* defenc_c;
PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
if (!defenc) return NULL;
defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
{
char* end = defenc_c + PyBytes_GET_SIZE(defenc);
char* c;
for (c = defenc_c; c < end; c++) {
if ((unsigned char) (*c) >= 128) {
PyUnicode_AsASCIIString(o);
return NULL;
}
}
}
#endif
*length = PyBytes_GET_SIZE(defenc);
return defenc_c;
}
#else
static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
if (likely(PyUnicode_IS_ASCII(o))) {
*length = PyUnicode_GET_LENGTH(o);
return PyUnicode_AsUTF8(o);
} else {
PyUnicode_AsASCIIString(o);
return NULL;
}
#else
return PyUnicode_AsUTF8AndSize(o, length);
#endif
}
#endif
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
__Pyx_sys_getdefaultencoding_not_ascii &&
#endif
PyUnicode_Check(o)) {
return __Pyx_PyUnicode_AsStringAndSize(o, length);
} else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
if (PyByteArray_Check(o)) {
*length = PyByteArray_GET_SIZE(o);
return PyByteArray_AS_STRING(o);
} else
#endif
{
char* result;
int r = PyBytes_AsStringAndSize(o, &result, length);
if (unlikely(r < 0)) {
return NULL;
} else {
return result;
}
}
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
int is_true = x == Py_True;
if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
else return PyObject_IsTrue(x);
}
static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
#if PY_MAJOR_VERSION >= 3
if (PyLong_Check(result)) {
if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
"__int__ returned non-int (type %.200s). "
"The ability to return an instance of a strict subclass of int "
"is deprecated, and may be removed in a future version of Python.",
Py_TYPE(result)->tp_name)) {
Py_DECREF(result);
return NULL;
}
return result;
}
#endif
PyErr_Format(PyExc_TypeError,
"__%.4s__ returned non-%.4s (type %.200s)",
type_name, type_name, Py_TYPE(result)->tp_name);
Py_DECREF(result);
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
PyNumberMethods *m;
#endif
const char *name = NULL;
PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x) || PyLong_Check(x)))
#else
if (likely(PyLong_Check(x)))
#endif
return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
if (m && m->nb_int) {
name = "int";
res = m->nb_int(x);
}
else if (m && m->nb_long) {
name = "long";
res = m->nb_long(x);
}
#else
if (likely(m && m->nb_int)) {
name = "int";
res = m->nb_int(x);
}
#endif
#else
if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
res = PyNumber_Int(x);
}
#endif
if (likely(res)) {
#if PY_MAJOR_VERSION < 3
if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
#else
if (unlikely(!PyLong_CheckExact(res))) {
#endif
return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject *x;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(b))) {
if (sizeof(Py_ssize_t) >= sizeof(long))
return PyInt_AS_LONG(b);
else
return PyInt_AsSsize_t(b);
}
#endif
if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)b)->ob_digit;
const Py_ssize_t size = Py_SIZE(b);
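/* Fast path note: CPython stores a long's magnitude as base-2**PyLong_SHIFT
 * "digits" (PyLong_SHIFT is typically 30 on 64-bit builds), and Py_SIZE()
 * is the digit count, negated for negative values.  For example, with two
 * digits the value is (digits[1] << PyLong_SHIFT) | digits[0], which is what
 * the size == 2 case below reassembles without calling PyLong_AsSsize_t. */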
if (likely(__Pyx_sst_abs(size) <= 1)) {
ival = likely(size) ? digits[0] : 0;
if (size == -1) ival = -ival;
return ival;
} else {
switch (size) {
case 2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
}
}
#endif
return PyLong_AsSsize_t(b);
}
x = PyNumber_Index(b);
if (!x) return -1;
ival = PyInt_AsSsize_t(x);
Py_DECREF(x);
return ival;
}
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
}
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
|
GB_unaryop__identity_int32_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int32_fp32
// op(A') function: GB_tran__identity_int32_fp32
// C type: int32_t
// A type: float
// cast: int32_t cij ; GB_CAST_SIGNED(cij,aij,32)
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
int32_t z ; GB_CAST_SIGNED(z,aij,32) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
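// Illustrative expansion of GB_CAST_OP(p,p) using the macros above (a sketch
// for readability, not additional generated code):
//      float aij = Ax [p] ;                        // GB_GETA
//      int32_t z ; GB_CAST_SIGNED (z, aij, 32) ;   // GB_CASTING
//      Cx [p] = z ;                                // GB_OP into GB_CX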
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_int32_fp32
(
int32_t *Cx, // Cx and Ax may be aliased
float *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_int32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__bnot_uint16_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__bnot_uint16_uint16
// op(A') function: GB_unop_tran__bnot_uint16_uint16
// C type: uint16_t
// A type: uint16_t
// cast: uint16_t cij = aij
// unaryop: cij = ~(aij)
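// For example, an entry aij = 0x00FF yields cij = 0xFF00: the uint16_t value
// is copied unchanged by the cast and then bitwise-complemented.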
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ~(x) ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = aij ; \
Cx [pC] = ~(z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BNOT || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__bnot_uint16_uint16
(
uint16_t *Cx, // Cx and Ax may be aliased
const uint16_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (uint16_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t aij = Ax [p] ;
uint16_t z = aij ;
Cx [p] = ~(z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint16_t aij = Ax [p] ;
uint16_t z = aij ;
Cx [p] = ~(z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__bnot_uint16_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <memory>
#include <string>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OverloadCandidate;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
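// A rough sketch of the cache behaviour above (M, FidA and FidB are
// hypothetical names, not a prescribed usage pattern):
//   FileNullabilityMap M;
//   M[FidA];                            // miss: FidA's record enters the cache
//   M[FidA].SawTypeNullability = true;  // hit: served from the cache, no Map lookup
//   M[FidB];                            // flushes FidA's entry to Map, caches FidB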
/// Sema - This implements semantic analysis and AST building for C.
class Sema {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
///Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See comment in below overload for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
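// A minimal sketch of the sentinel behaviour described above, using the pack
// stack and the slot name from the comment ("InternalPragmaSlot") purely for
// illustration:
//   PackStack.SentinelAction(PSK_Push, "InternalPragmaSlot"); // entering a method body
//   ... any #pragma pack changes inside the body sit above the sentinel ...
//   PackStack.SentinelAction(PSK_Pop,  "InternalPragmaSlot"); // exit: entry value restored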
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispAttr::Mode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// Address space #pragmas.
PragmaStack<LangAS> AddrSpaceStack;
PragmaStack<LangAS> StgAddrSpaceStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// Current prefix for auto-generated 64/32 interop thunks.
StringLiteral *CurPtr32ThunkPrefix;
SourceLocation CurPtr32ThunkPrefixLoc;
/// Current name of the 32-bit code segment selector variable.
StringLiteral *CurPtr32CS32Name;
SourceLocation CurPtr32CS32NameLoc;
/// Current name of the 64-bit code segment selector variable.
StringLiteral *CurPtr32CS64Name;
SourceLocation CurPtr32CS64NameLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// Store a list of either DeclRefExprs or MemberExprs
/// that contain a reference to a variable (constant) that may or may not
/// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue
/// and discarded value conversions have been applied to all subexpressions
/// of the enclosing full expression. This is cleared at the end of each
/// full expression.
llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> PreallocatedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
/// All the members seen during a class definition which were both
/// explicitly defaulted and had explicitly-specified exception
/// specifications, along with the function type containing their
/// user-specified exception specification. Those exception specifications
/// were overridden with the default specifications, but we still need to
/// check whether they are compatible with the default specification, and
/// we can't do that until the nesting set of class definitions is complete.
SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2>
DelayedDefaultedMemberExceptionSpecs;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
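// A rough usage sketch (given some sema::DelayedDiagnosticPool `Pool`; the
// names here are hypothetical, not a prescribed pattern):
//   DelayedDiagnosticsState State = DelayedDiagnostics.push(Pool);
//   ... access/deprecation diagnostics accumulate in Pool ...
//   DelayedDiagnostics.popWithoutEmitting(State); // the caller then decides
//                                                 // which pooled diagnostics to emit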
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
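// Typical shape of a ContextRAII use (a sketch; SemaRef and FnDC are
// hypothetical names):
//   {
//     Sema::ContextRAII Saved(SemaRef, FnDC); // CurContext := FnDC
//     ... work performed inside FnDC ...
//   } // destructor (or Saved.pop()) restores CurContext, the delayed
//     // diagnostics state and the CXXThis type override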
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before being declared. Rare; may alias another
/// identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before being declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields. This is really a
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
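// Rough illustrations of the contexts above (the snippets are only examples):
//   sizeof(f())              // 'f()' is an Unevaluated operand
//   case kLimit:             // 'kLimit' is ConstantEvaluated
//   use(x + 1);              // 'x + 1' is PotentiallyEvaluated
//   void g(int n = make());  // default argument: PotentiallyEvaluatedIfUsed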
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// Whether we are in a decltype expression.
bool IsDecltype;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// The context information used to mangle lambda expressions
/// and block literals within this context.
///
/// This mangling information is allocated lazily, since most contexts
/// do not have lambda expressions or block literals.
std::unique_ptr<MangleNumberingContext> MangleNumbering;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), MangleNumbering(),
ExprContext(ExprContext) {}
/// Retrieve the mangling numbering context, used to consistently
/// number constructs like lambdas for mangling.
MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
const DeclContext *DC,
Decl *&ManglingContextDecl);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedButUsed - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
};
void addImplicitTypedef(StringRef Name, QualType T);
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
///Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case anyway.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
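// Usage sketch (illustrative): callers stream arguments into the returned
// SemaDiagnosticBuilder; the diagnostic is emitted via EmitCurrentDiagnostic()
// when the temporary builder is destroyed. The diagnostic ID below is
// hypothetical.
//
//   Diag(D->getLocation(), diag::err_example_problem)   // hypothetical ID
//       << D->getDeclName() << SomeType
//       << SourceRange(StartLoc, EndLoc);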
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD,
CapturedRegionKind K);
void
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
const BlockExpr *blkExpr = nullptr);
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
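// Usage note (illustrative): FunctionScopes is empty at file scope, so the
// result of getCurFunction() is checked before use; the setter call below is
// an assumption about FunctionScopeInfo's interface made for the example.
//
//   if (sema::FunctionScopeInfo *FSI = getCurFunction())
//     FSI->setHasBranchIntoScope();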
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if we should find the top-most
/// lambda scope info, ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
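// Call sketch (illustrative, not from the original header): ParamTypes is a
// MutableArrayRef because BuildFunctionType may adjust the parameter types in
// place; the concrete types below are just examples.
//
//   SmallVector<QualType, 4> ParamTys = {Context.IntTy, Context.FloatTy};
//   FunctionProtoType::ExtProtoInfo EPI;
//   QualType FnTy = BuildFunctionType(Context.VoidTy, ParamTys, Loc,
//                                     DeclarationName(), EPI);
//   // On success, FnTy's unqualified type is a FunctionProtoType;
//   // on error it is null.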
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
llvm::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, llvm::index_sequence_for<Ts...>());
DB << T;
}
};
private:
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser *Diagnoser);
struct ModuleScope {
clang::Module *Module = nullptr;
bool ModuleInterface = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return !D->isHidden() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T) {
return !RequireCompleteTypeImpl(Loc, T, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
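// Usage sketch (illustrative): the boolean result means "a diagnostic was
// emitted", so callers typically bail out on true. The diagnostic ID is shown
// only as an example of the DiagID parameter, not a requirement of this
// interface.
//
//   if (RequireCompleteType(Loc, T, diag::err_typecheck_decl_incomplete_type))
//     return true;   // T was incomplete; the error has already been reported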
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
NC_Unknown,
NC_Error,
NC_Keyword,
NC_Type,
NC_Expression,
NC_NestedNameSpecifier,
NC_TypeTemplate,
NC_VarTemplate,
NC_FunctionTemplate
};
class NameClassification {
NameClassificationKind Kind;
ExprResult Expr;
TemplateName Template;
ParsedType Type;
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification NestedNameSpecifier() {
return NameClassification(NC_NestedNameSpecifier);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
ExprResult getExpression() const {
assert(Kind == NC_Expression);
return Expr;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification
ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name,
SourceLocation NameLoc, const Token &NextToken,
bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);
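// Consumption sketch (illustrative): parser-side code usually switches on the
// kind returned from ClassifyName(); the surrounding names ('Actions', 'SS',
// 'Name', 'NameLoc', 'Next') are placeholders.
//
//   Sema::NameClassification Classification =
//       Actions.ClassifyName(getCurScope(), SS, Name, NameLoc, Next,
//                            /*IsAddressOfOperand=*/false);
//   switch (Classification.getKind()) {
//   case Sema::NC_Type:
//     /* use Classification.getType() as a type specifier */
//     break;
//   case Sema::NC_Expression:
//     /* use Classification.getExpression() */
//     break;
//   default:
//     break;
//   }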
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
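// Caller sketch (illustrative): the parser pairs this predicate with
// diagnoseExprIntendedAsTemplateName when an expression is followed by a '<'
// that cannot begin a valid expression; all names below are placeholders for
// the parser-side variables.
//
//   bool Dependent = false;
//   if (Actions.mightBeIntendedToBeTemplateName(PotentialTemplateName,
//                                               Dependent))
//     Actions.diagnoseExprIntendedAsTemplateName(
//         getCurScope(), PotentialTemplateName, LessLoc, GreaterLoc);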
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
Partition, ///< 'module partition X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path);
/// The parser has processed a module import declaration.
///
/// \param AtLoc The location of the '@' symbol, if any.
///
/// \param ImportLoc The location of the 'import' keyword.
///
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
ModuleIdPath Path);
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike in C++, we actually parse the body and reject or error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// Make the given externally-produced declaration visible at the
/// top level scope.
///
/// \param D The externally-produced declaration to push.
///
/// \param Name The name of the externally-produced declaration.
void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Attribute merging methods. Return the new attribute if one was added,
/// and null otherwise.
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
IdentifierInfo *Platform,
bool Implicit,
VersionTuple Introduced,
VersionTuple Deprecated,
VersionTuple Obsoleted,
bool IsUnavailable,
StringRef Message,
bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK,
unsigned AttrSpellingListIndex);
TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
TypeVisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
VisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex, StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
unsigned AttrSpellingListIndex,
MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
IdentifierInfo *Format, int FormatIdx,
int FirstArg, unsigned AttrSpellingListIndex);
SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation.
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true);
/// Checks availability of the function depending on the current
/// function context. Inside an unavailable function, unavailability is ignored.
///
/// \returns true if \p FD is unavailable and current context is inside
/// an available function, false otherwise.
bool isFunctionConsideredUnavailable(FunctionDecl *FD);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion,
bool &AddressSpaceConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf ///< Condition in a constexpr if statement.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
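// Sketch of a concrete diagnoser (illustrative only): callers derive from
// ICEConvertDiagnoser, override diagnoseNotInt (and the remaining pure virtual
// diagnose*/note* methods inherited from ContextualImplicitConverter, elided
// here), and hand an instance to PerformContextualImplicitConversion. The
// diagnostic ID is hypothetical.
//
//   struct SizeDiagnoser : Sema::ICEConvertDiagnoser {
//     SizeDiagnoser()
//         : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/false,
//                               /*Suppress=*/false,
//                               /*SuppressConversion=*/false) {}
//     Sema::SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
//                                                QualType T) override {
//       return S.Diag(Loc, diag::err_expr_not_integral) << T;  // hypothetical
//     }
//     // ... the other diagnose*/note* overrides are required before the
//     // class can be instantiated ...
//   };
//
//   SizeDiagnoser Diagnoser;
//   ExprResult Converted =
//       SemaRef.PerformContextualImplicitConversion(Loc, SizeExpr, Diagnoser);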
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
void AddOverloadCandidate(FunctionDecl *Function,
DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = false,
ConversionSequenceList EarlyConversions = None);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate,
ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions,
bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr,
QualType ObjectType = QualType(),
Expr::Classification
ObjectClassification = {});
void AddConversionCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet& CandidateSet,
bool AllowObjCConversionOnExplicit,
bool AllowResultConversion = true);
void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet,
bool AllowObjCConversionOnExplicit,
bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
QualType DestType = QualType(),
bool TakingAddress = false);
// Emit as a series of 'note's all templates and non-templates identified by
// the expression Expr.
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
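/// For illustration, a hedged sketch of the kind of declaration this routine
/// checks (the function name here is made up; the attribute spelling follows
/// Clang's enable_if support):
/// \code
///   void resize(int NewSize)
///       __attribute__((enable_if(NewSize > 0, "size must be positive")));
///   // For a call like resize(0), the condition fails and CheckEnableIf
///   // returns the failing EnableIfAttr so the caller can diagnose it.
/// \endcode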
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
///
/// \param AllowTopLevelCond Whether to allow the result to be the
/// complete top-level condition.
std::pair<Expr *, std::string>
findFailedBooleanCondition(Expr *Cond, bool AllowTopLevelCond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
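/// For illustration only, a rough sketch of an argument-dependent diagnose_if
/// attribute (hypothetical function; spelling per Clang's diagnose_if
/// support). Because the condition reads a call argument, it is re-checked at
/// every direct call:
/// \code
///   int divide(int Num, int Denom)
///       __attribute__((diagnose_if(Denom == 0, "division by zero", "warning")));
/// \endcode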
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConverion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
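/// A minimal usage sketch of unqualified lookup, assuming a DeclarationName
/// 'Name' and a SourceLocation 'Loc' are already in scope (illustrative, not
/// taken from a particular caller):
/// \code
///   LookupResult R(*this, Name, Loc, LookupOrdinaryName);
///   if (LookupName(R, S) && R.isSingleResult())
///     NamedDecl *D = R.getFoundDecl();
/// \endcode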
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up any declaration with any name.
LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
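/// A hedged sketch of how a redeclaration lookup is typically formed
/// (hypothetical caller; 'NewFD' is a declaration being added to the current
/// context):
/// \code
///   LookupResult Prev(*this, NewFD->getDeclName(), NewFD->getLocation(),
///                     LookupOrdinaryName, forRedeclarationInCurContext());
///   LookupName(Prev, S);
///   // Prev now holds any prior declarations relevant for redeclaration
///   // checking.
/// \endcode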
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
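/// For reference, hedged examples of the user-level operator declarations the
/// non-error results correspond to (illustrative only):
/// \code
///   unsigned operator""_ki(unsigned long long);     // LOLR_Cooked
///   unsigned operator""_raw(const char *);          // LOLR_Raw
///   template <char...> unsigned operator""_bits();  // LOLR_Template
/// \endcode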
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non error recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
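/// A rough usage sketch of the Filter callback (hypothetical predicate): keep
/// only corrected expressions of integer type, otherwise ask for another
/// combination of corrections:
/// \code
///   ExprResult Fixed = CorrectDelayedTyposInExpr(
///       E, /*InitDecl=*/nullptr, [](Expr *Candidate) -> ExprResult {
///         if (Candidate->getType()->isIntegerType())
///           return Candidate;
///         return ExprError();
///       });
/// \endcode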
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType &T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation's declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar that
/// backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns that ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See the description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and the
/// CheckTheOther parameter is set, it then checks the other kind. If no such
/// method, or only one method, is found, the function returns false;
/// otherwise, it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns the method that best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
/// global pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(ActOnFinishFullExpr(Arg, CC).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
StmtResult ActOnExprStmt(ExprResult Arg);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
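/// A minimal usage sketch (hypothetical caller with a Sema reference
/// 'SemaRef'):
/// \code
///   {
///     Sema::CompoundScopeRAII BodyScope(SemaRef);
///     // ... act on the statements of the compound body ...
///   } // ActOnFinishOfCompoundStmt runs here.
/// \endcode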
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
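/// A hedged sketch: the pushed function scope is popped on every exit path
/// unless ownership is handed off (hypothetical caller):
/// \code
///   FunctionScopeRAII PopScope(SemaRef);
///   if (BuildFailed)            // hypothetical error condition
///     return StmtError();       // scope is popped automatically
///   PopScope.disable();         // keep the scope alive past this point
/// \endcode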
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReciever = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
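//
// For background, an illustrative user-level example (not Sema code): an
// unqualified call to a pure virtual function is not an odr-use, while a
// qualified call is, so only the latter requires a definition:
//
//   struct B { virtual void f() = 0; };
//   void g(B &b) { b.f(); }     // not an odr-use of B::f
//   void h(B &b) { b.B::f(); }  // odr-uses B::f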
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void UpdateMarkingForLValueToRValue(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
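/// A rough usage sketch of the overload above, used only to query whether a
/// capture would succeed (hypothetical caller):
/// \code
///   QualType CaptureType, DeclRefType;
///   if (tryCaptureVariable(Var, Loc, TryCapture_Implicit, SourceLocation(),
///                          /*BuildAndDiagnose=*/false, CaptureType,
///                          DeclRefType,
///                          /*FunctionScopeIndexToStopAt=*/nullptr))
///     return true; // the variable cannot be captured here
/// \endcode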
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
std::unique_ptr<CorrectionCandidateCallback> CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
ExprResult
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentType IT);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to reinvoke ActOnMemberAccess after changing the
// access operator from a '.' to a '->' (to see if that is the change needed
// to fix an error about an unknown member, e.g. when the class defines a
// custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
SourceLocation LParenLoc,
ArrayRef<Expr *> Arg,
SourceLocation RParenLoc,
Expr *Config = nullptr,
bool IsExecConfig = false);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
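/// For illustration, the CUDA kernel-launch syntax these locations describe
/// (LLLLoc = '<<<', GGGLoc = '>>>'; the kernel and arguments are hypothetical):
/// \code
///   kernel<<<dim3(64), dim3(256)>>>(devPtr, n);
/// \endcode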
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr,
bool isTruncateCast = false, bool isASCast = false);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op, bool isTruncateCast = false,
bool isASCast = false);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation Loc,
bool GNUSyntax,
ExprResult Init);
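/// For illustration, typical designated initializers that reach
/// ActOnDesignatedInitializer (types and values are hypothetical):
/// \code
///   struct Point { int x, y; };
///   struct Point p = { .y = 2, .x = 1 };
///   int arr[8] = { [3] = 7, [5] = 9 };
/// \endcode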
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
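/// Sketch of the GNU address-of-label extension handled above:
/// \code
///   void *target = &&done;   // '&&' takes the address of a label
///   goto *target;            // computed goto through that address
/// done: ;
/// \endcode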
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
void ActOnStmtExprError();
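/// Sketch of a GNU statement expression; its value is that of the last
/// sub-statement (f() is a hypothetical helper):
/// \code
///   int twice = ({ int t = f(); t * 2; });
/// \endcode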
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
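/// Illustrative use of the builtin handled above (the type is hypothetical);
/// each component after the type maps to one OffsetOfComponent entry:
/// \code
///   struct S { int a; struct { char c; } b[4]; };
///   size_t off = __builtin_offsetof(struct S, b[2].c);
/// \endcode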
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
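/// For illustration: the condition must be a constant expression, and only
/// the chosen operand contributes its type and value:
/// \code
///   int bits = __builtin_choose_expr(sizeof(long) == 8, 64, 32);
/// \endcode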
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
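/// Sketch: the standard va_arg macro lowers to this builtin ('ap' is assumed
/// to be a va_list already initialized with va_start):
/// \code
///   int next = __builtin_va_arg(ap, int);
/// \endcode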
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
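/// Illustrative Microsoft-mode source form these statements model (Widget and
/// w are hypothetical):
/// \code
///   __if_exists(Widget::paint)     { w.paint(); }
///   __if_not_exists(Widget::paint) { /* fallback */ }
/// \endcode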
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
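/// Sketch of a block literal and its use under the Blocks extension:
/// \code
///   int (^add)(int, int) = ^(int a, int b) { return a + b; };
///   int r = add(1, 2);
/// \endcode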
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
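/// Illustrative element-wise vector conversion (the vector typedefs are
/// hypothetical; source and destination must have the same element count):
/// \code
///   typedef int   int4   __attribute__((ext_vector_type(4)));
///   typedef float float4 __attribute__((ext_vector_type(4)));
///   float4 toFloat(int4 v) { return __builtin_convertvector(v, float4); }
/// \endcode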
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
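/// Sketch of the OpenCL-style bit reinterpretation this builds; source and
/// destination types must have the same size (typedefs are hypothetical):
/// \code
///   typedef unsigned uint4  __attribute__((ext_vector_type(4)));
///   typedef float    float4 __attribute__((ext_vector_type(4)));
///   float4 asFloat(uint4 u) { return __builtin_astype(u, float4); }
/// \endcode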
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing whether we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
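/// Illustrative C++17 fold-expressions handled by the functions above; the
/// empty-pack case for '&&' is what BuildEmptyCXXFoldExpr produces (true):
/// \code
///   template <class... Ts> auto sum(Ts... ts) { return (ts + ... + 0); }
///   template <class... Ts> bool all(Ts... ts) { return (... && ts); }
/// \endcode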
//// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class), along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true if the capture failed, false otherwise.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
//// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Expr *ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
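/// Sketch of a pseudo-destructor call, meaningful when the destroyed type is
/// a scalar (the template is hypothetical):
/// \code
///   template <class T> void destroy(T *p) { p->~T(); }   // no-op for e.g. int
/// \endcode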
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr) {
return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc()
: SourceLocation());
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue = false,
bool IsConstexpr = false,
bool IsLambdaInitCaptureInitializer = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case, do not emit an error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
bool IsConstexprSpecified);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, IdentifierInfo *Id,
LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef,
IdentifierInfo *Id,
bool DirectInit, Expr *&Init);
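/// Illustrative C++14 init-capture whose initializer flows through the
/// functions above (names are hypothetical):
/// \code
///   auto ptr = std::make_unique<int>(42);
///   auto fn  = [p = std::move(ptr)] { return *p; };
/// \endcode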
/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Build the implicit field for an init-capture.
FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
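/// Illustrative boxed expressions (the boxed type depends on the operand):
/// \code
///   NSNumber *n = @(6 * 7);     // numeric operand boxes to NSNumber *
///   NSString *s = @("hello");   // const char * operand boxes to NSString *
/// \endcode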
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc,
const CXXRecordDecl *RD);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called at some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
void CheckCompletedCXXClass(CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass(Decl *D);
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD,
const FunctionProtoType *T);
void CheckDelayedMemberExceptionSpecs();
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
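///
/// For example (illustrative sketch, not part of the original comment), the
/// following override has a covariant return type:
/// \code
/// struct Base { virtual Base *clone(); };
/// struct Derived : Base { Derived *clone() override; }; // covariant return
/// \endcode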
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
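///
/// For example (illustrative, not from the original comment):
/// \code
/// struct Base { virtual void f() final; };
/// struct Derived : Base { void f(); }; // error: overrides a 'final' function
/// \endcode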
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
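// Illustrative usage sketch (not part of the original header): callers pass a
// diagnostic ID plus the arguments that BoundTypeDiagnoser should forward; the
// diagnostic name below is shown only as an example.
//   if (RequireNonAbstractType(Loc, ReturnType,
//                              diag::err_abstract_type_in_decl,
//                              AbstractReturnType))
//     return true; // the type was abstract and has been diagnosed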
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization,
SourceLocation TemplateKWLoc = SourceLocation());
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
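///
/// For example (illustrative), 'S' names a deduction guide below:
/// \code
/// template<typename T> struct S { S(T); };
/// S(int) -> S<long>; // explicit deduction guide
/// \endcode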
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false,
bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(FunctionDecl *FD,
TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
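///
/// For example (illustrative), the member declaration below uses the pack
/// \c Ts without expanding it and is diagnosed with \c UPPC_DataMemberType:
/// \code
/// template<typename ...Ts> struct X {
///   Ts member; // error: unexpanded parameter pack 'Ts'
/// };
/// \endcode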
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
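///
/// For example (illustrative), the expansion below can only succeed when
/// \c Ts and \c Us are instantiated with packs of the same length:
/// \code
/// template<typename ...Ts> struct Tuple {
///   template<typename ...Us> void zip(std::pair<Ts, Us> ...args);
/// };
/// \endcode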
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
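///
/// A typical caller checks the result against \c TDK_Success, e.g.
/// (illustrative sketch, not a quote of any particular call site):
/// \code
/// sema::TemplateDeductionInfo Info(Loc);
/// if (DeduceTemplateArguments(Partial, TemplateArgs, Info) != TDK_Success) {
///   // Report or record the deduction failure described by Info.
/// }
/// \endcode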
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
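// Illustrative note (not part of the original header): for a declaration such
// as 'auto x = 42;', DeduceAutoType deduces 'int' for 'auto', returns
// DAR_Succeeded, and stores the deduced type in Result.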
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template argument determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
/// Added for Template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation), Entity(nullptr), Template(nullptr),
TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
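///
/// A typical (illustrative) use substitutes one pack element at a time,
/// assuming the number of expansions is already known:
/// \code
/// for (unsigned I = 0; I != *NumExpansions; ++I) {
///   Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(S, I);
///   // ... substitute the expansion pattern using element I of each pack ...
/// }
/// \endcode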
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and marks the object as invalid (see \c isInvalid()).
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
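///
/// A typical (illustrative) use:
/// \code
/// InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
/// if (Inst.isInvalid())
///   return; // instantiation depth limit exceeded; already diagnosed
/// \endcode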
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
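///
/// A typical (illustrative) use wraps a provisional check:
/// \code
/// SFINAETrap Trap(*this);
/// // ... perform substitution or deduction that may emit SFINAE errors ...
/// if (Trap.hasErrorOccurred())
///   return TDK_SubstitutionFailure;
/// \endcode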
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
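// Illustrative sketch, assuming SemaRef and a hypothetical buildCandidate()
// helper: probing whether a construct would be valid without committing to
// diagnostics or typo corrections.
//
//   bool Viable;
//   {
//     Sema::TentativeAnalysisScope Tentative(SemaRef);
//     Viable = !buildCandidate(SemaRef).isInvalid();
//   } // previous typo-correction and SFINAE state restored here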
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation);
/// however, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
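// Illustrative sketch: marking a function template specialization as used
// typically appends to this queue (Func and PointOfInstantiation are assumed
// caller-side values); the queue is later drained by
// PerformPendingInstantiations().
//
//   PendingInstantiations.push_back(
//       std::make_pair(Func, PointOfInstantiation));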
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
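// Illustrative sketch of the intended pattern, written as if inside a Sema
// member function while instantiating a definition; Recursive is an assumed
// flag from the caller.
//
//   GlobalEagerInstantiationScope GlobalInstantiations(*this, Recursive);
//   // ... instantiate the definition; new work may be queued ...
//   GlobalInstantiations.perform(); // define used vtables, drain the queue
//   // the destructor then restores the outer PendingInstantiations/VTableUses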
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
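// Illustrative sketch, mirroring the global scope above but for members of
// local classes queued while instantiating an enclosing function body
// (again written as if inside a Sema member function):
//
//   LocalEagerInstantiationScope LocalInstantiations(*this);
//   // ... instantiate the function body ...
//   LocalInstantiations.perform(); // PerformPendingInstantiations(/*LocalOnly=*/true)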
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
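// Illustrative sketch, assuming OldProto, OldIdx/NewIdx and ParamTypes from a
// surrounding substitution: carrying ExtParameterInfos over to a rebuilt
// prototype. Indices must be set in increasing order.
//
//   ExtParameterInfoBuilder InfoBuilder;
//   InfoBuilder.set(NewIdx, OldProto->getExtParameterInfo(OldIdx));
//   // ...
//   FunctionProtoType::ExtProtoInfo EPI;
//   EPI.ExtParameterInfos = InfoBuilder.getPointerOrNull(ParamTypes.size());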
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
unsigned ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
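// Illustrative sketch, assuming OrigArgs and TemplateArgs from the caller:
// substituting into a call's argument list, with pack expansions handled.
//
//   SmallVector<Expr *, 8> SubstArgs;
//   if (SubstExprs(OrigArgs, /*IsCall=*/true, TemplateArgs, SubstArgs))
//     return ExprError();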
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(
SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName, SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with the type of the property (\p PropertyPtrTy).
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
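// Illustrative sketch of the parser-side dispatch on the returned kind
// (argument lists elided; Actions, CurScope, Name, NameLoc, IsSuper and
// HasTrailingDot are assumed caller-side values):
//
//   ParsedType ReceiverType;
//   switch (Actions.getObjCMessageKind(CurScope, Name, NameLoc, IsSuper,
//                                      HasTrailingDot, ReceiverType)) {
//   case Sema::ObjCSuperMessage:    /* ActOnSuperMessage(...) */    break;
//   case Sema::ObjCClassMessage:    /* ActOnClassMessage(...) */    break;
//   case Sema::ObjCInstanceMessage: /* ActOnInstanceMessage(...) */ break;
//   }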
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispAttr::Mode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on \#pragma clang __debug dump II.
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// ActOnPragmaFEnvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
void AddPragmaAttributesForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
unsigned SpellingListIndex, bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
unsigned SpellingListIndex);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
unsigned SpellingListIndex);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
Expr *MinBlocks, unsigned SpellingListIndex);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
unsigned SpellingListIndex, bool InInstantiation = false);
void AddParameterABIAttr(SourceRange AttrRange, Decl *D,
ParameterABI ABI, unsigned SpellingListIndex);
void AddNSConsumedAttr(SourceRange AttrRange, Decl *D,
unsigned SpellingListIndex, bool isNSConsumed,
bool isTemplateInstantiation);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
/// Called on well-formed \#pragma clang default_addr_space(...).
void ActOnPragmaDefaultAS(SourceLocation Loc, PragmaMsStackAction Action,
LangAS AS);
/// Called on well-formed \#pragma clang storage_addr_space(...).
void ActOnPragmaStorageAS(SourceLocation Loc, PragmaMsStackAction Action,
LangAS AS);
/// Called on well-formed \#pragma clang ptr32_thunk_prefix(...).
void ActOnPragmaPtr32ThunkPrefix(SourceLocation Loc, StringLiteral *Prefix);
/// Called on well-formed \#pragma clang ptr32_cs32_name(...).
void ActOnPragmaPtr32CS32Name(SourceLocation Loc, StringLiteral *Name);
/// Called on well-formed \#pragma clang ptr32_cs64_name(...).
void ActOnPragmaPtr32CS64Name(SourceLocation Loc, StringLiteral *Name);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = Ext;
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
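// Illustrative sketch: a pragma handler records the extension currently being
// enabled, and types or declarations built while it is active are tagged with
// it. "cl_khr_fp64", DoubleTy and NewFD are assumed example values.
//
//   setCurrentOpenCLExtension("cl_khr_fp64");   // e.g. from the extension pragma
//   // ... while building entities that require the extension ...
//   setCurrentOpenCLExtensionForType(DoubleTy); // tag a type
//   setCurrentOpenCLExtensionForDecl(NewFD);    // or tag a declaration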
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
public:
/// Return true if the provided declaration \a D should be captured by
/// reference.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope,
CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on the Requires directive.
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize the combiner of a declare reduction construct.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish the combiner of the current declare reduction construct.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Called on the start of a declare target region, i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of a declare target region, i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
NamedDeclSetType &SameDirectiveDecls);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Return true if we are inside an OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return DeclareTargetNestingLevel > 0;
}
/// Return true if we are inside an OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return true if (un)supported features for the current target should be
/// diagnosed if OpenMP (offloading) is enabled.
bool shouldDiagnoseTargetSupportFromOpenMP() const {
return !getLangOpts().OpenMPIsDevice || isInOpenMPDeclareTargetContext() ||
isInOpenMPTargetExecutionDirective();
}
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind,
OpenMPLinearClauseKind LinKind, OpenMPMapClauseKind MapTypeModifier,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation DepLinMapLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(OpenMPMapClauseKind MapTypeModifier,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *ActOnOpenMPToClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The result of the cast has the value kind given by \p VK.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
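// Illustrative usage sketch (not part of the original header; 'S', 'E', and
// the chosen cast kind are assumptions): converting an expression to 'int'
// while merging into any existing implicit cast:
//
//   ExprResult Conv = S.ImpCastExprToType(E, S.Context.IntTy, CK_IntegralCast);
//   if (Conv.isInvalid())
//     return ExprError();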
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
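// Illustrative sketch (assumption, not from the original header): classifying
// a call and checking its trailing arguments against the vararg rules, where
// 'S' is the Sema instance and FDecl/Proto/Fn describe the callee:
//
//   Sema::VariadicCallType CallType = S.getVariadicCallType(FDecl, Proto, Fn);
//   if (CallType != Sema::VariadicDoesNotApply)
//     for (const Expr *Arg : TrailingArgs)
//       S.checkVariadicArgument(Arg, CallType);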
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collect argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl,
LangAS AS = static_cast<LangAS>(-1));
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
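// Illustrative sketch (assumption): applying the usual arithmetic conversions
// to the operands of a binary operator, where 'S' is the Sema instance and
// LHS/RHS are already-built ExprResults:
//
//   QualType ResultTy = S.UsualArithmeticConversions(LHS, RHS);
//   if (LHS.isInvalid() || RHS.isInvalid())
//     return QualType();
//   if (!ResultTy->isArithmeticType())
//     return S.InvalidOperands(Loc, LHS, RHS);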
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
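// Illustrative sketch (assumption): checking an assignment of 'RHS' against a
// known LHS type and reporting problems; 'S' is the Sema instance, SrcTy was
// captured before conversion, and AA_Assigning names the assignment action
// enumerator used by the diagnostics entry point above:
//
//   Sema::AssignConvertType ConvTy =
//       S.CheckSingleAssignmentConstraints(LHSType, RHS);
//   if (RHS.isInvalid())
//     return ExprError();
//   if (S.DiagnoseAssignmentResult(ConvTy, Loc, LHSType, SrcTy, RHS.get(),
//                                  Sema::AA_Assigning))
//     return ExprError();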
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
QualType T1, QualType T2,
bool &DerivedToBase,
bool &ObjCConversion,
bool &ObjCLifetimeConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType &paramType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage,
SourceLocation lbrac, SourceLocation rbrac,
SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
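// Illustrative sketch (assumption): type-checking an instance message send,
// where 'S' is the Sema instance, 'Method' may be null, and the remaining
// names stand for data gathered while parsing the message expression:
//
//   QualType ReturnType;
//   ExprValueKind VK = VK_RValue;
//   if (S.CheckMessageArgumentTypes(ReceiverType, Args, Sel, SelectorLocs,
//                                   Method, /*isClassMessage=*/false,
//                                   /*isSuperMessage=*/false, LBracLoc,
//                                   RBracLoc, RecRange, ReturnType, VK))
//     return ExprError();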
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(QualType ReceiverType,
ObjCMethodDecl *Method,
bool isClassMessage, bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
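// Illustrative sketch (assumption): building an 'if constexpr' condition and
// querying its known value, where 'S' is the Sema instance, 'CurScope' the
// active scope, and 'CondExpr' the parsed condition expression:
//
//   Sema::ConditionResult Cond = S.ActOnCondition(
//       CurScope, IfLoc, CondExpr, Sema::ConditionKind::ConstexprIf);
//   if (Cond.isInvalid())
//     return StmtError();
//   if (llvm::Optional<bool> Known = Cond.getKnownValue())
//     ; // one arm of the 'if constexpr' is discarded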
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return the checked and converted condition, or an invalid result if
/// there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
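// Illustrative sketch (assumption): checking a 'while' condition, where 'S'
// is the Sema instance and 'CondExpr' the parsed expression:
//
//   ExprResult Cond = S.CheckBooleanCondition(WhileLoc, CondExpr);
//   if (Cond.isInvalid())
//     return StmtError();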
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Performs the conversion to bool; the result is
/// invalid if the conversion fails.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns an invalid ExprResult on
/// failure. Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
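// Illustrative sketch (assumption): verifying that an expression 'E' is an
// ICE and reading its folded value through the simplest overload above:
//
//   llvm::APSInt Value;
//   ExprResult ICE = S.VerifyIntegerConstantExpression(E, &Value);
//   if (ICE.isInvalid())
//     return ExprError();
//   // 'Value' now holds the folded integer constant.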
/// VerifyBitField - verifies that a bit-field width expression is an ICE and
/// has the correct width, and that the field type is valid.
/// Returns an invalid ExprResult on error.
/// Can optionally return whether the bit-field is of width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
CUDADeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
CUDAKnownEmittedFns;
/// A partial call graph maintained during CUDA compilation to support
/// deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to CUDAKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
/* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
SourceLocation>>
CUDACallGraph;
/// Diagnostic builder for CUDA errors which may or may not be deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class CUDADiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
CUDADiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
~CUDADiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (CUDADiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a CUDADiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
template <typename T>
friend const CUDADiagBuilder &operator<<(const CUDADiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiag.hasValue())
*Diag.PartialDiag << Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<PartialDiagnostic> PartialDiag;
};
/// Creates a CUDADiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
CUDADiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a CUDADiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
CUDADiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
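// Illustrative sketch (assumption): guarding a call built during semantic
// analysis, where 'S' is the Sema instance and 'CalleeDecl' the resolved
// callee declaration:
//
//   if (FunctionDecl *Callee = dyn_cast_or_null<FunctionDecl>(CalleeDecl))
//     if (!S.CheckCUDACall(CallLoc, Callee))
//       return ExprError();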
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
/// Checks that initializers of \p VD satisfy CUDA restrictions. In case of
/// error, emits an appropriate diagnostic and invalidates \p VD.
///
/// CUDA allows only empty constructors as initializers for global variables
/// (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
/// __shared__ variables whether they are local or not (they all are
/// implicitly static in CUDA). One exception is that CUDA allows constant
/// initializers for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteReturn(Scope *S);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs,
LangAS AS = static_cast<LangAS>(-1));
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs,
LangAS AS = static_cast<LangAS>(-1));
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD);
/// Check if the given expression contains a 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
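/// If true, the corresponding argument must be a null pointer constant.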
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Nullable_result = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// To be used for checking whether the number of arguments being passed
/// to a function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
private:
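/// RAII object that swaps out the pending parsed-class state (delayed
/// exception-spec checks, defaulted member exception specs, and delayed
/// dllexport classes) on construction and restores it on destruction,
/// asserting that the intervening state has been fully processed.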
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedDefaultedMemberExceptionSpecs.empty() &&
"there shouldn't be any pending delayed defaulted member "
"exception specs");
assert(S.DelayedDllExportClasses.empty() &&
"there shouldn't be any pending delayed DLL export classes");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
decltype(DelayedDefaultedMemberExceptionSpecs)
SavedDefaultedMemberExceptionSpecs;
decltype(DelayedDllExportClasses) SavedDllExportClasses;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
SavedDefaultedMemberExceptionSpecs.swap(
S.DelayedDefaultedMemberExceptionSpecs);
SavedDllExportClasses.swap(S.DelayedDllExportClasses);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks whether the expression is in the set of potentially
/// misaligned members and is being converted to some pointer type T with lower
/// or equal alignment requirements. If so, it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
unfold.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/unfold.c"
#else
/* note: due to write issues, this one cannot be parallelized as well as unfolded_copy */
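/* unfolded_acc accumulates the unfolded (column) buffer `finput` back into
 * `input` (a col2im-style reduction). Overlapping kernel windows map onto the
 * same input element, so writes would collide if parallelized over output
 * positions; parallelism is therefore limited to independent input planes. */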
void THNN_(unfolded_acc)(
THTensor *finput,
THTensor *input,
int kW,
int kH,
int dW,
int dH,
int padW,
int padH,
int nInputPlane,
int inputWidth,
int inputHeight,
int outputWidth,
int outputHeight)
{
// This function assumes that
// outputHeight*dH does not overflow a long
// outputWidth*dW does not overflow a long
int nip;
real *input_data = THTensor_(data)(input);
real *finput_data = THTensor_(data)(finput);
#pragma omp parallel for private(nip)
for(nip = 0; nip < nInputPlane; nip++)
{
int kw, kh, y, x;
long ix, iy;
for(kh = 0; kh < kH; kh++)
{
for(kw = 0; kw < kW; kw++)
{
real *src = finput_data + nip*((size_t)kH*kW*outputHeight*outputWidth) + kh*((size_t)kW*outputHeight*outputWidth) + kw*((size_t)outputHeight*outputWidth);
real *dst = input_data + nip*((size_t)inputHeight*inputWidth);
if (padW > 0 || padH > 0) {
int lpad,rpad;
for(y = 0; y < outputHeight; y++) {
iy = (long)y*dH - padH + kh;
if (iy < 0 || iy >= inputHeight) {
} else {
if (dW==1){
ix = 0 - padW + kw;
lpad = fmaxf(0,padW-kw);
rpad = fmaxf(0,padW-(kW-kw-1));
real *dst_slice = dst+(size_t)iy*inputWidth+ix+lpad;
THVector_(cadd)(dst_slice, dst_slice, src+(size_t)y*outputWidth+lpad, 1, outputWidth - lpad - rpad); /* note: THVector_add could handle 1 value better */
}
else{
for (x=0; x<outputWidth; x++){
ix = (long)x*dW - padW + kw;
if (ix < 0 || ix >= inputWidth){
}else{
real *dst_slice = dst+(size_t)iy*inputWidth+ix;
THVector_(cadd)(dst_slice, dst_slice, src+(size_t)y*outputWidth+x, 1, 1);
}
}
}
}
}
} else {
for(y = 0; y < outputHeight; y++) {
iy = (long)y*dH + kh;
ix = 0 + kw;
if (dW == 1 ) {
real *dst_slice = dst+(size_t)iy*inputWidth+ix;
THVector_(cadd)(dst_slice, dst_slice, src+(size_t)y*outputWidth, 1, outputWidth); /* note: THVector_add could handle 1 value better */
}else{
for(x = 0; x < outputWidth; x++) {
real *dst_slice = dst+(size_t)iy*inputWidth+ix+x*dW;
THVector_(cadd)(dst_slice, dst_slice, src+(size_t)y*outputWidth+x, 1, 1);
}
}
}
}
}
}
}
}
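/* unfolded_copy performs an im2col-style copy: for every (plane, kh, kw)
 * combination it gathers the corresponding input rows into the column buffer
 * `finput`, zero-filling positions that fall in the padding region. Each
 * destination element is written exactly once, so the nInputPlane*kH*kW loop
 * parallelizes cleanly. */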
void THNN_(unfolded_copy)(
THTensor *finput,
THTensor *input,
int kW,
int kH,
int dW,
int dH,
int padW,
int padH,
int nInputPlane,
int inputWidth,
int inputHeight,
int outputWidth,
int outputHeight)
{
// This function assumes that
// kH*kW does not overflow an int
// nInputPlane*kH*kW does not overflow a long
// outputHeight*dH does not overflow a long
// outputWidth*dW does not overflow a long
long k;
real *input_data = THTensor_(data)(input);
real *finput_data = THTensor_(data)(finput);
#pragma omp parallel for private(k)
for(k = 0; k < (long)nInputPlane*kH*kW; k++) {
long nip = k / (kH*kW);
long rest = k % (kH*kW);
long kh = rest / kW;
long kw = rest % kW;
int x, y;
long ix, iy;
real *dst = finput_data + nip*((size_t)kH*kW*outputHeight*outputWidth) + kh*((size_t)kW*outputHeight*outputWidth) + kw*((size_t)outputHeight*outputWidth);
real *src = input_data + nip*((size_t)inputHeight*inputWidth);
if (padW > 0 || padH > 0) {
long lpad,rpad;
for(y = 0; y < outputHeight; y++) {
iy = (long)y*dH - padH + kh;
if (iy < 0 || iy >= inputHeight) {
memset(dst+(size_t)y*outputWidth, 0, sizeof(real)*outputWidth);
} else {
if (dW==1){
ix = 0 - padW + kw;
lpad = fmaxf(0,padW-kw);
rpad = fmaxf(0,padW-(kW-kw-1));
if (outputWidth-rpad-lpad <= 0) {
memset(dst+(size_t)y*outputWidth, 0, sizeof(real)*outputWidth);
} else {
if (lpad > 0) memset(dst+(size_t)y*outputWidth, 0, sizeof(real)*lpad);
memcpy(dst+(size_t)y*outputWidth+lpad, src+(size_t)iy*inputWidth+ix+lpad, sizeof(real)*(outputWidth-rpad-lpad));
if (rpad > 0) memset(dst+(size_t)y*outputWidth + outputWidth - rpad, 0, sizeof(real)*rpad);
}
}
else{
for (x=0; x<outputWidth; x++){
ix = (long)x*dW - padW + kw;
if (ix < 0 || ix >= inputWidth)
memset(dst+(size_t)y*outputWidth+x, 0, sizeof(real)*1);
else
memcpy(dst+(size_t)y*outputWidth+x, src+(size_t)iy*inputWidth+ix, sizeof(real)*(1));
}
}
}
}
} else {
for(y = 0; y < outputHeight; y++) {
iy = (long)y*dH + kh;
ix = 0 + kw;
if (dW == 1)
memcpy(dst+(size_t)y*outputWidth, src+(size_t)iy*inputWidth+ix, sizeof(real)*outputWidth);
else{
for (x=0; x<outputWidth; x++)
memcpy(dst+(size_t)y*outputWidth+x, src+(size_t)iy*inputWidth+ix+(long)x*dW, sizeof(real)*(1));
}
}
}
}
}
#endif
|
omp3-2.c | #include<stdio.h>
#ifndef N
#define N 5000
#endif
#define M 1000000000
int a[N][N], b[N][N];
int main() {
int i, j, x, y, sum;
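/* The two sections below fill a and b concurrently; each section writes only
 * its own array and loop indices, so they are independent of each other. */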
#pragma omp parallel sections
{
#pragma omp section
for (i = 0; i < N; i++)
for (j = 0; j < N; j++)
a[i][j] = i + j;
#pragma omp section
for (x = 0; x < N; x++)
for (y = 0; y < N; y++)
b[x][y] = x - y;
}
sum = 0;
for (i = 0; i < N; i++)
for (j = 0; j < N; j++) {
sum += a[i][j];
sum %= M;
}
printf("%d\n", sum);
sum = 0;
for (i = 0; i < N; i++)
for (j = 0; j < N; j++) {
sum += b[i][j];
sum %= M;
}
printf("%d\n", sum);
return 0;
}
|
comms.h | /*
//@HEADER
// *****************************************************************************
//
// HPCGraph: Graph Computation on High Performance Computing Systems
// Copyright (2016) Sandia Corporation
//
// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact George M. Slota (gmslota@sandia.gov)
// Siva Rajamanickam (srajama@sandia.gov)
// Kamesh Madduri (madduri@cse.psu.edu)
//
// *****************************************************************************
//@HEADER
*/
#ifndef _COMMS_H_
#define _COMMS_H_
#include <mpi.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <assert.h>
#include "dist_graph.h"
#include "util.h"
extern int procid, nprocs;
extern bool verbose, debug, verify, output;
#define MAX_SEND_SIZE 2147483648
#define THREAD_QUEUE_SIZE 1024
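// MAX_SEND_SIZE (2^31) bounds how much of the global queue is exchanged per
// communication round so per-rank counts and displacements stay within the
// int32 arrays passed to MPI_Alltoallv; THREAD_QUEUE_SIZE is the length of the
// per-thread buffers used to batch updates before touching shared state.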
struct mpi_data_t {
int32_t* sendcounts;
int32_t* recvcounts;
int32_t* sdispls;
int32_t* rdispls;
int32_t* sdispls_cpy;
uint64_t* recvcounts_temp;
uint64_t* sendcounts_temp;
uint64_t* sdispls_temp;
uint64_t* rdispls_temp;
uint64_t* sdispls_cpy_temp;
uint64_t* sendbuf_vert;
uint64_t* sendbuf_data;
double* sendbuf_data_flt;
uint64_t* recvbuf_vert;
uint64_t* recvbuf_data;
double* recvbuf_data_flt;
uint64_t total_recv;
uint64_t total_send;
uint64_t global_queue_size;
} ;
struct queue_data_t {
uint64_t* queue;
uint64_t* queue_next;
uint64_t* queue_send;
uint64_t queue_size;
uint64_t next_size;
uint64_t send_size;
} ;
struct thread_queue_t {
int32_t tid;
uint64_t* thread_queue;
uint64_t* thread_send;
uint64_t thread_queue_size;
uint64_t thread_send_size;
} ;
struct thread_comm_t {
int32_t tid;
bool* v_to_rank;
uint64_t* sendcounts_thread;
uint64_t* sendbuf_vert_thread;
uint64_t* sendbuf_data_thread;
double* sendbuf_data_thread_flt;
int32_t* sendbuf_rank_thread;
uint64_t* thread_starts;
uint64_t thread_queue_size;
} ;
void init_queue_data(dist_graph_t* g, queue_data_t* q);
void clear_queue_data(queue_data_t* q);
void init_comm_data(mpi_data_t* comm);
void clear_comm_data(mpi_data_t* comm);
void init_thread_queue(thread_queue_t* tq);
void clear_thread_queue(thread_queue_t* tq);
void init_thread_comm(thread_comm_t* tc);
void clear_thread_comm(thread_comm_t* tc);
void init_thread_comm_flt(thread_comm_t* tc);
void clear_thread_commflt(thread_comm_t* tc);
void init_sendbuf_vid_data(mpi_data_t* comm);
void init_recvbuf_vid_data(mpi_data_t* comm);
void init_sendbuf_vid_data_flt(mpi_data_t* comm);
void init_recvbuf_vid_data_flt(mpi_data_t* comm);
void clear_recvbuf_vid_data(mpi_data_t* comm);
void clear_allbuf_vid_data(mpi_data_t* comm);
inline void exchange_verts(dist_graph_t* g, mpi_data_t* comm, queue_data_t* q);
inline void exchange_verts(mpi_data_t* comm);
inline void exchange_data(mpi_data_t* comm);
inline void exchange_data_flt(mpi_data_t* comm);
inline void exchange_vert_data(dist_graph_t* g, mpi_data_t* comm,
queue_data_t* q);
inline void exchange_vert_data(dist_graph_t* g, mpi_data_t* comm);
inline void update_sendcounts_thread(dist_graph_t* g,
thread_comm_t* tc, uint64_t vert_index);
inline void update_sendcounts_thread_out(dist_graph_t* g,
thread_comm_t* tc, uint64_t vert_index);
inline void update_vid_data_queues(dist_graph_t* g,
thread_comm_t* tc, mpi_data_t* comm,
uint64_t vert_index, uint64_t data);
inline void update_vid_data_queues_out(dist_graph_t* g,
thread_comm_t* tc, mpi_data_t* comm,
uint64_t vert_index, uint64_t data);
inline void update_vid_data_queues_out(dist_graph_t* g,
thread_comm_t* tc, mpi_data_t* comm,
uint64_t vert_index, double data);
inline void add_vid_to_queue(thread_queue_t* tq, queue_data_t* q,
uint64_t vertex_id);
inline void empty_queue(thread_queue_t* tq, queue_data_t* q);
inline void add_vid_to_send(thread_queue_t* tq, queue_data_t* q,
uint64_t vertex_id);
inline void empty_send(thread_queue_t* tq, queue_data_t* q);
inline void add_vid_data_to_send(thread_comm_t* tc, mpi_data_t* comm,
uint64_t vertex_id, uint64_t data_val, int32_t send_rank);
inline void add_vid_data_to_send_flt(thread_comm_t* tc, mpi_data_t* comm,
uint64_t vertex_id, double data_val, int32_t send_rank);
inline void empty_vid_data(thread_comm_t* tc, mpi_data_t* comm);
inline void empty_vid_data_flt(thread_comm_t* tc, mpi_data_t* comm);
inline void exchange_verts(dist_graph_t* g, mpi_data_t* comm, queue_data_t* q)
{
comm->global_queue_size = 0;
uint64_t task_queue_size = q->next_size + q->send_size;
MPI_Allreduce(&task_queue_size, &comm->global_queue_size, 1,
MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD);
uint64_t num_comms = comm->global_queue_size / (uint64_t)MAX_SEND_SIZE + 1;
uint64_t sum_recv = 0;
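// Split the exchange into num_comms rounds so each round's per-rank counts
// and displacements remain within int32 range for MPI_Alltoallv.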
for (uint64_t c = 0; c < num_comms; ++c)
{
uint64_t send_begin = (q->send_size * c) / num_comms;
uint64_t send_end = (q->send_size * (c + 1)) / num_comms;
if (c == (num_comms-1))
send_end = q->send_size;
for (int32_t i = 0; i < nprocs; ++i)
{
comm->sendcounts[i] = 0;
comm->recvcounts[i] = 0;
}
for (uint64_t i = send_begin; i < send_end; ++i)
{
uint64_t ghost_index = q->queue_send[i] - g->n_local;
uint64_t ghost_task = g->ghost_tasks[ghost_index];
++comm->sendcounts[ghost_task];
}
MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T,
comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD);
comm->sdispls[0] = 0;
comm->sdispls_cpy[0] = 0;
comm->rdispls[0] = 0;
for (int32_t i = 1; i < nprocs; ++i)
{
comm->sdispls[i] = comm->sdispls[i-1] + comm->sendcounts[i-1];
comm->rdispls[i] = comm->rdispls[i-1] + comm->recvcounts[i-1];
comm->sdispls_cpy[i] = comm->sdispls[i];
}
int32_t cur_send = comm->sdispls[nprocs-1] + comm->sendcounts[nprocs-1];
int32_t cur_recv = comm->rdispls[nprocs-1] + comm->recvcounts[nprocs-1];
comm->sendbuf_vert = (uint64_t*)malloc((uint64_t)(cur_send+1)*sizeof(uint64_t));
if (comm->sendbuf_vert == NULL)
throw_err("exchange_verts(), unable to allocate comm buffers", procid);
for (uint64_t i = send_begin; i < send_end; ++i)
{
uint64_t ghost_index = q->queue_send[i] - g->n_local;
uint64_t ghost_task = g->ghost_tasks[ghost_index];
uint64_t vert = g->ghost_unmap[ghost_index];
comm->sendbuf_vert[comm->sdispls_cpy[ghost_task]++] = vert;
}
MPI_Alltoallv(comm->sendbuf_vert,
comm->sendcounts, comm->sdispls, MPI_UINT64_T,
q->queue_next+q->next_size+sum_recv,
comm->recvcounts, comm->rdispls, MPI_UINT64_T,
MPI_COMM_WORLD);
free(comm->sendbuf_vert);
sum_recv += cur_recv;
}
q->queue_size = q->next_size + sum_recv;
q->next_size = 0;
q->send_size = 0;
uint64_t* temp = q->queue;
q->queue = q->queue_next;
q->queue_next = temp;
}
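// Exchange (vertex id, data) pairs: an all-to-all of per-rank send counts
// first sizes the receive buffers, then chunked MPI_Alltoallv calls ship the
// packed vertex and data send buffers.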
inline void exchange_vert_data(dist_graph_t* g, mpi_data_t* comm,
queue_data_t* q)
{
for (int32_t i = 0; i < nprocs; ++i)
comm->recvcounts_temp[i] = 0;
MPI_Alltoall(comm->sendcounts_temp, 1, MPI_UINT64_T,
comm->recvcounts_temp, 1, MPI_UINT64_T, MPI_COMM_WORLD);
comm->total_recv = 0;
for (int i = 0; i < nprocs; ++i)
comm->total_recv += comm->recvcounts_temp[i];
comm->recvbuf_vert = (uint64_t*)malloc(comm->total_recv*sizeof(uint64_t));
comm->recvbuf_data = (uint64_t*)malloc(comm->total_recv*sizeof(uint64_t));
comm->recvbuf_data_flt = NULL;
if (comm->recvbuf_vert == NULL || comm->recvbuf_data == NULL)
throw_err("exchange_vert_data() unable to allocate comm buffers", procid);
comm->global_queue_size = 0;
uint64_t task_queue_size = comm->total_send;
MPI_Allreduce(&task_queue_size, &comm->global_queue_size, 1,
MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD);
uint64_t num_comms = comm->global_queue_size / (uint64_t)MAX_SEND_SIZE + 1;
uint64_t sum_recv = 0;
uint64_t sum_send = 0;
for (uint64_t c = 0; c < num_comms; ++c)
{
for (int32_t i = 0; i < nprocs; ++i)
{
uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms;
uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms;
if (c == (num_comms-1))
send_end = comm->sendcounts_temp[i];
comm->sendcounts[i] = (int32_t)(send_end - send_begin);
assert(comm->sendcounts[i] >= 0);
}
MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T,
comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD);
comm->sdispls[0] = 0;
comm->sdispls_cpy[0] = 0;
comm->rdispls[0] = 0;
for (int32_t i = 1; i < nprocs; ++i)
{
comm->sdispls[i] = comm->sdispls[i-1] + comm->sendcounts[i-1];
comm->rdispls[i] = comm->rdispls[i-1] + comm->recvcounts[i-1];
comm->sdispls_cpy[i] = comm->sdispls[i];
}
int32_t cur_send = comm->sdispls[nprocs-1] + comm->sendcounts[nprocs-1];
int32_t cur_recv = comm->rdispls[nprocs-1] + comm->recvcounts[nprocs-1];
uint64_t* buf_v = (uint64_t*)malloc((uint64_t)(cur_send)*sizeof(uint64_t));
uint64_t* buf_d = (uint64_t*)malloc((uint64_t)(cur_send)*sizeof(uint64_t));
if (buf_v == NULL || buf_d == NULL)
throw_err("exchange_verts(), unable to allocate comm buffers", procid);
for (int32_t i = 0; i < nprocs; ++i)
{
uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms;
uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms;
if (c == (num_comms-1))
send_end = comm->sendcounts_temp[i];
for (uint64_t j = send_begin; j < send_end; ++j)
{
uint64_t vert = comm->sendbuf_vert[comm->sdispls_temp[i]+j];
uint64_t data = comm->sendbuf_data[comm->sdispls_temp[i]+j];
buf_v[comm->sdispls_cpy[i]] = vert;
buf_d[comm->sdispls_cpy[i]++] = data;
}
}
MPI_Alltoallv(buf_v, comm->sendcounts,
comm->sdispls, MPI_UINT64_T,
comm->recvbuf_vert+sum_recv, comm->recvcounts,
comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD);
MPI_Alltoallv(buf_d, comm->sendcounts,
comm->sdispls, MPI_UINT64_T,
comm->recvbuf_data+sum_recv, comm->recvcounts,
comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD);
free(buf_v);
free(buf_d);
sum_recv += cur_recv;
sum_send += cur_send;
}
assert(sum_recv == comm->total_recv);
assert(sum_send == comm->total_send);
comm->global_queue_size = 0;
task_queue_size = comm->total_recv + q->next_size;
MPI_Allreduce(&task_queue_size, &comm->global_queue_size, 1,
MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD);
q->send_size = 0;
}
inline void exchange_verts(mpi_data_t* comm)
{
if (debug) { printf("Task %d exchange_verts() start\n", procid); }
uint64_t num_comms = comm->global_queue_size / (uint64_t)MAX_SEND_SIZE + 1;
uint64_t sum_recv = 0;
uint64_t sum_send = 0;
for (uint64_t c = 0; c < num_comms; ++c)
{
for (int32_t i = 0; i < nprocs; ++i)
{
uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms;
uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms;
if (c == (num_comms-1))
send_end = comm->sendcounts_temp[i];
comm->sendcounts[i] = (int32_t)(send_end - send_begin);
assert(comm->sendcounts[i] >= 0);
}
MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T,
comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD);
comm->sdispls[0] = 0;
comm->sdispls_cpy[0] = 0;
comm->rdispls[0] = 0;
for (int32_t i = 1; i < nprocs; ++i)
{
comm->sdispls[i] = comm->sdispls[i-1] + comm->sendcounts[i-1];
comm->rdispls[i] = comm->rdispls[i-1] + comm->recvcounts[i-1];
comm->sdispls_cpy[i] = comm->sdispls[i];
}
int32_t cur_send = comm->sdispls[nprocs-1] + comm->sendcounts[nprocs-1];
int32_t cur_recv = comm->rdispls[nprocs-1] + comm->recvcounts[nprocs-1];
uint64_t* buf_v = (uint64_t*)malloc((uint64_t)(cur_send)*sizeof(uint64_t));
if (buf_v == NULL)
throw_err("exchange_verts(), unable to allocate comm buffers", procid);
for (int32_t i = 0; i < nprocs; ++i)
{
uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms;
uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms;
if (c == (num_comms-1))
send_end = comm->sendcounts_temp[i];
for (uint64_t j = send_begin; j < send_end; ++j)
{
uint64_t vert = comm->sendbuf_vert[comm->sdispls_temp[i]+j];
buf_v[comm->sdispls_cpy[i]++] = vert;
}
}
MPI_Alltoallv(buf_v, comm->sendcounts,
comm->sdispls, MPI_UINT64_T,
comm->recvbuf_vert+sum_recv, comm->recvcounts,
comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD);
free(buf_v);
sum_recv += cur_recv;
sum_send += cur_send;
}
assert(sum_recv == comm->total_recv);
assert(sum_send == comm->total_send);
if (debug) { printf("Task %d exchange_verts() success\n", procid); }
}
inline void exchange_data(mpi_data_t* comm)
{
if (debug) { printf("Task %d exchange_data() start\n", procid); }
uint64_t num_comms = comm->global_queue_size / (uint64_t)MAX_SEND_SIZE + 1;
uint64_t sum_recv = 0;
uint64_t sum_send = 0;
for (uint64_t c = 0; c < num_comms; ++c)
{
for (int32_t i = 0; i < nprocs; ++i)
{
uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms;
uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms;
if (c == (num_comms-1))
send_end = comm->sendcounts_temp[i];
comm->sendcounts[i] = (int32_t)(send_end - send_begin);
assert(comm->sendcounts[i] >= 0);
}
MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T,
comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD);
comm->sdispls[0] = 0;
comm->sdispls_cpy[0] = 0;
comm->rdispls[0] = 0;
for (int32_t i = 1; i < nprocs; ++i)
{
comm->sdispls[i] = comm->sdispls[i-1] + comm->sendcounts[i-1];
comm->rdispls[i] = comm->rdispls[i-1] + comm->recvcounts[i-1];
comm->sdispls_cpy[i] = comm->sdispls[i];
}
int32_t cur_send = comm->sdispls[nprocs-1] + comm->sendcounts[nprocs-1];
int32_t cur_recv = comm->rdispls[nprocs-1] + comm->recvcounts[nprocs-1];
uint64_t* buf_d = (uint64_t*)malloc((uint64_t)(cur_send)*sizeof(uint64_t));
if (buf_d == NULL)
throw_err("exchange_data(), unable to allocate comm buffers", procid);
for (int32_t i = 0; i < nprocs; ++i)
{
uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms;
uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms;
if (c == (num_comms-1))
send_end = comm->sendcounts_temp[i];
for (uint64_t j = send_begin; j < send_end; ++j)
{
uint64_t data = comm->sendbuf_data[comm->sdispls_temp[i]+j];
buf_d[comm->sdispls_cpy[i]++] = data;
}
}
MPI_Alltoallv(buf_d, comm->sendcounts,
comm->sdispls, MPI_UINT64_T,
comm->recvbuf_data+sum_recv, comm->recvcounts,
comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD);
free(buf_d);
sum_recv += cur_recv;
sum_send += cur_send;
}
assert(sum_recv == comm->total_recv);
assert(sum_send == comm->total_send);
if (debug) { printf("Task %d exchange_data() success\n", procid); }
}
inline void exchange_data_flt(mpi_data_t* comm)
{
if (debug) { printf("Task %d exchange_data_flt() start\n", procid); }
uint64_t num_comms = comm->global_queue_size / (uint64_t)MAX_SEND_SIZE + 1;
uint64_t sum_recv = 0;
uint64_t sum_send = 0;
for (uint64_t c = 0; c < num_comms; ++c)
{
for (int32_t i = 0; i < nprocs; ++i)
{
uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms;
uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms;
if (c == (num_comms-1))
send_end = comm->sendcounts_temp[i];
comm->sendcounts[i] = (int32_t)(send_end - send_begin);
assert(comm->sendcounts[i] >= 0);
}
MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T,
comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD);
comm->sdispls[0] = 0;
comm->sdispls_cpy[0] = 0;
comm->rdispls[0] = 0;
for (int32_t i = 1; i < nprocs; ++i)
{
comm->sdispls[i] = comm->sdispls[i-1] + comm->sendcounts[i-1];
comm->rdispls[i] = comm->rdispls[i-1] + comm->recvcounts[i-1];
comm->sdispls_cpy[i] = comm->sdispls[i];
}
int32_t cur_send = comm->sdispls[nprocs-1] + comm->sendcounts[nprocs-1];
int32_t cur_recv = comm->rdispls[nprocs-1] + comm->recvcounts[nprocs-1];
double* buf_d = (double*)malloc((uint64_t)(cur_send)*sizeof(double));
if (buf_d == NULL)
throw_err("exchange_data_flt(), unable to allocate comm buffers", procid);
for (int32_t i = 0; i < nprocs; ++i)
{
uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms;
uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms;
if (c == (num_comms-1))
send_end = comm->sendcounts_temp[i];
for (uint64_t j = send_begin; j < send_end; ++j)
{
double data = comm->sendbuf_data_flt[comm->sdispls_temp[i]+j];
buf_d[comm->sdispls_cpy[i]++] = data;
}
}
MPI_Alltoallv(buf_d, comm->sendcounts,
comm->sdispls, MPI_DOUBLE,
comm->recvbuf_data_flt+sum_recv, comm->recvcounts,
comm->rdispls, MPI_DOUBLE, MPI_COMM_WORLD);
free(buf_d);
sum_recv += cur_recv;
sum_send += cur_send;
}
assert(sum_recv == comm->total_recv);
assert(sum_send == comm->total_send);
if (debug) { printf("Task %d exchange_data_flt() success\n", procid); }
}
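// Count, per destination rank, how many updates this thread will send for
// vert_index; v_to_rank deduplicates ranks so each neighboring rank is counted
// at most once per vertex. The _out variant below considers only out-edges.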
inline void update_sendcounts_thread(dist_graph_t* g,
thread_comm_t* tc,
uint64_t vert_index)
{
for (int32_t i = 0; i < nprocs; ++i)
tc->v_to_rank[i] = false;
uint64_t out_degree = out_degree(g, vert_index);
uint64_t* outs = out_vertices(g, vert_index);
for (uint64_t j = 0; j < out_degree; ++j)
{
uint64_t out_index = outs[j];
if (out_index >= g->n_local)
{
int32_t out_rank = g->ghost_tasks[out_index-g->n_local];
if (!tc->v_to_rank[out_rank])
{
tc->v_to_rank[out_rank] = true;
++tc->sendcounts_thread[out_rank];
}
}
}
uint64_t in_degree = in_degree(g, vert_index);
uint64_t* ins = in_vertices(g, vert_index);
for (uint64_t j = 0; j < in_degree; ++j)
{
uint64_t in_index = ins[j];
if (in_index >= g->n_local)
{
int32_t in_rank = g->ghost_tasks[in_index-g->n_local];
if (!tc->v_to_rank[in_rank])
{
tc->v_to_rank[in_rank] = true;
++tc->sendcounts_thread[in_rank];
}
}
}
}
inline void update_sendcounts_thread_out(dist_graph_t* g,
thread_comm_t* tc,
uint64_t vert_index)
{
for (int32_t i = 0; i < nprocs; ++i)
tc->v_to_rank[i] = false;
uint64_t out_degree = out_degree(g, vert_index);
uint64_t* outs = out_vertices(g, vert_index);
for (uint64_t j = 0; j < out_degree; ++j)
{
uint64_t out_index = outs[j];
if (out_index >= g->n_local)
{
int32_t out_rank = g->ghost_tasks[out_index-g->n_local];
if (!tc->v_to_rank[out_rank])
{
tc->v_to_rank[out_rank] = true;
++tc->sendcounts_thread[out_rank];
}
}
}
}
inline void update_vid_data_queues(dist_graph_t* g,
thread_comm_t* tc, mpi_data_t* comm,
uint64_t vert_index, uint64_t data)
{
for (int32_t i = 0; i < nprocs; ++i)
tc->v_to_rank[i] = false;
uint64_t out_degree = out_degree(g, vert_index);
uint64_t* outs = out_vertices(g, vert_index);
for (uint64_t j = 0; j < out_degree; ++j)
{
uint64_t out_index = outs[j];
if (out_index >= g->n_local)
{
int32_t out_rank = g->ghost_tasks[out_index - g->n_local];
if (!tc->v_to_rank[out_rank])
{
tc->v_to_rank[out_rank] = true;
add_vid_data_to_send(tc, comm,
g->local_unmap[vert_index], data, out_rank);
}
}
}
uint64_t in_degree = in_degree(g, vert_index);
uint64_t* ins = in_vertices(g, vert_index);
for (uint64_t j = 0; j < in_degree; ++j)
{
uint64_t in_index = ins[j];
if (in_index >= g->n_local)
{
int32_t in_rank = g->ghost_tasks[in_index - g->n_local];
if (!tc->v_to_rank[in_rank])
{
tc->v_to_rank[in_rank] = true;
add_vid_data_to_send(tc, comm,
g->local_unmap[vert_index], data, in_rank);
}
}
}
}
inline void update_vid_data_queues_out(dist_graph_t* g,
thread_comm_t* tc, mpi_data_t* comm,
uint64_t vert_index, double data)
{
for (int32_t i = 0; i < nprocs; ++i)
tc->v_to_rank[i] = false;
uint64_t out_degree = out_degree(g, vert_index);
uint64_t* outs = out_vertices(g, vert_index);
for (uint64_t j = 0; j < out_degree; ++j)
{
uint64_t out_index = outs[j];
if (out_index >= g->n_local)
{
int32_t out_rank = g->ghost_tasks[out_index - g->n_local];
if (!tc->v_to_rank[out_rank])
{
tc->v_to_rank[out_rank] = true;
add_vid_data_to_send_flt(tc, comm,
g->local_unmap[vert_index], data, out_rank);
}
}
}
}
inline void update_vid_data_queues_out(dist_graph_t* g,
thread_comm_t* tc, mpi_data_t* comm,
uint64_t vert_index, uint64_t data)
{
for (int32_t i = 0; i < nprocs; ++i)
tc->v_to_rank[i] = false;
uint64_t out_degree = out_degree(g, vert_index);
uint64_t* outs = out_vertices(g, vert_index);
for (uint64_t j = 0; j < out_degree; ++j)
{
uint64_t out_index = outs[j];
if (out_index >= g->n_local)
{
int32_t out_rank = g->ghost_tasks[out_index - g->n_local];
if (!tc->v_to_rank[out_rank])
{
tc->v_to_rank[out_rank] = true;
add_vid_data_to_send(tc, comm,
g->local_unmap[vert_index], data, out_rank);
}
}
}
}
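// Append a vertex id to the calling thread's local queue buffer; once the
// buffer reaches THREAD_QUEUE_SIZE it is flushed to the shared next-frontier
// queue, amortizing the atomic size update.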
inline void add_vid_to_queue(thread_queue_t* tq, queue_data_t* q,
uint64_t vertex_id)
{
tq->thread_queue[tq->thread_queue_size++] = vertex_id;
if (tq->thread_queue_size == THREAD_QUEUE_SIZE)
empty_queue(tq, q);
}
inline void empty_queue(thread_queue_t* tq, queue_data_t* q)
{
uint64_t start_offset;
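// Atomically reserve a contiguous block at the end of the shared queue, then
// copy this thread's buffered entries into it.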
#pragma omp atomic capture
start_offset = q->next_size += tq->thread_queue_size;
start_offset -= tq->thread_queue_size;
for (uint64_t i = 0; i < tq->thread_queue_size; ++i)
q->queue_next[start_offset + i] = tq->thread_queue[i];
tq->thread_queue_size = 0;
}
inline void add_vid_to_send(thread_queue_t* tq, queue_data_t* q,
uint64_t vertex_id)
{
tq->thread_send[tq->thread_send_size++] = vertex_id;
if (tq->thread_send_size == THREAD_QUEUE_SIZE)
empty_send(tq, q);
}
inline void empty_send(thread_queue_t* tq, queue_data_t* q)
{
uint64_t start_offset;
#pragma omp atomic capture
start_offset = q->send_size += tq->thread_send_size;
start_offset -= tq->thread_send_size;
for (uint64_t i = 0; i < tq->thread_send_size; ++i)
q->queue_send[start_offset + i] = tq->thread_send[i];
tq->thread_send_size = 0;
}
inline void add_vid_data_to_send(thread_comm_t* tc, mpi_data_t* comm,
uint64_t vertex_id, uint64_t data_val, int32_t send_rank)
{
tc->sendbuf_vert_thread[tc->thread_queue_size] = vertex_id;
tc->sendbuf_data_thread[tc->thread_queue_size] = data_val;
tc->sendbuf_rank_thread[tc->thread_queue_size] = send_rank;
++tc->thread_queue_size;
++tc->sendcounts_thread[send_rank];
if (tc->thread_queue_size == THREAD_QUEUE_SIZE)
empty_vid_data(tc, comm);
}
inline void add_vid_data_to_send_flt(thread_comm_t* tc, mpi_data_t* comm,
uint64_t vertex_id, double data_val, int32_t send_rank)
{
tc->sendbuf_vert_thread[tc->thread_queue_size] = vertex_id;
tc->sendbuf_data_thread_flt[tc->thread_queue_size] = data_val;
tc->sendbuf_rank_thread[tc->thread_queue_size] = send_rank;
++tc->thread_queue_size;
++tc->sendcounts_thread[send_rank];
if (tc->thread_queue_size == THREAD_QUEUE_SIZE)
empty_vid_data_flt(tc, comm);
}
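// Flush a thread's buffered (vertex, data) entries into the shared MPI send
// buffers: atomically reserve per-rank slices via sdispls_cpy_temp, then
// scatter each buffered entry into its destination rank's slice.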
inline void empty_vid_data(thread_comm_t* tc, mpi_data_t* comm)
{
for (int32_t i = 0; i < nprocs; ++i)
{
#pragma omp atomic capture
tc->thread_starts[i] = comm->sdispls_cpy_temp[i] += tc->sendcounts_thread[i];
tc->thread_starts[i] -= tc->sendcounts_thread[i];
}
for (uint64_t i = 0; i < tc->thread_queue_size; ++i)
{
int32_t cur_rank = tc->sendbuf_rank_thread[i];
comm->sendbuf_vert[tc->thread_starts[cur_rank]] =
tc->sendbuf_vert_thread[i];
comm->sendbuf_data[tc->thread_starts[cur_rank]] =
tc->sendbuf_data_thread[i];
++tc->thread_starts[cur_rank];
}
for (int32_t i = 0; i < nprocs; ++i)
{
tc->thread_starts[i] = 0;
tc->sendcounts_thread[i] = 0;
}
tc->thread_queue_size = 0;
}
inline void empty_vid_data_flt(thread_comm_t* tc, mpi_data_t* comm)
{
for (int32_t i = 0; i < nprocs; ++i)
{
#pragma omp atomic capture
tc->thread_starts[i] = comm->sdispls_cpy_temp[i] += tc->sendcounts_thread[i];
tc->thread_starts[i] -= tc->sendcounts_thread[i];
}
for (uint64_t i = 0; i < tc->thread_queue_size; ++i)
{
int32_t cur_rank = tc->sendbuf_rank_thread[i];
comm->sendbuf_vert[tc->thread_starts[cur_rank]] =
tc->sendbuf_vert_thread[i];
comm->sendbuf_data_flt[tc->thread_starts[cur_rank]] =
tc->sendbuf_data_thread_flt[i];
++tc->thread_starts[cur_rank];
}
for (int32_t i = 0; i < nprocs; ++i)
{
tc->thread_starts[i] = 0;
tc->sendcounts_thread[i] = 0;
}
tc->thread_queue_size = 0;
}
#endif
|
openmp2.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <sys/sysconf.h>
int main (int argc, char *argv[])
{
int nthreads, tid;
printf("SC_NPROCESSORS_ONLN: %d\n", sysconf (_SC_NPROCESSORS_ONLN));
printf("SC_NPROCESSORS_CONF: %d\n", sysconf (_SC_NPROCESSORS_CONF));
#pragma omp parallel default(shared) private(nthreads, tid)
/* Fork a team of threads giving them their own copies of variables */
{
/* Obtain thread number */
tid = omp_get_thread_num();
printf("Hello World from thread = %d\n", tid);
/* Only master thread does this */
if (tid == 0)
{
nthreads = omp_get_num_threads();
printf("Number of threads = %d\n", nthreads);
}
} /* All threads join master thread and disband */
return 0;
}
|
3d25pt_var.c | /*
* Order-4, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 24;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
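// 25-point star stencil of radius 4: each new value combines the center point
// and the symmetric neighbor pairs at distances 1..4 along every axis, each
// weighted by a per-point coefficient; A is double-buffered over time via the
// (t)%2 / (t+1)%2 indices.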
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
A[(t+1)%2][i][j][k] =
coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
simdtruedep-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
// This one has race condition due to true dependence
#include <stdlib.h>
int main(int argc, char* argv[])
{
int i;
int len=100;
if (argc>1)
len = atoi(argv[1]);
int a[len], b[len];
for (i=0;i<len;i++)
{
a[i]=i;
b[i]=i+1;
}
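// Loop-carried true dependence: iteration i writes a[i+1], which iteration
// i+1 reads as a[i]; vectorizing with `omp simd` lets lanes read stale values,
// which is the race this benchmark intends to exhibit.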
#pragma omp simd
for (i=0;i<len-1;i++)
a[i+1]=a[i]*b[i];
return 0;
}
|
convolutiondepthwise_3x3_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
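// Depthwise 3x3 convolution, stride 1, pack4 layout: each v4f32 vector holds
// the 4 packed channels of one group, and the main loop computes two output
// rows and two output columns per iteration before handling the 1-wide tails.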
static void convdw3x3s1_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
v4f32 _bias0 = bias ? (v4f32)__msa_ld_w(bias + g * 4, 0) : (v4f32)__msa_fill_w(0);
const float* k0 = kernel.row(g);
float* outptr0 = out.row(0);
float* outptr1 = out.row(1);
const Mat img0 = bottom_blob.channel(g);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* r3 = img0.row(3);
v4f32 _k00 = (v4f32)__msa_ld_w(k0, 0);
v4f32 _k01 = (v4f32)__msa_ld_w(k0 + 4, 0);
v4f32 _k02 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
v4f32 _k10 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
v4f32 _k11 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);
v4f32 _k12 = (v4f32)__msa_ld_w(k0 + 4 * 5, 0);
v4f32 _k20 = (v4f32)__msa_ld_w(k0 + 4 * 6, 0);
v4f32 _k21 = (v4f32)__msa_ld_w(k0 + 4 * 7, 0);
v4f32 _k22 = (v4f32)__msa_ld_w(k0 + 4 * 8, 0);
int i = 0;
for (; i + 1 < outh; i += 2)
{
int j = 0;
for (; j + 1 < outw; j += 2)
{
__builtin_prefetch(r0 + 128);
__builtin_prefetch(r1 + 128);
__builtin_prefetch(r2 + 128);
__builtin_prefetch(r3 + 128);
v4f32 _sum00 = _bias0;
v4f32 _sum01 = _bias0;
v4f32 _sum10 = _bias0;
v4f32 _sum11 = _bias0;
v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
_sum00 = __msa_fmadd_w(_sum00, _k00, _r00);
_sum00 = __msa_fmadd_w(_sum00, _k01, _r01);
_sum00 = __msa_fmadd_w(_sum00, _k02, _r02);
_sum01 = __msa_fmadd_w(_sum01, _k00, _r01);
_sum01 = __msa_fmadd_w(_sum01, _k01, _r02);
_sum01 = __msa_fmadd_w(_sum01, _k02, _r03);
v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
v4f32 _r13 = (v4f32)__msa_ld_w(r1 + 4 * 3, 0);
_sum00 = __msa_fmadd_w(_sum00, _k10, _r10);
_sum00 = __msa_fmadd_w(_sum00, _k11, _r11);
_sum00 = __msa_fmadd_w(_sum00, _k12, _r12);
_sum01 = __msa_fmadd_w(_sum01, _k10, _r11);
_sum01 = __msa_fmadd_w(_sum01, _k11, _r12);
_sum01 = __msa_fmadd_w(_sum01, _k12, _r13);
_sum10 = __msa_fmadd_w(_sum10, _k00, _r10);
_sum10 = __msa_fmadd_w(_sum10, _k01, _r11);
_sum10 = __msa_fmadd_w(_sum10, _k02, _r12);
_sum11 = __msa_fmadd_w(_sum11, _k00, _r11);
_sum11 = __msa_fmadd_w(_sum11, _k01, _r12);
_sum11 = __msa_fmadd_w(_sum11, _k02, _r13);
v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
v4f32 _r23 = (v4f32)__msa_ld_w(r2 + 4 * 3, 0);
_sum00 = __msa_fmadd_w(_sum00, _k20, _r20);
_sum00 = __msa_fmadd_w(_sum00, _k21, _r21);
_sum00 = __msa_fmadd_w(_sum00, _k22, _r22);
_sum01 = __msa_fmadd_w(_sum01, _k20, _r21);
_sum01 = __msa_fmadd_w(_sum01, _k21, _r22);
_sum01 = __msa_fmadd_w(_sum01, _k22, _r23);
_sum10 = __msa_fmadd_w(_sum10, _k10, _r20);
_sum10 = __msa_fmadd_w(_sum10, _k11, _r21);
_sum10 = __msa_fmadd_w(_sum10, _k12, _r22);
_sum11 = __msa_fmadd_w(_sum11, _k10, _r21);
_sum11 = __msa_fmadd_w(_sum11, _k11, _r22);
_sum11 = __msa_fmadd_w(_sum11, _k12, _r23);
v4f32 _r30 = (v4f32)__msa_ld_w(r3, 0);
v4f32 _r31 = (v4f32)__msa_ld_w(r3 + 4, 0);
v4f32 _r32 = (v4f32)__msa_ld_w(r3 + 4 * 2, 0);
v4f32 _r33 = (v4f32)__msa_ld_w(r3 + 4 * 3, 0);
_sum10 = __msa_fmadd_w(_sum10, _k20, _r30);
_sum10 = __msa_fmadd_w(_sum10, _k21, _r31);
_sum10 = __msa_fmadd_w(_sum10, _k22, _r32);
_sum11 = __msa_fmadd_w(_sum11, _k20, _r31);
_sum11 = __msa_fmadd_w(_sum11, _k21, _r32);
_sum11 = __msa_fmadd_w(_sum11, _k22, _r33);
__msa_st_w((v4i32)_sum00, outptr0, 0);
__msa_st_w((v4i32)_sum01, outptr0 + 4, 0);
__msa_st_w((v4i32)_sum10, outptr1, 0);
__msa_st_w((v4i32)_sum11, outptr1 + 4, 0);
outptr0 += 4 * 2;
outptr1 += 4 * 2;
r0 += 4 * 2;
r1 += 4 * 2;
r2 += 4 * 2;
r3 += 4 * 2;
}
for (; j < outw; j++)
{
__builtin_prefetch(r0 + 96);
__builtin_prefetch(r1 + 96);
__builtin_prefetch(r2 + 96);
__builtin_prefetch(r3 + 96);
v4f32 _sum0 = _bias0;
v4f32 _sum1 = _bias0;
v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
_sum0 = __msa_fmadd_w(_sum0, _k00, _r00);
_sum0 = __msa_fmadd_w(_sum0, _k01, _r01);
_sum0 = __msa_fmadd_w(_sum0, _k02, _r02);
v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
_sum0 = __msa_fmadd_w(_sum0, _k10, _r10);
_sum0 = __msa_fmadd_w(_sum0, _k11, _r11);
_sum0 = __msa_fmadd_w(_sum0, _k12, _r12);
_sum1 = __msa_fmadd_w(_sum1, _k00, _r10);
_sum1 = __msa_fmadd_w(_sum1, _k01, _r11);
_sum1 = __msa_fmadd_w(_sum1, _k02, _r12);
v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
_sum0 = __msa_fmadd_w(_sum0, _k20, _r20);
_sum0 = __msa_fmadd_w(_sum0, _k21, _r21);
_sum0 = __msa_fmadd_w(_sum0, _k22, _r22);
_sum1 = __msa_fmadd_w(_sum1, _k10, _r20);
_sum1 = __msa_fmadd_w(_sum1, _k11, _r21);
_sum1 = __msa_fmadd_w(_sum1, _k12, _r22);
v4f32 _r30 = (v4f32)__msa_ld_w(r3, 0);
v4f32 _r31 = (v4f32)__msa_ld_w(r3 + 4, 0);
v4f32 _r32 = (v4f32)__msa_ld_w(r3 + 4 * 2, 0);
_sum1 = __msa_fmadd_w(_sum1, _k20, _r30);
_sum1 = __msa_fmadd_w(_sum1, _k21, _r31);
_sum1 = __msa_fmadd_w(_sum1, _k22, _r32);
__msa_st_w((v4i32)_sum0, outptr0, 0);
__msa_st_w((v4i32)_sum1, outptr1, 0);
outptr0 += 4;
outptr1 += 4;
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
}
r0 += 2 * 4 + w * 4;
r1 += 2 * 4 + w * 4;
r2 += 2 * 4 + w * 4;
r3 += 2 * 4 + w * 4;
outptr0 += outw * 4;
outptr1 += outw * 4;
}
for (; i < outh; i++)
{
int j = 0;
for (; j + 1 < outw; j += 2)
{
__builtin_prefetch(r0 + 128);
__builtin_prefetch(r1 + 128);
__builtin_prefetch(r2 + 128);
v4f32 _sum00 = _bias0;
v4f32 _sum01 = _bias0;
v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
_sum00 = __msa_fmadd_w(_sum00, _k00, _r00);
_sum00 = __msa_fmadd_w(_sum00, _k01, _r01);
_sum00 = __msa_fmadd_w(_sum00, _k02, _r02);
_sum01 = __msa_fmadd_w(_sum01, _k00, _r01);
_sum01 = __msa_fmadd_w(_sum01, _k01, _r02);
_sum01 = __msa_fmadd_w(_sum01, _k02, _r03);
v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
v4f32 _r13 = (v4f32)__msa_ld_w(r1 + 4 * 3, 0);
_sum00 = __msa_fmadd_w(_sum00, _k10, _r10);
_sum00 = __msa_fmadd_w(_sum00, _k11, _r11);
_sum00 = __msa_fmadd_w(_sum00, _k12, _r12);
_sum01 = __msa_fmadd_w(_sum01, _k10, _r11);
_sum01 = __msa_fmadd_w(_sum01, _k11, _r12);
_sum01 = __msa_fmadd_w(_sum01, _k12, _r13);
v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
v4f32 _r23 = (v4f32)__msa_ld_w(r2 + 4 * 3, 0);
_sum00 = __msa_fmadd_w(_sum00, _k20, _r20);
_sum00 = __msa_fmadd_w(_sum00, _k21, _r21);
_sum00 = __msa_fmadd_w(_sum00, _k22, _r22);
_sum01 = __msa_fmadd_w(_sum01, _k20, _r21);
_sum01 = __msa_fmadd_w(_sum01, _k21, _r22);
_sum01 = __msa_fmadd_w(_sum01, _k22, _r23);
__msa_st_w((v4i32)_sum00, outptr0, 0);
__msa_st_w((v4i32)_sum01, outptr0 + 4, 0);
outptr0 += 4 * 2;
r0 += 4 * 2;
r1 += 4 * 2;
r2 += 4 * 2;
}
for (; j < outw; j++)
{
__builtin_prefetch(r0 + 96);
__builtin_prefetch(r1 + 96);
__builtin_prefetch(r2 + 96);
v4f32 _sum0 = _bias0;
v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
_sum0 = __msa_fmadd_w(_sum0, _k00, _r00);
_sum0 = __msa_fmadd_w(_sum0, _k01, _r01);
_sum0 = __msa_fmadd_w(_sum0, _k02, _r02);
v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
_sum0 = __msa_fmadd_w(_sum0, _k10, _r10);
_sum0 = __msa_fmadd_w(_sum0, _k11, _r11);
_sum0 = __msa_fmadd_w(_sum0, _k12, _r12);
v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
_sum0 = __msa_fmadd_w(_sum0, _k20, _r20);
_sum0 = __msa_fmadd_w(_sum0, _k21, _r21);
_sum0 = __msa_fmadd_w(_sum0, _k22, _r22);
__msa_st_w((v4i32)_sum0, outptr0, 0);
outptr0 += 4;
r0 += 4;
r1 += 4;
r2 += 4;
}
r0 += 2 * 4;
r1 += 2 * 4;
r2 += 2 * 4;
}
}
}
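/* Depthwise 3x3, stride-2 convolution in pack-4 layout (4 floats per packed
 * element) using MIPS MSA intrinsics: each output pixel accumulates the nine
 * per-channel kernel taps into a v4f32 running sum. */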
static void convdw3x3s2_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const int tailstep = (w - 2 * outw + w) * 4;
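// with stride 2 each output row consumes 2*outw packed input elements
// horizontally and two input rows vertically, so tailstep skips the unread
// remainder of the current row plus one full row (times 4 floats per element)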
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
v4f32 _bias0 = bias ? (v4f32)__msa_ld_w(bias + g * 4, 0) : (v4f32)__msa_fill_w(0);
const float* k0 = kernel.row(g);
float* outptr0 = out;
const Mat img0 = bottom_blob.channel(g);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
v4f32 _k00 = (v4f32)__msa_ld_w(k0, 0);
v4f32 _k01 = (v4f32)__msa_ld_w(k0 + 4, 0);
v4f32 _k02 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
v4f32 _k10 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
v4f32 _k11 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);
v4f32 _k12 = (v4f32)__msa_ld_w(k0 + 4 * 5, 0);
v4f32 _k20 = (v4f32)__msa_ld_w(k0 + 4 * 6, 0);
v4f32 _k21 = (v4f32)__msa_ld_w(k0 + 4 * 7, 0);
v4f32 _k22 = (v4f32)__msa_ld_w(k0 + 4 * 8, 0);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 1 < outw; j += 2)
{
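// two output columns per iteration: _sum00 reads packed elements 0..2 of each
// row while _sum01 reads elements 2..4, since the horizontal stride is 2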
__builtin_prefetch(r0 + 160);
__builtin_prefetch(r1 + 160);
__builtin_prefetch(r2 + 160);
v4f32 _sum00 = _bias0;
v4f32 _sum01 = _bias0;
v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);
_sum00 = __msa_fmadd_w(_sum00, _k00, _r00);
_sum00 = __msa_fmadd_w(_sum00, _k01, _r01);
_sum00 = __msa_fmadd_w(_sum00, _k02, _r02);
_sum01 = __msa_fmadd_w(_sum01, _k00, _r02);
_sum01 = __msa_fmadd_w(_sum01, _k01, _r03);
_sum01 = __msa_fmadd_w(_sum01, _k02, _r04);
v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
v4f32 _r13 = (v4f32)__msa_ld_w(r1 + 4 * 3, 0);
v4f32 _r14 = (v4f32)__msa_ld_w(r1 + 4 * 4, 0);
_sum00 = __msa_fmadd_w(_sum00, _k10, _r10);
_sum00 = __msa_fmadd_w(_sum00, _k11, _r11);
_sum00 = __msa_fmadd_w(_sum00, _k12, _r12);
_sum01 = __msa_fmadd_w(_sum01, _k10, _r12);
_sum01 = __msa_fmadd_w(_sum01, _k11, _r13);
_sum01 = __msa_fmadd_w(_sum01, _k12, _r14);
v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
v4f32 _r23 = (v4f32)__msa_ld_w(r2 + 4 * 3, 0);
v4f32 _r24 = (v4f32)__msa_ld_w(r2 + 4 * 4, 0);
_sum00 = __msa_fmadd_w(_sum00, _k20, _r20);
_sum00 = __msa_fmadd_w(_sum00, _k21, _r21);
_sum00 = __msa_fmadd_w(_sum00, _k22, _r22);
_sum01 = __msa_fmadd_w(_sum01, _k20, _r22);
_sum01 = __msa_fmadd_w(_sum01, _k21, _r23);
_sum01 = __msa_fmadd_w(_sum01, _k22, _r24);
__msa_st_w((v4i32)_sum00, outptr0, 0);
__msa_st_w((v4i32)_sum01, outptr0 + 4, 0);
outptr0 += 4 * 2;
r0 += 4 * 4;
r1 += 4 * 4;
r2 += 4 * 4;
}
for (; j < outw; j++)
{
__builtin_prefetch(r0 + 96);
__builtin_prefetch(r1 + 96);
__builtin_prefetch(r2 + 96);
v4f32 _sum0 = _bias0;
v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
_sum0 = __msa_fmadd_w(_sum0, _k00, _r00);
_sum0 = __msa_fmadd_w(_sum0, _k01, _r01);
_sum0 = __msa_fmadd_w(_sum0, _k02, _r02);
v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
_sum0 = __msa_fmadd_w(_sum0, _k10, _r10);
_sum0 = __msa_fmadd_w(_sum0, _k11, _r11);
_sum0 = __msa_fmadd_w(_sum0, _k12, _r12);
v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
_sum0 = __msa_fmadd_w(_sum0, _k20, _r20);
_sum0 = __msa_fmadd_w(_sum0, _k21, _r21);
_sum0 = __msa_fmadd_w(_sum0, _k22, _r22);
__msa_st_w((v4i32)_sum0, outptr0, 0);
outptr0 += 4;
r0 += 4 * 2;
r1 += 4 * 2;
r2 += 4 * 2;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
|
run-on-cpu.c | /*
* Copyright 2016 Vanya Yaneva, The University of Edinburgh
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../kernel-gen/cpu-gen.h"
#include "../kernel-gen/structs.h"
#include "../utils/options.h"
#include "../utils/read-test-cases.h"
#include "../utils/timing.h"
#include "../utils/utils.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h> /* omp_get_thread_num() / omp_get_num_threads() are used below */
int run_main(struct partecl_input input, struct partecl_result *result);
void run_on_cpu(struct partecl_input input, struct partecl_result *result) {
result->test_case_num = input.test_case_num;
run_main(input, result);
}
int main(int argc, char **argv) {
int do_print_results = HANDLE_RESULTS;
int num_runs = NUM_RUNS;
int do_time = DO_TIME;
int num_test_cases = 1;
if (read_options(argc, argv, &num_test_cases, &do_print_results, &do_time,
&num_runs, NULL, NULL, NULL) == FAIL)
return 0;
printf("Device: CPU.\n");
printf("Number of test cases %d.\n", num_test_cases);
#pragma omp parallel default(none)
{
int tid = omp_get_thread_num();
if (tid == 0)
printf("Number of threads = %d\n", omp_get_num_threads());
}
if (do_time)
printf("Time in ms\n");
struct partecl_input *inputs;
size_t inputs_size = sizeof(struct partecl_input) * num_test_cases;
inputs = (struct partecl_input *)malloc(inputs_size);
struct partecl_result *results;
size_t results_size = sizeof(struct partecl_result) * num_test_cases;
results = (struct partecl_result *)malloc(results_size);
// read the test cases
if (read_test_cases(inputs, num_test_cases) == FAIL)
return 0;
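// each of the num_runs iterations processes all test cases in parallel and,
// when timing is enabled, reports the elapsed wall-clock time in milliseconds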
for (int i = 0; i < num_runs; i++) {
struct timespec time1, time2;
get_timestamp(&time1);
#pragma omp parallel for default(none) shared(num_test_cases, inputs, results) \
schedule(static)
for (int j = 0; j < num_test_cases; j++) {
run_on_cpu(inputs[j], &results[j]);
}
get_timestamp(&time2);
double time_in_secs = timestamp_diff_in_seconds(time1, time2);
double time_cpu = time_in_secs * 1000;
if (do_time)
printf("%f \n", time_cpu);
}
if (do_print_results)
compare_results(results, NULL, num_test_cases);
free(inputs);
free(results);
}
|
smg_residual_unrolled.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
#include "_hypre_struct_ls.h"
/*--------------------------------------------------------------------------
* hypre_SMGResidualData data structure
*--------------------------------------------------------------------------*/
typedef struct
{
hypre_Index base_index;
hypre_Index base_stride;
hypre_StructMatrix *A;
hypre_StructVector *x;
hypre_StructVector *b;
hypre_StructVector *r;
hypre_BoxArray *base_points;
hypre_ComputePkg *compute_pkg;
HYPRE_Int time_index;
HYPRE_Int flops;
} hypre_SMGResidualData;
/*--------------------------------------------------------------------------
* hypre_SMGResidualCreate
*--------------------------------------------------------------------------*/
void *
hypre_SMGResidualCreate( )
{
hypre_SMGResidualData *residual_data;
residual_data = hypre_CTAlloc(hypre_SMGResidualData, 1);
(residual_data -> time_index) = hypre_InitializeTiming("SMGResidual");
/* set defaults */
hypre_SetIndex3((residual_data -> base_index), 0, 0, 0);
hypre_SetIndex3((residual_data -> base_stride), 1, 1, 1);
return (void *) residual_data;
}
/*--------------------------------------------------------------------------
* hypre_SMGResidualSetup
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SMGResidualSetup( void *residual_vdata,
hypre_StructMatrix *A,
hypre_StructVector *x,
hypre_StructVector *b,
hypre_StructVector *r )
{
HYPRE_Int ierr = 0;
hypre_SMGResidualData *residual_data = residual_vdata;
hypre_IndexRef base_index = (residual_data -> base_index);
hypre_IndexRef base_stride = (residual_data -> base_stride);
hypre_Index unit_stride;
hypre_StructGrid *grid;
hypre_StructStencil *stencil;
hypre_BoxArray *base_points;
hypre_ComputeInfo *compute_info;
hypre_ComputePkg *compute_pkg;
/*----------------------------------------------------------
* Set up base points and the compute package
*----------------------------------------------------------*/
grid = hypre_StructMatrixGrid(A);
stencil = hypre_StructMatrixStencil(A);
hypre_SetIndex3(unit_stride, 1, 1, 1);
base_points = hypre_BoxArrayDuplicate(hypre_StructGridBoxes(grid));
hypre_ProjectBoxArray(base_points, base_index, base_stride);
hypre_CreateComputeInfo(grid, stencil, &compute_info);
hypre_ComputeInfoProjectComp(compute_info, base_index, base_stride);
hypre_ComputePkgCreate(compute_info, hypre_StructVectorDataSpace(x), 1,
grid, &compute_pkg);
/*----------------------------------------------------------
* Set up the residual data structure
*----------------------------------------------------------*/
(residual_data -> A) = hypre_StructMatrixRef(A);
(residual_data -> x) = hypre_StructVectorRef(x);
(residual_data -> b) = hypre_StructVectorRef(b);
(residual_data -> r) = hypre_StructVectorRef(r);
(residual_data -> base_points) = base_points;
(residual_data -> compute_pkg) = compute_pkg;
/*-----------------------------------------------------
* Compute flops
*-----------------------------------------------------*/
(residual_data -> flops) =
(hypre_StructMatrixGlobalSize(A) + hypre_StructVectorGlobalSize(x)) /
(hypre_IndexX(base_stride) *
hypre_IndexY(base_stride) *
hypre_IndexZ(base_stride) );
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SMGResidual
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SMGResidual( void *residual_vdata,
hypre_StructMatrix *A,
hypre_StructVector *x,
hypre_StructVector *b,
hypre_StructVector *r )
{
HYPRE_Int ierr = 0;
hypre_SMGResidualData *residual_data = residual_vdata;
hypre_IndexRef base_stride = (residual_data -> base_stride);
hypre_BoxArray *base_points = (residual_data -> base_points);
hypre_ComputePkg *compute_pkg = (residual_data -> compute_pkg);
hypre_CommHandle *comm_handle;
hypre_BoxArrayArray *compute_box_aa;
hypre_BoxArray *compute_box_a;
hypre_Box *compute_box;
hypre_Box *A_data_box;
hypre_Box *x_data_box;
hypre_Box *b_data_box;
hypre_Box *r_data_box;
HYPRE_Int Ai;
HYPRE_Int xi;
HYPRE_Int bi;
HYPRE_Int ri;
HYPRE_Real *Ap0;
HYPRE_Real *xp0;
HYPRE_Real *bp;
HYPRE_Real *rp;
hypre_Index loop_size;
hypre_IndexRef start;
hypre_StructStencil *stencil;
hypre_Index *stencil_shape;
HYPRE_Int stencil_size;
HYPRE_Int compute_i, i, j, si;
HYPRE_Real *Ap1, *Ap2;
HYPRE_Real *Ap3, *Ap4;
HYPRE_Real *Ap5, *Ap6;
HYPRE_Real *Ap7, *Ap8, *Ap9;
HYPRE_Real *Ap10, *Ap11, *Ap12, *Ap13, *Ap14;
HYPRE_Real *Ap15, *Ap16, *Ap17, *Ap18;
HYPRE_Real *Ap19, *Ap20, *Ap21, *Ap22, *Ap23, *Ap24, *Ap25, *Ap26;
HYPRE_Real *xp1, *xp2;
HYPRE_Real *xp3, *xp4;
HYPRE_Real *xp5, *xp6;
HYPRE_Real *xp7, *xp8, *xp9;
HYPRE_Real *xp10, *xp11, *xp12, *xp13, *xp14;
HYPRE_Real *xp15, *xp16, *xp17, *xp18;
HYPRE_Real *xp19, *xp20, *xp21, *xp22, *xp23, *xp24, *xp25, *xp26;
hypre_BeginTiming(residual_data -> time_index);
/*-----------------------------------------------------------------------
* Compute residual r = b - Ax
*-----------------------------------------------------------------------*/
stencil = hypre_StructMatrixStencil(A);
stencil_shape = hypre_StructStencilShape(stencil);
stencil_size = hypre_StructStencilSize(stencil);
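/* Two-phase compute: pass 0 starts the halo exchange for x, copies b into r,
 * and processes the boxes that are independent of off-processor data; pass 1
 * waits for the exchange to complete and processes the dependent boxes. */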
for (compute_i = 0; compute_i < 2; compute_i++)
{
switch(compute_i)
{
case 0:
{
xp0 = hypre_StructVectorData(x);
hypre_InitializeIndtComputations(compute_pkg, xp0, &comm_handle);
compute_box_aa = hypre_ComputePkgIndtBoxes(compute_pkg);
/*----------------------------------------
* Copy b into r
*----------------------------------------*/
compute_box_a = base_points;
hypre_ForBoxI(i, compute_box_a)
{
compute_box = hypre_BoxArrayBox(compute_box_a, i);
start = hypre_BoxIMin(compute_box);
b_data_box =
hypre_BoxArrayBox(hypre_StructVectorDataSpace(b), i);
r_data_box =
hypre_BoxArrayBox(hypre_StructVectorDataSpace(r), i);
bp = hypre_StructVectorBoxData(b, i);
rp = hypre_StructVectorBoxData(r, i);
hypre_BoxGetStrideSize(compute_box, base_stride, loop_size);
hypre_BoxLoop2Begin(hypre_StructMatrixNDim(A), loop_size,
b_data_box, start, base_stride, bi,
r_data_box, start, base_stride, ri);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,bi,ri) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(bi, ri)
{
rp[ri] = bp[bi];
}
hypre_BoxLoop2End(bi, ri);
}
}
break;
case 1:
{
hypre_FinalizeIndtComputations(comm_handle);
compute_box_aa = hypre_ComputePkgDeptBoxes(compute_pkg);
}
break;
}
/*--------------------------------------------------------------------
* Compute r -= A*x
*--------------------------------------------------------------------*/
hypre_ForBoxArrayI(i, compute_box_aa)
{
compute_box_a = hypre_BoxArrayArrayBoxArray(compute_box_aa, i);
A_data_box = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), i);
x_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i);
r_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(r), i);
rp = hypre_StructVectorBoxData(r, i);
/*--------------------------------------------------------------
* Switch statement to direct control (based on stencil size) to
* code to get pointers and offsets for A and x.
*--------------------------------------------------------------*/
switch (stencil_size)
{
case 1:
Ap0 = hypre_StructMatrixBoxData(A, i, 0);
xp0 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[0]);
break;
case 3:
Ap0 = hypre_StructMatrixBoxData(A, i, 0);
Ap1 = hypre_StructMatrixBoxData(A, i, 1);
Ap2 = hypre_StructMatrixBoxData(A, i, 2);
xp0 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[0]);
xp1 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[1]);
xp2 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[2]);
break;
case 5:
Ap0 = hypre_StructMatrixBoxData(A, i, 0);
Ap1 = hypre_StructMatrixBoxData(A, i, 1);
Ap2 = hypre_StructMatrixBoxData(A, i, 2);
Ap3 = hypre_StructMatrixBoxData(A, i, 3);
Ap4 = hypre_StructMatrixBoxData(A, i, 4);
xp0 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[0]);
xp1 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[1]);
xp2 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[2]);
xp3 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[3]);
xp4 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[4]);
break;
case 7:
Ap0 = hypre_StructMatrixBoxData(A, i, 0);
Ap1 = hypre_StructMatrixBoxData(A, i, 1);
Ap2 = hypre_StructMatrixBoxData(A, i, 2);
Ap3 = hypre_StructMatrixBoxData(A, i, 3);
Ap4 = hypre_StructMatrixBoxData(A, i, 4);
Ap5 = hypre_StructMatrixBoxData(A, i, 5);
Ap6 = hypre_StructMatrixBoxData(A, i, 6);
xp0 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[0]);
xp1 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[1]);
xp2 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[2]);
xp3 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[3]);
xp4 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[4]);
xp5 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[5]);
xp6 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[6]);
break;
case 9:
Ap0 = hypre_StructMatrixBoxData(A, i, 0);
Ap1 = hypre_StructMatrixBoxData(A, i, 1);
Ap2 = hypre_StructMatrixBoxData(A, i, 2);
Ap3 = hypre_StructMatrixBoxData(A, i, 3);
Ap4 = hypre_StructMatrixBoxData(A, i, 4);
Ap5 = hypre_StructMatrixBoxData(A, i, 5);
Ap6 = hypre_StructMatrixBoxData(A, i, 6);
Ap7 = hypre_StructMatrixBoxData(A, i, 7);
Ap8 = hypre_StructMatrixBoxData(A, i, 8);
xp0 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[0]);
xp1 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[1]);
xp2 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[2]);
xp3 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[3]);
xp4 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[4]);
xp5 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[5]);
xp6 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[6]);
xp7 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[7]);
xp8 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[8]);
break;
case 15:
Ap0 = hypre_StructMatrixBoxData(A, i, 0);
Ap1 = hypre_StructMatrixBoxData(A, i, 1);
Ap2 = hypre_StructMatrixBoxData(A, i, 2);
Ap3 = hypre_StructMatrixBoxData(A, i, 3);
Ap4 = hypre_StructMatrixBoxData(A, i, 4);
Ap5 = hypre_StructMatrixBoxData(A, i, 5);
Ap6 = hypre_StructMatrixBoxData(A, i, 6);
Ap7 = hypre_StructMatrixBoxData(A, i, 7);
Ap8 = hypre_StructMatrixBoxData(A, i, 8);
Ap9 = hypre_StructMatrixBoxData(A, i, 9);
Ap10 = hypre_StructMatrixBoxData(A, i, 10);
Ap11 = hypre_StructMatrixBoxData(A, i, 11);
Ap12 = hypre_StructMatrixBoxData(A, i, 12);
Ap13 = hypre_StructMatrixBoxData(A, i, 13);
Ap14 = hypre_StructMatrixBoxData(A, i, 14);
xp0 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[0]);
xp1 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[1]);
xp2 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[2]);
xp3 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[3]);
xp4 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[4]);
xp5 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[5]);
xp6 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[6]);
xp7 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[7]);
xp8 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[8]);
xp9 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[9]);
xp10 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[10]);
xp11 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[11]);
xp12 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[12]);
xp13 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[13]);
xp14 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[14]);
break;
case 19:
Ap0 = hypre_StructMatrixBoxData(A, i, 0);
Ap1 = hypre_StructMatrixBoxData(A, i, 1);
Ap2 = hypre_StructMatrixBoxData(A, i, 2);
Ap3 = hypre_StructMatrixBoxData(A, i, 3);
Ap4 = hypre_StructMatrixBoxData(A, i, 4);
Ap5 = hypre_StructMatrixBoxData(A, i, 5);
Ap6 = hypre_StructMatrixBoxData(A, i, 6);
Ap7 = hypre_StructMatrixBoxData(A, i, 7);
Ap8 = hypre_StructMatrixBoxData(A, i, 8);
Ap9 = hypre_StructMatrixBoxData(A, i, 9);
Ap10 = hypre_StructMatrixBoxData(A, i, 10);
Ap11 = hypre_StructMatrixBoxData(A, i, 11);
Ap12 = hypre_StructMatrixBoxData(A, i, 12);
Ap13 = hypre_StructMatrixBoxData(A, i, 13);
Ap14 = hypre_StructMatrixBoxData(A, i, 14);
Ap15 = hypre_StructMatrixBoxData(A, i, 15);
Ap16 = hypre_StructMatrixBoxData(A, i, 16);
Ap17 = hypre_StructMatrixBoxData(A, i, 17);
Ap18 = hypre_StructMatrixBoxData(A, i, 18);
xp0 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[0]);
xp1 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[1]);
xp2 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[2]);
xp3 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[3]);
xp4 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[4]);
xp5 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[5]);
xp6 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[6]);
xp7 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[7]);
xp8 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[8]);
xp9 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[9]);
xp10 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[10]);
xp11 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[11]);
xp12 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[12]);
xp13 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[13]);
xp14 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[14]);
xp15 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[15]);
xp16 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[16]);
xp17 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[17]);
xp18 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[18]);
break;
case 27:
Ap0 = hypre_StructMatrixBoxData(A, i, 0);
Ap1 = hypre_StructMatrixBoxData(A, i, 1);
Ap2 = hypre_StructMatrixBoxData(A, i, 2);
Ap3 = hypre_StructMatrixBoxData(A, i, 3);
Ap4 = hypre_StructMatrixBoxData(A, i, 4);
Ap5 = hypre_StructMatrixBoxData(A, i, 5);
Ap6 = hypre_StructMatrixBoxData(A, i, 6);
Ap7 = hypre_StructMatrixBoxData(A, i, 7);
Ap8 = hypre_StructMatrixBoxData(A, i, 8);
Ap9 = hypre_StructMatrixBoxData(A, i, 9);
Ap10 = hypre_StructMatrixBoxData(A, i, 10);
Ap11 = hypre_StructMatrixBoxData(A, i, 11);
Ap12 = hypre_StructMatrixBoxData(A, i, 12);
Ap13 = hypre_StructMatrixBoxData(A, i, 13);
Ap14 = hypre_StructMatrixBoxData(A, i, 14);
Ap15 = hypre_StructMatrixBoxData(A, i, 15);
Ap16 = hypre_StructMatrixBoxData(A, i, 16);
Ap17 = hypre_StructMatrixBoxData(A, i, 17);
Ap18 = hypre_StructMatrixBoxData(A, i, 18);
Ap19 = hypre_StructMatrixBoxData(A, i, 19);
Ap20 = hypre_StructMatrixBoxData(A, i, 20);
Ap21 = hypre_StructMatrixBoxData(A, i, 21);
Ap22 = hypre_StructMatrixBoxData(A, i, 22);
Ap23 = hypre_StructMatrixBoxData(A, i, 23);
Ap24 = hypre_StructMatrixBoxData(A, i, 24);
Ap25 = hypre_StructMatrixBoxData(A, i, 25);
Ap26 = hypre_StructMatrixBoxData(A, i, 26);
xp0 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[0]);
xp1 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[1]);
xp2 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[2]);
xp3 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[3]);
xp4 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[4]);
xp5 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[5]);
xp6 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[6]);
xp7 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[7]);
xp8 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[8]);
xp9 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[9]);
xp10 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[10]);
xp11 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[11]);
xp12 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[12]);
xp13 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[13]);
xp14 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[14]);
xp15 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[15]);
xp16 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[16]);
xp17 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[17]);
xp18 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[18]);
xp19 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[19]);
xp20 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[20]);
xp21 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[21]);
xp22 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[22]);
xp23 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[23]);
xp24 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[24]);
xp25 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[25]);
xp26 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[26]);
break;
default:
;
}
hypre_ForBoxI(j, compute_box_a)
{
compute_box = hypre_BoxArrayBox(compute_box_a, j);
start = hypre_BoxIMin(compute_box);
/*------------------------------------------------------
* Switch statement to direct control to appropriate
* box loop depending on stencil size
*------------------------------------------------------*/
switch (stencil_size)
{
case 1:
hypre_BoxGetStrideSize(compute_box, base_stride, loop_size);
hypre_BoxLoop3Begin(hypre_StructMatrixNDim(A), loop_size,
A_data_box, start, base_stride, Ai,
x_data_box, start, base_stride, xi,
r_data_box, start, base_stride, ri);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ri) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(Ai, xi, ri)
{
rp[ri] = rp[ri]
- Ap0[Ai] * xp0[xi];
}
hypre_BoxLoop3End(Ai, xi, ri);
break;
case 3:
hypre_BoxGetStrideSize(compute_box, base_stride, loop_size);
hypre_BoxLoop3Begin(hypre_StructMatrixNDim(A), loop_size,
A_data_box, start, base_stride, Ai,
x_data_box, start, base_stride, xi,
r_data_box, start, base_stride, ri);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ri) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(Ai, xi, ri)
{
rp[ri] = rp[ri]
- Ap0[Ai] * xp0[xi]
- Ap1[Ai] * xp1[xi]
- Ap2[Ai] * xp2[xi];
}
hypre_BoxLoop3End(Ai, xi, ri);
break;
case 5:
hypre_BoxGetStrideSize(compute_box, base_stride, loop_size);
hypre_BoxLoop3Begin(hypre_StructMatrixNDim(A), loop_size,
A_data_box, start, base_stride, Ai,
x_data_box, start, base_stride, xi,
r_data_box, start, base_stride, ri);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ri) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(Ai, xi, ri)
{
rp[ri] = rp[ri]
- Ap0[Ai] * xp0[xi]
- Ap1[Ai] * xp1[xi]
- Ap2[Ai] * xp2[xi]
- Ap3[Ai] * xp3[xi]
- Ap4[Ai] * xp4[xi];
}
hypre_BoxLoop3End(Ai, xi, ri);
break;
case 7:
hypre_BoxGetStrideSize(compute_box, base_stride, loop_size);
hypre_BoxLoop3Begin(hypre_StructMatrixNDim(A), loop_size,
A_data_box, start, base_stride, Ai,
x_data_box, start, base_stride, xi,
r_data_box, start, base_stride, ri);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ri) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(Ai, xi, ri)
{
rp[ri] = rp[ri]
- Ap0[Ai] * xp0[xi]
- Ap1[Ai] * xp1[xi]
- Ap2[Ai] * xp2[xi]
- Ap3[Ai] * xp3[xi]
- Ap4[Ai] * xp4[xi]
- Ap5[Ai] * xp5[xi]
- Ap6[Ai] * xp6[xi];
}
hypre_BoxLoop3End(Ai, xi, ri);
break;
case 9:
hypre_BoxGetStrideSize(compute_box, base_stride, loop_size);
hypre_BoxLoop3Begin(hypre_StructMatrixNDim(A), loop_size,
A_data_box, start, base_stride, Ai,
x_data_box, start, base_stride, xi,
r_data_box, start, base_stride, ri);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ri) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(Ai, xi, ri)
{
rp[ri] = rp[ri]
- Ap0[Ai] * xp0[xi]
- Ap1[Ai] * xp1[xi]
- Ap2[Ai] * xp2[xi]
- Ap3[Ai] * xp3[xi]
- Ap4[Ai] * xp4[xi]
- Ap5[Ai] * xp5[xi]
- Ap6[Ai] * xp6[xi]
- Ap7[Ai] * xp7[xi]
- Ap8[Ai] * xp8[xi];
}
hypre_BoxLoop3End(Ai, xi, ri);
break;
case 15:
hypre_BoxGetStrideSize(compute_box, base_stride, loop_size);
hypre_BoxLoop3Begin(hypre_StructMatrixNDim(A), loop_size,
A_data_box, start, base_stride, Ai,
x_data_box, start, base_stride, xi,
r_data_box, start, base_stride, ri);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ri) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(Ai, xi, ri)
{
rp[ri] = rp[ri]
- Ap0[Ai] * xp0[xi]
- Ap1[Ai] * xp1[xi]
- Ap2[Ai] * xp2[xi]
- Ap3[Ai] * xp3[xi]
- Ap4[Ai] * xp4[xi]
- Ap5[Ai] * xp5[xi]
- Ap6[Ai] * xp6[xi]
- Ap7[Ai] * xp7[xi]
- Ap8[Ai] * xp8[xi]
- Ap9[Ai] * xp9[xi]
- Ap10[Ai] * xp10[xi]
- Ap11[Ai] * xp11[xi]
- Ap12[Ai] * xp12[xi]
- Ap13[Ai] * xp13[xi]
- Ap14[Ai] * xp14[xi];
}
hypre_BoxLoop3End(Ai, xi, ri);
break;
case 19:
hypre_BoxGetStrideSize(compute_box, base_stride, loop_size);
hypre_BoxLoop3Begin(hypre_StructMatrixNDim(A), loop_size,
A_data_box, start, base_stride, Ai,
x_data_box, start, base_stride, xi,
r_data_box, start, base_stride, ri);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ri) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(Ai, xi, ri)
{
rp[ri] = rp[ri]
- Ap0[Ai] * xp0[xi]
- Ap1[Ai] * xp1[xi]
- Ap2[Ai] * xp2[xi]
- Ap3[Ai] * xp3[xi]
- Ap4[Ai] * xp4[xi]
- Ap5[Ai] * xp5[xi]
- Ap6[Ai] * xp6[xi]
- Ap7[Ai] * xp7[xi]
- Ap8[Ai] * xp8[xi]
- Ap9[Ai] * xp9[xi]
- Ap10[Ai] * xp10[xi]
- Ap11[Ai] * xp11[xi]
- Ap12[Ai] * xp12[xi]
- Ap13[Ai] * xp13[xi]
- Ap14[Ai] * xp14[xi]
- Ap15[Ai] * xp15[xi]
- Ap16[Ai] * xp16[xi]
- Ap17[Ai] * xp17[xi]
- Ap18[Ai] * xp18[xi];
}
hypre_BoxLoop3End(Ai, xi, ri);
break;
case 27:
hypre_BoxGetStrideSize(compute_box, base_stride, loop_size);
hypre_BoxLoop3Begin(hypre_StructMatrixNDim(A), loop_size,
A_data_box, start, base_stride, Ai,
x_data_box, start, base_stride, xi,
r_data_box, start, base_stride, ri);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ri) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(Ai, xi, ri)
{
rp[ri] = rp[ri]
- Ap0[Ai] * xp0[xi]
- Ap1[Ai] * xp1[xi]
- Ap2[Ai] * xp2[xi]
- Ap3[Ai] * xp3[xi]
- Ap4[Ai] * xp4[xi]
- Ap5[Ai] * xp5[xi]
- Ap6[Ai] * xp6[xi]
- Ap7[Ai] * xp7[xi]
- Ap8[Ai] * xp8[xi]
- Ap9[Ai] * xp9[xi]
- Ap10[Ai] * xp10[xi]
- Ap11[Ai] * xp11[xi]
- Ap12[Ai] * xp12[xi]
- Ap13[Ai] * xp13[xi]
- Ap14[Ai] * xp14[xi]
- Ap15[Ai] * xp15[xi]
- Ap16[Ai] * xp16[xi]
- Ap17[Ai] * xp17[xi]
- Ap18[Ai] * xp18[xi]
- Ap19[Ai] * xp19[xi]
- Ap20[Ai] * xp20[xi]
- Ap21[Ai] * xp21[xi]
- Ap22[Ai] * xp22[xi]
- Ap23[Ai] * xp23[xi]
- Ap24[Ai] * xp24[xi]
- Ap25[Ai] * xp25[xi]
- Ap26[Ai] * xp26[xi];
}
hypre_BoxLoop3End(Ai, xi, ri);
break;
default:
for (si = 0; si < stencil_size; si++)
{
Ap0 = hypre_StructMatrixBoxData(A, i, si);
xp0 = hypre_StructVectorBoxData(x, i) +
hypre_BoxOffsetDistance(x_data_box, stencil_shape[si]);
hypre_BoxGetStrideSize(compute_box, base_stride,
loop_size);
hypre_BoxLoop3Begin(hypre_StructMatrixNDim(A), loop_size,
A_data_box, start, base_stride, Ai,
x_data_box, start, base_stride, xi,
r_data_box, start, base_stride, ri);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ri) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(Ai, xi, ri)
{
rp[ri] -= Ap0[Ai] * xp0[xi];
}
hypre_BoxLoop3End(Ai, xi, ri);
}
}
}
}
}
/*-----------------------------------------------------------------------
* Return
*-----------------------------------------------------------------------*/
hypre_IncFLOPCount(residual_data -> flops);
hypre_EndTiming(residual_data -> time_index);
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SMGResidualSetBase
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SMGResidualSetBase( void *residual_vdata,
hypre_Index base_index,
hypre_Index base_stride )
{
hypre_SMGResidualData *residual_data = residual_vdata;
HYPRE_Int d;
HYPRE_Int ierr = 0;
for (d = 0; d < 3; d++)
{
hypre_IndexD((residual_data -> base_index), d)
= hypre_IndexD(base_index, d);
hypre_IndexD((residual_data -> base_stride), d)
= hypre_IndexD(base_stride, d);
}
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SMGResidualDestroy
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SMGResidualDestroy( void *residual_vdata )
{
HYPRE_Int ierr = 0;
hypre_SMGResidualData *residual_data = residual_vdata;
if (residual_data)
{
hypre_StructMatrixDestroy(residual_data -> A);
hypre_StructVectorDestroy(residual_data -> x);
hypre_StructVectorDestroy(residual_data -> b);
hypre_StructVectorDestroy(residual_data -> r);
hypre_BoxArrayDestroy(residual_data -> base_points);
hypre_ComputePkgDestroy(residual_data -> compute_pkg );
hypre_FinalizeTiming(residual_data -> time_index);
hypre_TFree(residual_data);
}
return ierr;
}
|
omp_flush.c | #include <stdio.h>
#include <unistd.h>
#include <omp.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"
int
check_omp_flush (FILE * logFile)
{
int result1 = 0;
int result2 = 0;
int dummy;
#pragma omp parallel
{
int rank;
rank = omp_get_thread_num ();
#pragma omp barrier
if (rank == 1)
{
result2 = 3;
#pragma omp flush(result2)
dummy = result2;
}
if (rank == 0)
{
my_sleep (1.);
#pragma omp flush(result2)
result1 = result2;
}
}
return ((result1 == result2) && (result2 == dummy) && (result2 == 3));
}
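/* crosscheck variant: identical to check_omp_flush() except that the flush
 * directives are commented out; the testsuite uses such crosscheck functions
 * to verify that the test is actually sensitive to the missing construct */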
int
crosscheck_omp_flush (FILE * logFile)
{
int result1 = 0;
int result2 = 0;
int dummy;
#pragma omp parallel
{
int rank;
rank = omp_get_thread_num ();
#pragma omp barrier
if (rank == 1)
{
result2 = 3;
/* #pragma omp flush(result2) */
dummy = result2;
}
if (rank == 0)
{
my_sleep (1.);
/* #pragma omp flush(result2) */
result1 = result2;
}
}
return ((result1 == result2) && (result2 == dummy) && (result2 == 3));
}
|
spectra.c | #include "common.h"
typedef struct {
int l;
RunParams *par;
char *tr1;
char *tr2;
} IntClPar;
static double cl_integrand(double lk,void *params)
{
double d1,d2;
IntClPar *p=(IntClPar *)params;
double k=pow(10.,lk);
double pk=csm_Pk_linear_0(p->par->cpar,k);
d1=transfer_wrap(p->l,k,p->par,p->tr1,0);
d2=transfer_wrap(p->l,k,p->par,p->tr2,1);
return k*d1*d2*pk;
}
static double spectra(char *tr1,char *tr2,int l,RunParams *par)
{
IntClPar ipar;
double result=0,eresult;
gsl_function F;
gsl_integration_workspace *w=gsl_integration_workspace_alloc(1000);
ipar.l=l;
ipar.par=par;
ipar.tr1=tr1;
ipar.tr2=tr2;
F.function=&cl_integrand;
F.params=&ipar;
gsl_integration_qag(&F,D_LKMIN,D_LKMAX,0,1E-4,1000,GSL_INTEG_GAUSS41,w,&result,&eresult);
gsl_integration_workspace_free(w);
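/* result was accumulated over lk = log10(k); since dk = ln(10)*k*dlk and the
 * integrand already carries one factor of k, M_LN10*result is the integral
 * over k, so C_l = 2/(2l+1) * Int dk Delta1(k) Delta2(k) P(k) */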
return M_LN10*2*result/(2*l+1.);
}
void compute_spectra(RunParams *par)
{
printf("Computing power spectra\n");
#ifdef _HAS_OMP
#pragma omp parallel default(none) shared(par)
{
#endif //_HAS_OMP
int l;
#ifdef _HAS_OMP
#pragma omp for
#endif //_HAS_OMP
for(l=0;l<=par->lmax;l++) {
#ifdef _DEBUG
printf("%d \n",l);
#endif //_DEBUG
if(par->do_nc) {
par->cl_dd[l]=spectra("nc","nc",l,par);
if(par->do_shear) {
par->cl_d1l2[l]=spectra("nc","shear",l,par);
par->cl_d2l1[l]=spectra("shear","nc",l,par);
}
if(par->do_cmblens)
par->cl_dc[l]=spectra("nc","cmblens",l,par);
if(par->do_isw)
par->cl_di[l]=spectra("nc","isw",l,par);
}
if(par->do_shear) {
par->cl_ll[l]=spectra("shear","shear",l,par);
if(par->do_cmblens)
par->cl_lc[l]=spectra("shear","cmblens",l,par);
if(par->do_isw)
par->cl_li[l]=spectra("shear","isw",l,par);
}
if(par->do_cmblens) {
par->cl_cc[l]=spectra("cmblens","cmblens",l,par);
if(par->do_isw)
par->cl_ci[l]=spectra("cmblens","isw",l,par);
}
if(par->do_isw)
par->cl_ii[l]=spectra("isw","isw",l,par);
} //end omp for
#ifdef _HAS_OMP
} //end omp parallel
#endif //_HAS_OMP
}
typedef struct {
int i_bessel;
RunParams *par;
double th;
SplPar *clsp;
double *cl;
} IntWtPar;
static double wt_integrand(double l,void *params)
{
IntWtPar *p=(IntWtPar *)params;
double x=l*p->th;
// double cl=spline_eval(l,p->clsp);
double cl=p->cl[(int)l];
double jbes;
if(p->i_bessel)
jbes=gsl_sf_bessel_Jn(p->i_bessel,x);
else
jbes=gsl_sf_bessel_J0(x);
return l*jbes*cl;
}
static void compute_wt_single(RunParams *par,double *cl,double *wt,double *llist,int bessel_order)
{
#ifdef _HAS_OMP
#pragma omp parallel default(none) \
shared(par,cl,wt,llist,bessel_order)
{
#endif //_HAS_OMP
int ith;
double result,eresult;
gsl_function F;
gsl_integration_workspace *w=gsl_integration_workspace_alloc(1000);
SplPar *clsp=spline_init((par->lmax+1),llist,cl,0,0);
IntWtPar ipar;
ipar.i_bessel=bessel_order;
ipar.par=par;
ipar.clsp=clsp;
ipar.cl=cl;
#ifdef _HAS_OMP
#pragma omp for
#endif //_HAS_OMP
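// flat-sky correlation function: w(theta) = (1/2pi) * Int dl l J_n(l*theta) C_l,
// with n = bessel_order and C_l taken directly from the precomputed cl[] array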
for(ith=0;ith<par->n_th;ith++) {
if(par->do_w_theta_logbin)
ipar.th=DTOR*par->th_max*pow(10.,(ith+0.5-par->n_th)/par->n_th_logint);
else
ipar.th=DTOR*(par->th_min+(par->th_max-par->th_min)*(ith+0.5)/par->n_th);
F.function=&wt_integrand;
F.params=&ipar;
gsl_integration_qag(&F,llist[0],llist[par->lmax],0,1E-4,1000,GSL_INTEG_GAUSS41,w,&result,&eresult);
wt[ith]=result/(2*M_PI);
}//end omp for
gsl_integration_workspace_free(w);
spline_free(clsp);
#ifdef _HAS_OMP
} //end omp parallel
#endif //_HAS_OMP
}
void compute_w_theta(RunParams *par)
{
if(par->do_w_theta) {
int l;
double *llist;
printf("Computing correlation functions\n");
llist=dam_malloc((par->lmax+1)*sizeof(double));
for(l=0;l<=par->lmax;l++)
llist[l]=(float)l;
if(par->do_nc) {
compute_wt_single(par,par->cl_dd,par->wt_dd,llist,0);
if(par->do_shear) {
compute_wt_single(par,par->cl_d1l2,par->wt_d1l2,llist,0);
compute_wt_single(par,par->cl_d2l1,par->wt_d2l1,llist,0);
}
if(par->do_cmblens)
compute_wt_single(par,par->cl_dc,par->wt_dc,llist,0);
if(par->do_isw)
compute_wt_single(par,par->cl_di,par->wt_di,llist,0);
}
if(par->do_shear) {
compute_wt_single(par,par->cl_ll,par->wt_ll_pp,llist,0);
compute_wt_single(par,par->cl_ll,par->wt_ll_mm,llist,4);
if(par->do_cmblens)
compute_wt_single(par,par->cl_lc,par->wt_lc,llist,0);
if(par->do_isw)
compute_wt_single(par,par->cl_li,par->wt_li,llist,0);
}
if(par->do_cmblens) {
compute_wt_single(par,par->cl_cc,par->wt_cc,llist,0);
if(par->do_isw)
compute_wt_single(par,par->cl_ci,par->wt_ci,llist,0);
}
if(par->do_isw)
compute_wt_single(par,par->cl_ii,par->wt_ii,llist,0);
free(llist);
}
else {
printf("Skipping correlation functions\n");
}
}
|
dynamic_fmt.c | /*
* This software was written by Jim Fougeron jfoug AT cox dot net
* in 2009-2013. No copyright is claimed, and the software is hereby
* placed in the public domain. In case this attempt to disclaim
* copyright and place the software in the public domain is deemed
* null and void, then the software is Copyright (c) 2009-2013 Jim Fougeron
* and it is hereby released to the general public under the following
* terms:
*
* This software may be modified, redistributed, and used for any
* purpose, in source and binary forms, with or without modification.
*
* Generic 'scriptable' hash cracker for JtR
*
* Renamed and changed from md5_gen* to dynamic*. We handle MD5 and SHA1
* at the present time. More crypt types 'may' be added later.
* Added SHA2 (SHA224, SHA256, SHA384, SHA512), GOST, Whirlpool crypt types.
* Whirlpool uses OpenSSL if OPENSSL_VERSION_NUMBER >= 0x10000000, otherwise the sph_* code is used.
*
* There used to be a todo list, and other commenting here. It has been
* moved to ./docs/dynamic_history.txt
*
* KNOWN issues, and things to do.
*
* 1. create a new optimize flag, MGF_PASS_AFTER_FIXEDSALT and
* MGF_PASS_BEFORE_FIXEDSALT. Then create DynamicFunc__appendsalt_after_pass[12]
* These would only be valid for a FIXED length salted format. Then
* we can write the pass right into the buffer, and get_key() would read
* it back from there, either skipping over the salt, or removing the salt
* from the end. This would allow crypt($s.$p) and crypt($p.s) to be optimized
* in the way of string loading, and many fewer buffer copies. So dyna_1 could
* be optimized to something like:
// dynamic_1 Joomla md5($p.$s)
static DYNAMIC_primitive_funcp _Funcs_1[] =
{
//Flags=MGF_PASS_BEFORE_FIXEDSALT | MGF_SALTED
// saltlen=3 (or whatever). This fixed size is 'key'
DynamicFunc__appendsalt_after_pass1,
DynamicFunc__crypt_md5,
NULL
};
* WELL, the fixed size salt, it 'may' not be key for the MGF_PASS_BEFORE_FIXEDSALT,
* I think I can make that 'work' for variable sized salts. But for the
* MGF_PASS_AFTER_FIXEDSALT, i.e. crypt($s.$p) the fixed size salt IS key. I would
* like to store all PW's at salt_len offset in the buffer, and simply overwrite the
* first part of each buffer with the salt, never moving the password after the first
* time it is written. THEN it is very important this ONLY be allowed when we KNOW
* the salt length ahead of time.
*
* 2. Change regen-salts to be generic. Add the logic to dynamic_fmt.c proper, and change
* the fake-salts.c, and options so that 'generic' regen-salts can be done.
*/
#include <string.h>
#include <time.h>
#if AC_BUILT
#include "autoconfig.h"
#endif
#include "arch.h"
#if !FAST_FORMATS_OMP
#ifdef _OPENMP
# define FORCE_THREAD_MD5_body
#endif
#undef _OPENMP
#endif
#ifndef DYNAMIC_DISABLED
#ifdef SIMD_COEF_32
#include "simd-intrinsics.h"
#endif
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "md5.h"
#include "md4.h"
#include "dynamic.h"
#include "options.h"
#include "config.h"
#include "sha.h"
#include "sha2.h"
#include "gost.h"
#include "sph_haval.h"
#include "sph_ripemd.h"
#include "sph_tiger.h"
#include "sph_md2.h"
#include "sph_panama.h"
#include "sph_skein.h"
#include "sph_whirlpool.h"
#include "memory.h"
#include "unicode.h"
#include "johnswap.h"
#include "crc32.h"
#include "aligned.h"
#include "fake_salts.h"
#include "base64_convert.h"
#if (AC_BUILT && HAVE_WHIRLPOOL) || \
(!AC_BUILT && OPENSSL_VERSION_NUMBER >= 0x10000000 && !HAVE_NO_SSL_WHIRLPOOL)
#include <openssl/whrlpool.h>
#else
// on my 32-bit cygwin builds, this code is about 4x slower than the OpenSSL code.
#define WHIRLPOOL_CTX sph_whirlpool_context
#define WHIRLPOOL_Init(a) sph_whirlpool_init(a)
#define WHIRLPOOL_Update(a,b,c) sph_whirlpool(a,b,c)
#define WHIRLPOOL_Final(a,b) sph_whirlpool_close(b,a)
#endif
#include "KeccakHash.h"
#define KECCAK_CTX Keccak_HashInstance
#define KECCAK_Update(a,b,c) Keccak_HashUpdate(a,b,(c)*8)
#define KECCAK_Final(a,b) Keccak_HashFinal(b,a)
#define KECCAK_256_Init(hash) Keccak_HashInitialize(hash, 1088, 512, 256, 0x01)
#define KECCAK_512_Init(hash) Keccak_HashInitialize(hash, 576, 1024, 512, 0x01)
// FIPS 202 compliant
#define SHA3_224_Init(hash) Keccak_HashInitialize(hash, 1152, 448, 224, 0x06)
#define SHA3_256_Init(hash) Keccak_HashInitialize(hash, 1088, 512, 256, 0x06)
#define SHA3_384_Init(hash) Keccak_HashInitialize(hash, 832, 768, 384, 0x06)
#define SHA3_512_Init(hash) Keccak_HashInitialize(hash, 576, 1024, 512, 0x06)
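// In every variant above rate + capacity = 1600 bits and the output length is
// capacity/2; raw Keccak and SHA-3 differ only in the domain-separation suffix
// (0x01 vs 0x06, the latter as specified by FIPS 202).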
#ifdef _OPENMP
#include <omp.h>
static unsigned int m_ompt;
#endif
#include "dynamic_types.h"
#include "memdbg.h"
#if (defined (_OPENMP)||defined(FORCE_THREAD_MD5_body)) && defined (_MSC_VER)
unsigned DES_bs_max_kpc, DES_bs_min_kpc, DES_bs_all_p;
#undef MD5_body
extern void MD5_body(MD5_word x[15],MD5_word out[4]);
#endif
#define STRINGIZE2(s) #s
#define STRINGIZE(s) STRINGIZE2(s)
static struct fmt_main fmt_Dynamic;
static struct fmt_main *pFmts;
static int nFmts;
static int nLocalFmts;
static struct fmt_main *pLocalFmts;
static int force_md5_ctx;
static void dynamic_RESET(struct fmt_main *fmt);
#define eLargeOut dyna_eLargeOut
eLargeOut_t *eLargeOut;
#define nLargeOff dyna_nLargeOff
unsigned *nLargeOff;
#if ARCH_LITTLE_ENDIAN
#define MD5_swap(x, y, count)
#define MD5_swap2(a,b,c,d,e)
#else
extern char *MD5_DumpHexStr(void *p);
static void MD5_swap(MD5_word *x, MD5_word *y, int count)
{
do {
*y++ = JOHNSWAP(*x++);
} while (--count);
}
#if MD5_X2
static void MD5_swap2(MD5_word *x, MD5_word *x2, MD5_word *y, MD5_word *y2, int count)
{
do {
*y++ = JOHNSWAP(*x++);
*y2++ = JOHNSWAP(*x2++);
} while (--count);
}
#endif
#endif
#define FORMAT_LABEL "dynamic"
#define FORMAT_NAME "Generic MD5"
#ifdef SIMD_COEF_32
# define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3) )*SIMD_COEF_32 + ((i)&3) )
# define SHAGETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3) )*SIMD_COEF_32 + (3-((i)&3)) ) // for endianness conversion
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define CIPHERTEXT_LENGTH 32
#define BINARY_SIZE 16
#define BINARY_SIZE_SHA 20
#define BINARY_ALIGN MEM_ALIGN_WORD
// Computation for 'salt_size' The salt (and salt2) is appended to the end of the hash entry.
// The format of a salted entry is: $dynamic_#$hash$SALT_VAL[$$2SALT2_VAL]
// salt 64 bytes,
// salt2 64 bytes,
// salt signature $ 1 byte
// salt2 signature $$2 3 bytes
// null termination 1 byte. This allows two 64 byte salts.
// Note, we now have up to 10 of these.
#define SALT_SIZE (64*4+1+3+1)
#define SALT_ALIGN MEM_ALIGN_WORD
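// Illustrative only (placeholder values): a salted entry therefore looks like
// $dynamic_1$<32-hex-digit-md5>$salt1 or, with a second salt appended,
// $dynamic_1$<32-hex-digit-md5>$salt1$$2salt2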
// slots to do 24 'tests'. Note, we copy the
// same 3 tests over and over again. Simply to validate that
// tests use 'multiple' blocks.
static struct fmt_tests dynamic_tests[] = {
{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},
{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},
{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL}
};
#ifdef SIMD_COEF_32
// SSE2 works only with 54 byte keys. Thus, md5(md5($p).md5($s)) can NOT be used
// with the SSE2, since that final md5 will be over a 64 byte block of data.
static union SIMD_inpup {
ARCH_WORD_32 w[(64*SIMD_COEF_32)/sizeof(ARCH_WORD_32)];
unsigned char c[64*SIMD_COEF_32];
} *input_buf, *input_buf2;
static union SIMD_crypt {
ARCH_WORD_32 w[(BINARY_SIZE*SIMD_COEF_32)/sizeof(ARCH_WORD_32)];
unsigned char c[BINARY_SIZE*SIMD_COEF_32];
} *crypt_key, *crypt_key2;
static unsigned int (*total_len)[SIMD_COEF_32];
static unsigned int (*total_len2)[SIMD_COEF_32];
#define MMX_INP_BUF_SZ (sizeof(input_buf[0]) *BLOCK_LOOPS)
#define MMX_INP_BUF2_SZ (sizeof(input_buf2[0])*BLOCK_LOOPS)
#define MMX_TOT_LEN_SZ (sizeof(*total_len) *BLOCK_LOOPS)
#define MMX_TOT_LEN2_SZ (sizeof(*total_len2)*BLOCK_LOOPS)
#define MMX_CRYPT_KEY_SZ (sizeof(crypt_key[0]) *BLOCK_LOOPS+sizeof(crypt_key[0]))
#define MMX_CRYPT_KEY2_SZ (sizeof(crypt_key2[0])*BLOCK_LOOPS)
#endif
#define FLAT_INP_BUF_SZ (sizeof(MD5_IN)*(MAX_KEYS_PER_CRYPT_X86>>MD5_X2))
#define FLAT_TOT_LEN_SZ (sizeof(unsigned int)*(MAX_KEYS_PER_CRYPT_X86))
MD5_OUT *crypt_key_X86;
MD5_OUT *crypt_key2_X86;
MD5_IN *input_buf_X86;
MD5_IN *input_buf2_X86;
unsigned int *total_len_X86;
unsigned int *total_len2_X86;
BIG_HASH_OUT dynamic_BHO[4];
static int keys_dirty;
// We store the salt here
static unsigned char *cursalt;
// length of salt (so we don't have to call strlen() all the time).
static int saltlen;
int get_dynamic_fmt_saltlen() { return saltlen; }
// This array is for the 2nd salt in the hash. I know of no hashes with double salts,
// but test type dynamic_16 (which is 'fake') has 2 salts, and this is the data/code to
// handle double salts.
static unsigned char *cursalt2;
static int saltlen2;
static unsigned char *username;
static int usernamelen;
static unsigned char *flds[10];
static int fld_lens[10];
const char *dynamic_itoa16 = itoa16;
#if !defined (_DEBUG)
#define itoa16_w2 __Dynamic_itoa_w2
#define itoa16_w2_u __Dynamic_itoa_w2_u
#define itoa16_w2_l __Dynamic_itoa_w2_l
#endif
unsigned short itoa16_w2_u[256], itoa16_w2_l[256];
unsigned short *itoa16_w2=itoa16_w2_l;
// array of the keys. Also lengths of the keys. NOTE if store_keys_in_input, then the
// key array will NOT be used (but the length array still is).
#ifndef MAX_KEYS_PER_CRYPT
#define MAX_KEYS_PER_CRYPT MAX_KEYS_PER_CRYPT_X86
#endif
#ifndef PLAINTEXT_LENGTH
#define PLAINTEXT_LENGTH PLAINTEXT_LENGTH_X86
#endif
#define EFFECTIVE_MKPC (MAX_KEYS_PER_CRYPT > MAX_KEYS_PER_CRYPT_X86 ? MAX_KEYS_PER_CRYPT : MAX_KEYS_PER_CRYPT_X86)
#define EFFECTIVE_MAX_LENGTH (PLAINTEXT_LENGTH > PLAINTEXT_LENGTH_X86 ? PLAINTEXT_LENGTH : PLAINTEXT_LENGTH_X86)
// Used to compute the length of each string to clean. This is needed, since we have to clean a little more than
// just the length, IF we are cleaning strings that are in a different endianness than the CPU's native one.
// This is seen with SHA224 (etc.) on Intel, or MD5 on BE systems. We still try to clean 'only' as much as
// we need to, but that is usually MORE than the length of the stored string. 8 gives us a 7 byte spill
// over, plus 1 byte for the 0x80 padding byte.
#define COMPUTE_EX_LEN(a) ( (a) > (sizeof(input_buf_X86[0].x1.b)-8) ) ? sizeof(input_buf_X86[0].x1.b) : ((a)+8)
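// e.g. COMPUTE_EX_LEN(20) == 28: the 20 stored bytes plus the 7-byte spill
// over and the trailing 0x80, capped at sizeof(input_buf_X86[0].x1.b)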
static char saved_key[EFFECTIVE_MKPC][EFFECTIVE_MAX_LENGTH + 1];
static int saved_key_len[EFFECTIVE_MKPC];
// Used in 'get_key' if we are running in store_keys_in_input mode
static char out[EFFECTIVE_MAX_LENGTH + 1];
// This is the GLOBAL count of keys. ALL of the primitives which deal with a count
// will read from this variable.
#if !defined (_DEBUG)
#define m_count m_Dynamic_Count
#endif
unsigned int m_count;
// If we are run in 'specific' mode (say, -format=dynamic -subformat=dynamic_0, then we
// want to 'allow' bare hashes to be 'valid'. This is how we will do this. We have a boolean
// that if set to true, we will perform a 1 time check within the valid function. If at
// that time we find out that we are cracking (or showing, etc) that we will accept lines
// that are either format of $dynamic_0$hhhhhh...32 or simply in the format of hhhhhhh..32
int dynamic_allow_rawhash_fixup = 0;
// this one IS in the private_dat, but since it is accessed SO much, we pull it
// out prior to 'internal' processing. The others are accessed right from
// the structure, since there are accessed infrequently enough to not matter.
static int dynamic_use_sse;
// If set to 1, then do unicode conversion is many string setting functions.
static int *md5_unicode_convert;
#if !defined (_DEBUG)
#define curdat Dynamic_curdat
#endif
private_subformat_data curdat;
// Helper function that loads our 256-entry unsigned short arrays used for base-16 conversion.
// This function is called at the 'validation' call that loads our preloads (i.e. only
// called one time, pre 'run'; it will be called multiple times when benchmarking, but
// will NOT impact benchmark times). Writing a word at a time (2 bytes) sped up
// the overall run time of dynamic_2 by almost 5%, so this conversion is MUCH faster than
// the fastest byte-by-byte version I could put together. I tested several ways to access this
// array of unsigned shorts, and the best way was a 2 step method into an array of long
// integer pointers (thus, load 1/2 the 32 bit word, then the other 1/2, into a 32 bit word).
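// Illustrative sketch only (not part of the original code, and the helper
// names *_sketch are hypothetical): one plausible way the itoa16_w2_l /
// itoa16_w2_u tables described above could be filled and then used, assuming
// a little-endian CPU; kept inside #if 0 so it cannot affect the build.
#if 0
static void fill_itoa16_w2_sketch(void)
{
	const char *lc = "0123456789abcdef", *uc = "0123456789ABCDEF";
	int i;
	for (i = 0; i < 256; ++i) {
		/* the low byte of the short lands at the lower address on LE CPUs,
		   so the digit for the high nibble goes into the low byte */
		itoa16_w2_l[i] = (unsigned short)(lc[i >> 4] | (lc[i & 0xF] << 8));
		itoa16_w2_u[i] = (unsigned short)(uc[i >> 4] | (uc[i & 0xF] << 8));
	}
}
/* convert len raw bytes to ASCII hex with one 2-byte store per input byte */
static void to_hex_w2_sketch(const unsigned char *in, unsigned short *out, int len)
{
	int i;
	for (i = 0; i < len; ++i)
		out[i] = itoa16_w2[in[i]];
}
#endif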
/*********************************************************************************
*********************************************************************************
* Start of the 'normal' *_fmt code for md5-gen
*********************************************************************************
*********************************************************************************/
char *RemoveHEX(char *output, char *input)
{
char *cpi = input;
char *cpo = output;
char *cpH = strstr(input, "$HEX$");
if (!cpH) {
// should never get here, we have a check performed before this function is called.
strcpy(output, input);
return output;
}
while (cpi < cpH)
*cpo++ = *cpi++;
*cpo++ = *cpi;
cpi += 5;
while (*cpi) {
if (*cpi == '0' && cpi[1] == '0') {
strcpy(output, input);
return output;
}
if (atoi16[ARCH_INDEX(*cpi)] != 0x7f && atoi16[ARCH_INDEX(cpi[1])] != 0x7f) {
*cpo++ = atoi16[ARCH_INDEX(*cpi)]*16 + atoi16[ARCH_INDEX(cpi[1])];
cpi += 2;
} else if (*cpi == '$') {
while (*cpi && strncmp(cpi, "$HEX$", 5)) {
*cpo++ = *cpi++;
}
if (!strncmp(cpi, "$HEX$", 5)) {
*cpo++ = *cpi;
cpi += 5;
}
} else {
strcpy(output, input);
return output;
}
}
*cpo = 0;
return output;
}
/*********************************************************************************
* Detects a 'valid' md5-gen format. This function is NOT locked to anything. It
* takes its detection logic from the provided fmt_main pointer. Within there,
* is a 'private' data pointer. When john first loads the md5-gen, it calls a
* function which builds proper 'private' data for EACH type of md5-gen. Then
* john will call valid on EACH of those formats, asking each one if a string is
* valid. Each format has a 'private' properly setup data object.
*********************************************************************************/
static int valid(char *ciphertext, struct fmt_main *pFmt)
{
unsigned int i, cipherTextLen;
char *cp, fixed_ciphertext[1024];
private_subformat_data *pPriv = pFmt->private.data;
if (!pPriv)
return 0;
if (strncmp(ciphertext, pPriv->dynamic_WHICH_TYPE_SIG, strlen(pPriv->dynamic_WHICH_TYPE_SIG)))
return 0;
// this is now simply REMOVED totally, if we detect it. Doing this solves MANY other problems
// of leaving it in there. The ONLY problem we still have is NULL bytes.
if (strstr(ciphertext, "$HEX$")) {
if (strlen(ciphertext) < sizeof(fixed_ciphertext))
ciphertext = RemoveHEX(fixed_ciphertext, ciphertext);
}
cp = &ciphertext[strlen(pPriv->dynamic_WHICH_TYPE_SIG)];
if (pPriv->dynamic_base64_inout == 1 || pPriv->dynamic_base64_inout == 3 || pPriv->dynamic_base64_inout == 5)
{
// jgypwqm.JsMssPLiS8YQ00$BaaaaaSX
unsigned int len;
len = base64_valid_length(cp, pPriv->dynamic_base64_inout==3?e_b64_mime:e_b64_crypt, flg_Base64_MIME_TRAIL_EQ_CNT);
if (len < 20 || len > pPriv->dynamic_SALT_OFFSET+4) return 0;
if (pPriv->dynamic_FIXED_SALT_SIZE == 0)
return !cp[len];
if (pPriv->dynamic_FIXED_SALT_SIZE && cp[len] != '$')
return 0;
if (pPriv->dynamic_FIXED_SALT_SIZE > 0 && strlen(&cp[len+1]) != pPriv->dynamic_FIXED_SALT_SIZE)
return 0;
else if (pPriv->dynamic_FIXED_SALT_SIZE < -1 && strlen(&cp[len+1]) > -(pPriv->dynamic_FIXED_SALT_SIZE))
return 0;
return 1;
}
if (pPriv->dynamic_base64_inout == 2)
{
// h3mJrcH0901pqX/m$alex
unsigned int i;
for (i = 0; i < 16; ++i) {
if (atoi64[ARCH_INDEX(cp[i])] == 0x7F)
return 0;
}
if (pPriv->dynamic_FIXED_SALT_SIZE == 0)
return !cp[i];
if (pPriv->dynamic_FIXED_SALT_SIZE && cp[16] != '$')
return 0;
if (pPriv->dynamic_FIXED_SALT_SIZE > 0 && strlen(&cp[17]) != pPriv->dynamic_FIXED_SALT_SIZE)
return 0;
else if (pPriv->dynamic_FIXED_SALT_SIZE < -1 && strlen(&cp[17]) > -(pPriv->dynamic_FIXED_SALT_SIZE))
return 0;
if (strlen(cp) < 16)
return 0;
return 1;
}
if (strlen(cp) < 32)
return 0;
cipherTextLen = CIPHERTEXT_LENGTH;
if (pPriv->dynamic_40_byte_input) {
cipherTextLen = 40;
} else if (pPriv->dynamic_48_byte_input) {
cipherTextLen = 48;
} else if (pPriv->dynamic_64_byte_input) {
cipherTextLen = 64;
} else if (pPriv->dynamic_56_byte_input) {
cipherTextLen = 56;
} else if (pPriv->dynamic_80_byte_input) {
cipherTextLen = 80;
} else if (pPriv->dynamic_96_byte_input) {
cipherTextLen = 96;
} else if (pPriv->dynamic_128_byte_input) {
cipherTextLen = 128;
}
for (i = 0; i < cipherTextLen; i++) {
if (atoi16[ARCH_INDEX(cp[i])] == 0x7f)
return 0;
}
if ((pPriv->pSetup->flags&MGF_SALTED) == 0) {
if (!cp[cipherTextLen])
return 1;
return 0;
}
if (cp[cipherTextLen] && cp[cipherTextLen] != '$')
return 0;
// NOTE if looking at this in the future, this was not my fix.
if (strlen(&cp[cipherTextLen]) > SALT_SIZE)
return 0;
// end NOTE.
if (pPriv->dynamic_FIXED_SALT_SIZE > 0 && ciphertext[pPriv->dynamic_SALT_OFFSET-1] != '$')
return 0;
if (pPriv->dynamic_FIXED_SALT_SIZE > 0 && strlen(&ciphertext[pPriv->dynamic_SALT_OFFSET]) != pPriv->dynamic_FIXED_SALT_SIZE) {
// first check to see if this salt has left the $HEX$ in the string (i.e. embedded nulls). If so, then
// validate length with this in mind.
if (!memcmp(&ciphertext[pPriv->dynamic_SALT_OFFSET], "HEX$", 4)) {
int len = strlen(&ciphertext[pPriv->dynamic_SALT_OFFSET]);
len = (len-4)>>1;
if (len != pPriv->dynamic_FIXED_SALT_SIZE)
return 0;
} else {
// check if there is a 'salt-2' or 'username', etc. If that is the case, then this is still valid.
if (strncmp(&ciphertext[pPriv->dynamic_SALT_OFFSET+pPriv->dynamic_FIXED_SALT_SIZE], "$$", 2))
return 0;
}
}
else if (!regen_salts_options && pPriv->dynamic_FIXED_SALT_SIZE < -1 && strlen(&ciphertext[pPriv->dynamic_SALT_OFFSET]) > -(pPriv->dynamic_FIXED_SALT_SIZE)) {
char *cpX;
// first check to see if this salt has left the $HEX$ in the string (i.e. embedded nulls). If so, then
// validate length with this in mind.
if (!memcmp(&ciphertext[pPriv->dynamic_SALT_OFFSET], "HEX$", 4)) {
int len = strlen(&ciphertext[pPriv->dynamic_SALT_OFFSET]);
len = (len-4)>>1;
if (len > -(pPriv->dynamic_FIXED_SALT_SIZE))
return 0;
} else {
// check if there is a 'salt-2' or 'username', etc. If that is the case, then this is still 'valid'
cpX = mem_alloc(-(pPriv->dynamic_FIXED_SALT_SIZE) + 3);
strnzcpy(cpX, &ciphertext[pPriv->dynamic_SALT_OFFSET], -(pPriv->dynamic_FIXED_SALT_SIZE) + 3);
if (!strstr(cpX, "$$")) {
MEM_FREE(cpX);
return 0;
}
MEM_FREE(cpX);
}
}
if (pPriv->b2Salts==1 && !strstr(&ciphertext[pPriv->dynamic_SALT_OFFSET-1], "$$2"))
return 0;
if (pPriv->nUserName && !strstr(&ciphertext[pPriv->dynamic_SALT_OFFSET-1], "$$U"))
return 0;
if (pPriv->FldMask) {
for (i = 0; i < 10; ++i) {
if ((pPriv->FldMask & (MGF_FLDx_BIT<<i)) == (MGF_FLDx_BIT<<i)) {
char Fld[5];
sprintf(Fld, "$$F%d", i);
if (!strstr(&ciphertext[pPriv->dynamic_SALT_OFFSET-1], Fld))
return 0;
}
}
}
return 1;
}
static char *FixupIfNeeded(char *ciphertext, private_subformat_data *pPriv);
static struct fmt_main *dynamic_Get_fmt_main(int which);
static char *HandleCase(char *cp, int caseType);
// 'wrapper' functions. These are here so we can call these functions to work on ALL data (not simply within a
// thread, which ONLY wants to work on a subset of the data). These functions should NOT be called by threading
// code, EVER. But these functions KNOW what to do. Some actually use threads, others do not need them.
#ifdef _OPENMP
#ifndef SIMD_COEF_32
const unsigned int OMP_INC = (MD5_X2+1);
const unsigned int OMP_MD5_INC = (MD5_X2+1);
const unsigned int OMP_MD4_INC = (MD5_X2+1);
const unsigned int OMP_SHA1_INC = (MD5_X2+1);
#else
const unsigned int OMP_INC = (MD5_X2+1);
const unsigned int OMP_MD5_INC = (SIMD_PARA_MD5*SIMD_COEF_32);
const unsigned int OMP_MD4_INC = (SIMD_PARA_MD4*SIMD_COEF_32);
const unsigned int OMP_SHA1_INC = (SIMD_PARA_SHA1*SIMD_COEF_32);
#endif // SIMD_COEF_32
#endif // _OPENMP
static inline void __nonMP_DynamicFunc__SSEtoX86_switch_output2()
{
#ifdef _OPENMP
DynamicFunc__SSEtoX86_switch_output2(0,m_count,0);
#else
DynamicFunc__SSEtoX86_switch_output2();
#endif
}
static inline void __nonMP_DynamicFunc__append_from_last_output2_to_input1_as_base16()
{
#ifdef _OPENMP
DynamicFunc__append_from_last_output2_to_input1_as_base16(0,m_count,0);
#else
DynamicFunc__append_from_last_output2_to_input1_as_base16();
#endif
}
void __nonMP_eLargeOut(eLargeOut_t what)
{
#ifdef _OPENMP
unsigned int i;
for (i = 1; i < m_ompt; ++i)
eLargeOut[i] = what;
#endif
eLargeOut[0] = what;
}
void __nonMP_nLargeOff(unsigned val)
{
#ifdef _OPENMP
unsigned int i;
for (i = 1; i < m_ompt; ++i)
nLargeOff[i] = val;
#endif
nLargeOff[0] = val;
}
static inline void md5_unicode_convert_set(int what, int tid)
{
md5_unicode_convert[tid] = what;
}
static inline int md5_unicode_convert_get(int tid)
{
return md5_unicode_convert[tid];
}
void __nonMP_md5_unicode_convert(int what)
{
#ifdef _OPENMP
unsigned int i;
for (i = 1; i < m_ompt; ++i)
md5_unicode_convert[i] = what;
#endif
md5_unicode_convert[0] = what;
}
#if !defined (_OPENMP)
#define md5_unicode_convert_set(what, tid) md5_unicode_convert_set(what, 0)
#define md5_unicode_convert_get(tid) md5_unicode_convert_get(0)
#define eLargeOut_set(what, tid) eLargeOut_set(what, 0)
#define eLargeOut_get(tid) eLargeOut_get(0)
#define nLargeOff_set(val, tid) nLargeOff_set(val, 0)
#define nLargeOff_get(tid) nLargeOff_get(0)
#endif
static inline void __nonMP_DynamicFunc__append_keys2()
{
#ifdef _OPENMP
DynamicFunc__append_keys2(0,m_count,0);
#else
DynamicFunc__append_keys2();
#endif
}
static void __possMP_DynamicFunc__crypt2_md5()
{
#ifdef _OPENMP
int i;
unsigned int inc = OMP_MD5_INC;
// if (dynamic_use_sse!=1)
// inc = OMP_INC;
#pragma omp parallel for
for (i = 0; i < m_count; i += inc)
DynamicFunc__crypt2_md5(i,i+inc,omp_get_thread_num());
#else
DynamicFunc__crypt2_md5();
#endif
}
static void __nonMP_DynamicFunc__clean_input()
{
unsigned int i=0;
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
memset(input_buf, 0, MMX_INP_BUF_SZ);
memset(total_len, 0, MMX_TOT_LEN_SZ);
return;
}
#endif
for (; i < MAX_KEYS_PER_CRYPT_X86; ++i) {
//if (total_len_X86[i]) {
#if MD5_X2
if (i&1)
memset(input_buf_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len_X86[i]));
else
#endif
memset(input_buf_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len_X86[i]));
total_len_X86[i] = 0;
//}
}
return;
}
static void __nonMP_DynamicFunc__clean_input2()
{
unsigned int i=0;
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
memset(input_buf2, 0, MMX_INP_BUF2_SZ);
memset(total_len2, 0, MMX_TOT_LEN2_SZ);
return;
}
#endif
if (curdat.using_flat_buffers_sse2_ok) {
memset(total_len2_X86, 0, sizeof(total_len2_X86[0])*MAX_KEYS_PER_CRYPT_X86);
return;
}
for (; i < MAX_KEYS_PER_CRYPT_X86; ++i) {
//if (total_len2_X86[i]) {
#if MD5_X2
if (i&1)
memset(input_buf2_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
else
#endif
memset(input_buf2_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
total_len2_X86[i] = 0;
//}
}
return;
}
static void __nonMP_DynamicFunc__clean_input_full()
{
#ifdef SIMD_COEF_32
memset(input_buf, 0, MMX_INP_BUF_SZ);
memset(total_len, 0, MMX_TOT_LEN_SZ);
#endif
memset(input_buf_X86, 0, FLAT_INP_BUF_SZ);
memset(total_len_X86, 0, FLAT_TOT_LEN_SZ);
}
static void __nonMP_DynamicFunc__clean_input2_full()
{
#ifdef SIMD_COEF_32
memset(input_buf2, 0, MMX_INP_BUF2_SZ);
memset(total_len2, 0, MMX_TOT_LEN2_SZ);
#endif
memset(input_buf2_X86, 0, FLAT_INP_BUF_SZ);
memset(total_len2_X86, 0, FLAT_TOT_LEN_SZ);
}
static void __nonMP_DynamicFunc__clean_input_kwik()
{
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
memset(total_len, 0, MMX_TOT_LEN_SZ);
return;
}
#endif
memset(total_len_X86, 0, FLAT_TOT_LEN_SZ);
#if !ARCH_LITTLE_ENDIAN
memset(input_buf_X86, 0, FLAT_INP_BUF_SZ);
#endif
}
#ifndef _OPENMP
static void __nonMP_DynamicFunc__clean_input2_kwik()
{
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
memset(total_len2, 0, MMX_TOT_LEN2_SZ);
return;
}
#endif
memset(total_len2_X86, 0, FLAT_TOT_LEN_SZ);
#if !ARCH_LITTLE_ENDIAN
memset(input_buf2_X86, 0, FLAT_INP_BUF_SZ);
#endif
}
#endif
/*********************************************************************************
* init() here does nothing. NOTE many formats LINKING into us will have a valid
* that DOES do something, but ours does nothing.
*********************************************************************************/
static void init(struct fmt_main *pFmt)
{
private_subformat_data *pPriv = pFmt->private.data;
unsigned int i;
//fprintf(stderr, "init(%s)\n", pPriv->dynamic_WHICH_TYPE_SIG);
/* first off, SAVE the original format structure (owned by JtR). We may need this later */
pPriv->pFmtMain = pFmt;
#ifdef _OPENMP
m_ompt = omp_get_max_threads();
if (!md5_unicode_convert) {
md5_unicode_convert = (int*)mem_calloc(m_ompt, sizeof(int));
eLargeOut = (eLargeOut_t*)mem_calloc(m_ompt, sizeof(eLargeOut_t));
nLargeOff = (unsigned*)mem_calloc(m_ompt, sizeof(unsigned));
for (i = 0; i < m_ompt; ++i) {
eLargeOut[i] = eBase16;
nLargeOff[i] = 0;
}
}
#else
if (!md5_unicode_convert) {
md5_unicode_convert = (int*)mem_calloc(1, sizeof(int));
eLargeOut = (eLargeOut_t*)mem_calloc(1, sizeof(eLargeOut_t));
eLargeOut[0] = eBase16;
nLargeOff = (unsigned*)mem_calloc(1, sizeof(unsigned));
nLargeOff[0] = 0;
}
#endif
#ifdef SIMD_COEF_32
if (!input_buf) {
input_buf = mem_calloc_align(1, MMX_INP_BUF_SZ, MEM_ALIGN_SIMD);
total_len = mem_calloc_align(1, MMX_TOT_LEN_SZ, MEM_ALIGN_SIMD);
total_len2 = mem_calloc_align(1, MMX_TOT_LEN2_SZ, MEM_ALIGN_SIMD);
input_buf2 = mem_calloc_align(1, MMX_INP_BUF2_SZ, MEM_ALIGN_SIMD);
crypt_key = mem_calloc_align(1, MMX_CRYPT_KEY_SZ, MEM_ALIGN_SIMD);
crypt_key2 = mem_calloc_align(1, MMX_CRYPT_KEY2_SZ, MEM_ALIGN_SIMD);
}
#endif
if (!crypt_key_X86) {
crypt_key_X86 = (MD5_OUT *)mem_calloc(((MAX_KEYS_PER_CRYPT_X86>>MD5_X2)+1), sizeof(*crypt_key_X86));
crypt_key2_X86 = (MD5_OUT *)mem_calloc(((MAX_KEYS_PER_CRYPT_X86>>MD5_X2)+1), sizeof(*crypt_key2_X86));
input_buf_X86 = (MD5_IN *)mem_calloc(((MAX_KEYS_PER_CRYPT_X86>>MD5_X2)+1), sizeof(*input_buf_X86));
input_buf2_X86 = (MD5_IN *)mem_calloc(((MAX_KEYS_PER_CRYPT_X86>>MD5_X2)+1), sizeof(*input_buf2_X86));
total_len_X86 = (unsigned int *)mem_calloc((MAX_KEYS_PER_CRYPT_X86+1), sizeof(*total_len_X86));
total_len2_X86 = (unsigned int *)mem_calloc((MAX_KEYS_PER_CRYPT_X86+1), sizeof(*total_len2_X86));
}
for (i = 0; i < 4; ++i)
dynamic_BHO[i].dat = mem_calloc_align(BLOCK_LOOPS, sizeof(*(dynamic_BHO[0].dat)), MEM_ALIGN_SIMD);
gost_init_table();
if (!pPriv || (pPriv->init == 1 && !strcmp(curdat.dynamic_WHICH_TYPE_SIG, pPriv->dynamic_WHICH_TYPE_SIG)))
return;
__nonMP_DynamicFunc__clean_input_full();
__nonMP_DynamicFunc__clean_input2_full();
// Some builds (omp vs non omp, etc) do not call these functions, so to avoid 'unused' warnings, we simply
// call them here.
__nonMP_DynamicFunc__clean_input_kwik();
dynamic_RESET(pFmt);
if (!pPriv)
return;
pPriv->init = 1;
memcpy(&curdat, pPriv, sizeof(private_subformat_data));
dynamic_use_sse = curdat.dynamic_use_sse;
force_md5_ctx = curdat.force_md5_ctx;
fmt_Dynamic.params.max_keys_per_crypt = pFmt->params.max_keys_per_crypt;
fmt_Dynamic.params.min_keys_per_crypt = pFmt->params.max_keys_per_crypt;
if (pFmt->params.min_keys_per_crypt > 64)
pFmt->params.min_keys_per_crypt = 64;
fmt_Dynamic.params.flags = pFmt->params.flags;
fmt_Dynamic.params.format_name = pFmt->params.format_name;
fmt_Dynamic.params.algorithm_name = pFmt->params.algorithm_name;
fmt_Dynamic.params.benchmark_comment = pFmt->params.benchmark_comment;
fmt_Dynamic.params.benchmark_length = pFmt->params.benchmark_length;
// we allow for 3 bytes of utf8 data to make up the number of plaintext_length unicode chars.
if ( (pFmt->params.flags&FMT_UNICODE) && options.target_enc == UTF_8 ) {
//printf ("Here pFmt->params.plaintext_length=%d pPriv->pSetup->MaxInputLen=%d\n", pFmt->params.plaintext_length, pPriv->pSetup->MaxInputLen);
pFmt->params.plaintext_length = MIN(125, pFmt->params.plaintext_length * 3);
}
else
fmt_Dynamic.params.plaintext_length = pFmt->params.plaintext_length;
fmt_Dynamic.params.salt_size = pFmt->params.salt_size;
fmt_Dynamic.params.flags = pFmt->params.flags;
fmt_Dynamic.methods.cmp_all = pFmt->methods.cmp_all;
fmt_Dynamic.methods.cmp_one = pFmt->methods.cmp_one;
fmt_Dynamic.methods.cmp_exact = pFmt->methods.cmp_exact;
fmt_Dynamic.methods.set_salt = pFmt->methods.set_salt;
fmt_Dynamic.methods.salt = pFmt->methods.salt;
fmt_Dynamic.methods.salt_hash = pFmt->methods.salt_hash;
fmt_Dynamic.methods.split = pFmt->methods.split;
fmt_Dynamic.methods.set_key = pFmt->methods.set_key;
fmt_Dynamic.methods.get_key = pFmt->methods.get_key;
fmt_Dynamic.methods.clear_keys = pFmt->methods.clear_keys;
fmt_Dynamic.methods.crypt_all = pFmt->methods.crypt_all;
for (i = 0; i < PASSWORD_HASH_SIZES; ++i)
{
fmt_Dynamic.methods.binary_hash[i] = pFmt->methods.binary_hash[i];
fmt_Dynamic.methods.get_hash[i] = pFmt->methods.get_hash[i];
}
#if !MD5_IMM
{
extern void MD5_std_init(struct fmt_main *pFmt);
MD5_std_init(pFmt);
}
#endif
if (curdat.input2_set_len32) {
for (i = 0; i < MAX_KEYS_PER_CRYPT_X86; ++i)
total_len2_X86[i] = 32;
#ifdef SIMD_COEF_32
for (i = 0; i < BLOCK_LOOPS; ++i) {
unsigned int j;
for (j = 0; j < SIMD_COEF_32; j++) {
input_buf2[i].c[GETPOS(32, j)] = 0x80;
input_buf2[i].c[GETPOS(57, j)] = 0x1;
total_len2[i][j] = 0x20;
}
}
#endif
}
}
static void done(void)
{
int i;
MEM_FREE(total_len2_X86);
MEM_FREE(total_len_X86);
MEM_FREE(input_buf2_X86);
MEM_FREE(input_buf_X86);
MEM_FREE(crypt_key2_X86);
MEM_FREE(crypt_key_X86);
#ifdef SIMD_COEF_32
MEM_FREE(crypt_key2);
MEM_FREE(crypt_key);
MEM_FREE(input_buf2);
MEM_FREE(total_len2);
MEM_FREE(total_len);
MEM_FREE(input_buf);
#endif
MEM_FREE(nLargeOff);
MEM_FREE(eLargeOut);
MEM_FREE(md5_unicode_convert);
for (i = 0; i < 4; ++i)
MEM_FREE(dynamic_BHO[i].dat);
}
/*********************************************************************************
* This function will add a $dynamic_#$ IF there is not one, and if we have a specific
* format requested. Also, it will add things like UserID, Domain, Fld3, Fld4,
* Fld5, etc.
*********************************************************************************/
static char *prepare(char *split_fields[10], struct fmt_main *pFmt)
{
private_subformat_data *pPriv = pFmt->private.data;
char Tmp[80];
int i;
int trim_u=0;
char *cpBuilding=split_fields[1];
if (!pPriv)
return split_fields[1];
// ANY field[1] longer than 490 will simply be ignored, and returned 'as is'.
// the rest of this function makes this assumption.
if (!cpBuilding || strlen(cpBuilding) > 490)
return cpBuilding;
// mime. We want to strip off ALL trailing '=' characters to 'normalize' them
if (pPriv->dynamic_base64_inout == 3 && !strncmp(cpBuilding, "$dynamic_", 9))
{
static char ct[496];
int len;
char *cp = strchr(&cpBuilding[9], '$'), *cp2;
if (!cp) return cpBuilding;
++cp;
len = base64_valid_length(cp, e_b64_mime, flg_Base64_MIME_TRAIL_EQ_CNT);
if (len && cp[len-1] == '=') {
strnzcpy(ct, cpBuilding, cp-cpBuilding+len+1);
cp2 = &ct[strlen(ct)-1];
while (*cp2 == '=')
*cp2-- = 0;
if (cp[len])
strcat(cp2, &cp[len]);
cpBuilding = ct;
}
}
if (pFmt->params.salt_size && !strchr(split_fields[1], '$')) {
if (!pPriv->nUserName && !pPriv->FldMask && options.regen_lost_salts == 0)
return split_fields[1];
}
// handle 'older' md5_gen(x) signature, by simply converting to $dynamic_x$ signature
// Thus older md5_gen() is a valid input (or from john.pot), but ONLY the newer
// $dynamic_x$ will be written out (into .pot, output lines, etc).
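// For illustration: an incoming "md5_gen(0)<32 hex digits>" line is rewritten here as
// "$dynamic_0$<32 hex digits>" before any further processing.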
if (!strncmp(cpBuilding, "md5_gen(", 8))
{
static char ct[496];
char *cp = &cpBuilding[8], *cpo = &ct[sprintf(ct, "$dynamic_")];
while (*cp >= '0' && *cp <= '9')
*cpo++ = *cp++;
*cpo++ = '$';
++cp;
strcpy(cpo, cp);
cpBuilding = ct;
}
// At this point, max length of cpBuilding is 491 (if it was a md5_gen signature)
// allow a raw hash, if there is a $u but no salt
if (pPriv->nUserName && strlen(split_fields[0]) && !strchr(cpBuilding, '$') && strcmp(split_fields[0], "?")) {
static char ct[496];
strcpy(ct, cpBuilding);
strcat(ct, "$$U");
cpBuilding = ct;
trim_u=1;
}
cpBuilding = FixupIfNeeded(cpBuilding, pPriv);
if (trim_u)
cpBuilding[strlen(cpBuilding)-3] = 0;
// at this point max length is still < 512. 491 + strlen($dynamic_xxxxx$) is 506
if (strncmp(cpBuilding, "$dynamic_", 9)) {
// ok, here we add the 'generic' regen salt code
if (options.regen_lost_salts && !strchr(cpBuilding, '$')) {
char *cp = load_regen_lost_salt_Prepare(cpBuilding);
if (cp)
return cp;
}
return split_fields[1];
}
if ( (pPriv->pSetup->flags&MGF_SALTED) == 0)
return cpBuilding;
/* at this point, we want to convert ANY and all $HEX$hex into values */
/* the reason we want to do this, is so that things read from john.pot file will be in proper 'native' format */
/* the ONE exception to this, is if there is a NULL byte in the $HEX$ string, then we MUST leave that $HEX$ string */
/* alone, and let the later calls in dynamic.c handle them. */
if (strstr(cpBuilding, "$HEX$")) {
char *cp, *cpo;
int bGood=1;
static char ct[512];
strcpy(ct, cpBuilding);
cp = strstr(ct, "$HEX$");
cpo = cp;
*cpo++ = *cp;
cp += 5;
while (*cp && bGood) {
if (*cp == '0' && cp[1] == '0') {
bGood = 0;
break;
}
if (atoi16[ARCH_INDEX(*cp)] != 0x7f && atoi16[ARCH_INDEX(cp[1])] != 0x7f) {
*cpo++ = atoi16[ARCH_INDEX(*cp)]*16 + atoi16[ARCH_INDEX(cp[1])];
*cpo = 0;
cp += 2;
} else if (*cp == '$') {
while (*cp && strncmp(cp, "$HEX$", 5)) {
*cpo++ = *cp++;
}
*cpo = 0;
if (!strncmp(cp, "$HEX$", 5)) {
*cpo++ = *cp;
cp += 5;
}
} else {
return split_fields[1];
}
}
if (bGood)
cpBuilding = ct;
// if we came into $HEX$ removal, then cpBuilding will always be shorter
}
// at this point max length is still < 512. 491 + strlen($dynamic_xxxxx$) is 506
if (pPriv->nUserName && !strstr(cpBuilding, "$$U")) {
if (split_fields[0] && strlen(split_fields[0]) && strcmp(split_fields[0], "?")) {
char *userName=split_fields[0], *cp;
static char ct[1024];
// assume field[0] is in format: username OR DOMAIN\\username. If we find a \\, then use the username 'following' it.
cp = strchr(split_fields[0], '\\');
if (cp)
userName = &cp[1];
userName = HandleCase(userName, pPriv->nUserName);
snprintf (ct, sizeof(ct), "%s$$U%s", cpBuilding, userName);
cpBuilding = ct;
}
}
if (pPriv->FldMask) {
for (i = 0; i < 10; ++i) {
if (pPriv->FldMask&(MGF_FLDx_BIT<<i)) {
sprintf(Tmp, "$$F%d", i);
if (split_fields[i] && strlen(split_fields[i]) && strcmp(split_fields[i], "/") && !strstr(cpBuilding, Tmp)) {
static char ct[1024];
char ct2[1024];
snprintf (ct2, sizeof(ct2), "%s$$F%d%s", cpBuilding, i, split_fields[i]);
strcpy(ct, ct2);
cpBuilding = ct;
}
}
}
}
return cpBuilding;
}
static char *split(char *ciphertext, int index, struct fmt_main *pFmt)
{
static char out[1024];
private_subformat_data *pPriv = pFmt->private.data;
if (strlen(ciphertext) > 950)
return ciphertext;
// mime. We want to strip off ALL trailing '=' characters to 'normalize' them
if (pPriv->dynamic_base64_inout == 3 && !strncmp(ciphertext, "$dynamic_", 9))
{
static char ct[496];
unsigned int len;
char *cp = strchr(&ciphertext[9], '$'), *cp2;
if (cp) {
++cp;
len = base64_valid_length(cp, e_b64_mime, flg_Base64_MIME_TRAIL_EQ_CNT);
if (len && cp[len-1] == '=') {
strnzcpy(ct, ciphertext, cp-ciphertext+len+1);
cp2 = &ct[strlen(ct)-1];
while (*cp2 == '=')
*cp2-- = 0;
if (cp[len])
strcat(cp2, &cp[len]);
ciphertext = ct;
}
}
}
if (!strncmp(ciphertext, "$dynamic", 8)) {
if (strstr(ciphertext, "$HEX$"))
return RemoveHEX(out, ciphertext);
return ciphertext;
}
if (!strncmp(ciphertext, "md5_gen(", 8)) {
ciphertext += 8;
do ++ciphertext; while (*ciphertext != ')') ;
++ciphertext;
}
if (strstr(ciphertext, "$HEX$")) {
char *cp = out + sprintf(out, "%s", pPriv->dynamic_WHICH_TYPE_SIG);
RemoveHEX(cp, ciphertext);
} else
snprintf(out, sizeof(out), "%s%s", pPriv->dynamic_WHICH_TYPE_SIG, ciphertext);
return out;
}
// This split unifies case.
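// It lower-cases the hex digits of the hash portion (everything between the '$' that ends the
// signature and the next '$'), so mixed-case input lines and .pot entries compare equal.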
static char *split_UC(char *ciphertext, int index, struct fmt_main *pFmt)
{
static char out[1024];
private_subformat_data *pPriv = pFmt->private.data;
if (!strncmp(ciphertext, "$dynamic", 8)) {
if (strstr(ciphertext, "$HEX$"))
RemoveHEX(out, ciphertext);
else
strcpy(out, ciphertext);
} else {
if (!strncmp(ciphertext, "md5_gen(", 8)) {
ciphertext += 8;
do ++ciphertext; while (*ciphertext != ')') ;
++ciphertext;
}
if (strstr(ciphertext, "$HEX$")) {
char *cp = out + sprintf(out, "%s", pPriv->dynamic_WHICH_TYPE_SIG);
RemoveHEX(cp, ciphertext);
} else
sprintf(out, "%s%s", pPriv->dynamic_WHICH_TYPE_SIG, ciphertext);
}
ciphertext = strchr(&out[8], '$')+1;
while (*ciphertext && *ciphertext != '$') {
if (*ciphertext >= 'A' && *ciphertext <= 'Z')
*ciphertext += 0x20; // ASCII specific, but I really do not care.
++ciphertext;
}
// printf("%s\n", out);
return out;
}
/*********************************************************************************
* Stores the new salt provided into our 'working' salt
*********************************************************************************/
static void set_salt(void *salt)
{
unsigned char *cpsalt;
unsigned int todo_bits=0, i, bit;
if (!salt || curdat.dynamic_FIXED_SALT_SIZE == 0) {
saltlen = 0;
return;
}
cpsalt = *((unsigned char**)salt);
saltlen = *cpsalt++ - '0';
saltlen <<= 3;
saltlen += *cpsalt++ - '0';
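// the length arrives as two base-8 ASCII digits, high digit first; e.g. a 12 byte salt is sent as '1','4'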
#if ARCH_ALLOWS_UNALIGNED
if (*((ARCH_WORD_32*)cpsalt) != 0x30303030)
#else
if (memcmp(cpsalt, "0000", 4))
#endif
{
// this is why we used base-8. It takes an extra byte, but there is NO conditional
// logic building this number, and no multiplication. We HAVE added one conditional
// check, to see if we can skip the entire load if it is 0000.
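// The 4 digits decode into a 12-bit map of which optional pieces follow the salt bytes:
// bit 0 = salt2, bit 1 = username, bits 2..11 = fields 0..9 (see the loop below).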
todo_bits = *cpsalt++ - '0';
todo_bits <<= 3;
todo_bits += *cpsalt++ - '0';
todo_bits <<= 3;
todo_bits += *cpsalt++ - '0';
todo_bits <<= 3;
todo_bits += *cpsalt++ - '0';
}
else
cpsalt += 4;
cursalt = cpsalt;
if (!todo_bits) return;
cpsalt += saltlen;
if (todo_bits & 1) {
todo_bits ^= 1; // clear that bit.
saltlen2 = *cpsalt++;
cursalt2 = cpsalt;
if (todo_bits == 0) return;
cpsalt += saltlen2;
}
if (todo_bits & 2) {
todo_bits ^= 2; // clear that bit.
usernamelen = *cpsalt++;
username = cpsalt;
if (todo_bits == 0) return;
cpsalt += usernamelen;
}
bit = 4;
for (i = 0; i < 10; ++i, bit<<=1) {
if (todo_bits & bit) {
todo_bits ^= bit; // clear that bit.
fld_lens[i] = *cpsalt++;
flds[i] = cpsalt;
if (todo_bits == 0) return;
cpsalt += fld_lens[i];
}
}
}
/*********************************************************************************
* Sets this key. It will either be dropped DIRECTLY into the input buffer
* number 1, or put into an array of keys. Which one happens depends upon
* HOW the generic functions were laid out for this type. Not all types can
* load into the input. If not they MUST use the key array. Using the input
* buffer is faster, when it can be safely done.
*********************************************************************************/
static void set_key(char *key, int index)
{
unsigned int len;
//printf("idx=%d key=%s\n", index, key);
#ifdef SIMD_COEF_32
if (curdat.store_keys_in_input==2)
dynamic_use_sse = 3;
else if (curdat.md5_startup_in_x86)
dynamic_use_sse = 2;
else if (dynamic_use_sse==2)
dynamic_use_sse = 1;
#endif
if (curdat.nPassCase>1)
key = HandleCase(key, curdat.nPassCase);
// Ok, if the key is in unicode/utf8, we switch it here one time, and are done with it.
if (curdat.store_keys_in_input)
{
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
// code derived from rawMD5_fmt_plug.c code from magnum
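// Keys in the SIMD buffer are interleaved: each lane owns one 32-bit word per "row", so
// consecutive words of a single key sit SIMD_COEF_32 words apart. The 0x80 MD5 padding byte
// is appended right after the key, and word 14 of the lane holds the bit length (len << 3).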
#if ARCH_ALLOWS_UNALIGNED
const ARCH_WORD_32 *key32 = (ARCH_WORD_32*)key;
#else
char buf_aligned[PLAINTEXT_LENGTH + 1] JTR_ALIGN(sizeof(uint32_t));
const ARCH_WORD_32 *key32 = is_aligned(key, sizeof(uint32_t)) ?
(uint32_t*)key : (uint32_t*)strcpy(buf_aligned, key);
#endif
unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
ARCH_WORD_32 *keybuffer = &input_buf[idx].w[index&(SIMD_COEF_32-1)];
ARCH_WORD_32 *keybuf_word = keybuffer;
unsigned int len;
ARCH_WORD_32 temp;
len = 0;
while((temp = *key32++) & 0xff) {
if (!(temp & 0xff00))
{
*keybuf_word = (temp & 0xff) | (0x80 << 8);
++len;
goto key_cleaning;
}
if (!(temp & 0xff0000))
{
*keybuf_word = (temp & 0xffff) | (0x80 << 16);
len+=2;
goto key_cleaning;
}
if (!(temp & 0xff000000))
{
*keybuf_word = temp | (0x80U << 24);
len+=3;
goto key_cleaning;
}
*keybuf_word = temp;
len += 4;
keybuf_word += SIMD_COEF_32;
}
*keybuf_word = 0x80;
key_cleaning:
keybuf_word += SIMD_COEF_32;
while(*keybuf_word) {
*keybuf_word = 0;
keybuf_word += SIMD_COEF_32;
}
keybuffer[14*SIMD_COEF_32] = len << 3;
return;
}
#endif
len = strlen(key);
if (len > 110) // we never do UTF-8 -> UTF-16 in this mode
len = 110;
// if(index==0) {
// we 'have' to use full clean here. NOT 100% sure why, but 10 formats fail if we do not.
// __nonMP_DynamicFunc__clean_input_full();
// }
#if MD5_X2
if (index & 1)
memcpy(input_buf_X86[index>>MD5_X2].x2.b2, key, len);
else
#endif
memcpy(input_buf_X86[index>>MD5_X2].x1.b, key, len);
saved_key_len[index] = total_len_X86[index] = len;
}
else
{
len = strlen(key);
if (len > 110 && !(fmt_Dynamic.params.flags & FMT_UNICODE))
len = 110;
// if(index==0) {
// __nonMP_DynamicFunc__clean_input_full();
// }
keys_dirty = 1;
memcpy(((char*)(saved_key[index])), key, len);
saved_key_len[index] = len;
}
}
static void clear_keys(void)
{
#ifdef SIMD_COEF_32
if (curdat.pSetup->flags & MGF_FULL_CLEAN_REQUIRED) {
__nonMP_DynamicFunc__clean_input_full();
return;
}
if (curdat.store_keys_in_input==1 || curdat.store_keys_in_input==3)
return;
if (curdat.md5_startup_in_x86)
__nonMP_DynamicFunc__clean_input_full();
// This clean was causing failures (dirty buffers left) for dyna_51, 61 and formspring.
// Once commented out, dyna fully passes. I see no reason to keep this here at all.
// else
// __nonMP_DynamicFunc__clean_input_kwik();
#else
__nonMP_DynamicFunc__clean_input_full();
#endif
}
/*********************************************************************************
* Returns the key. NOTE how it gets it depends upon if we are storing
* into the array of keys (there we simply return it), or if we are
* loading into input buffer #1. If in input buffer, we have to re-create
* the key, prior to returning it.
*********************************************************************************/
static char *get_key(int index)
{
if (curdat.store_keys_in_input)
{
unsigned int i;
unsigned char *cp;
#ifdef SIMD_COEF_32
//if (dynamic_use_sse==1) {
// Note, if we are not in
if (dynamic_use_sse && !curdat.md5_startup_in_x86) {
unsigned int s;
unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
//if (curdat.store_keys_in_input && dynamic_use_sse==1)
// s = saved_key_len[index]; // NOTE, we now have to get the length from the buffer, we do NOT store it into a saved_key_len buffer.
ARCH_WORD_32 *keybuffer = &input_buf[idx].w[index&(SIMD_COEF_32-1)];
s = keybuffer[14*SIMD_COEF_32] >> 3;
for(i=0;i<s;i++)
out[i] = input_buf[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
out[i] = 0;
return (char*)out;
}
#endif
#if MD5_X2
if (index & 1)
cp = input_buf_X86[index>>MD5_X2].x2.B2;
else
#endif
cp = input_buf_X86[index>>MD5_X2].x1.B;
for(i=0;i<saved_key_len[index];++i)
out[i] = cp[i];
out[i] = 0;
return (char*)out;
}
else
{
saved_key[index][saved_key_len[index]] = '\0';
return saved_key[index];
}
}
/*********************************************************************************
* Looks for ANY key that was cracked.
*********************************************************************************/
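// Only the first 32-bit word of each computed hash is compared here; it is a cheap filter, and
// cmp_one() below performs the full 4-word (128-bit) comparison for any candidate that matches.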
static int cmp_all(void *binary, int count)
{
unsigned int i;
#ifdef SIMD_COEF_32
unsigned int j;
if (dynamic_use_sse&1) {
unsigned int cnt = ( ((unsigned int)count+SIMD_COEF_32-1)/SIMD_COEF_32);
for (i = 0; i < cnt; ++i)
{
for (j = 0; j < SIMD_COEF_32; ++j)
if( *((ARCH_WORD_32 *)binary) == crypt_key[i].w[j])
return 1;
}
return 0;
}
#endif
for (i = 0; i < count; i++) {
#if MD5_X2
if (i&1) {
if (!(((ARCH_WORD_32 *)binary)[0] - crypt_key_X86[i>>MD5_X2].x2.w2[0]))
return 1;
}
else
#endif
if (!(((ARCH_WORD_32 *)binary)[0] - crypt_key_X86[i>>MD5_X2].x1.w[0]))
return 1;
}
return 0;
}
#if ARCH_LITTLE_ENDIAN
#define MASK_4x6 0x00ffffff
#else
#define MASK_4x6 0xffffff00
#endif
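// The *_64_4x6 variants (the name suggests 4 base-64 chars of 6 bits each per word) compare
// hashes where only 24 bits of every 32-bit word are significant; MASK_4x6 strips the unused
// byte, whose position depends on endianness.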
static int cmp_all_64_4x6(void *binary, int count)
{
unsigned int i;
#ifdef SIMD_COEF_32
unsigned int j;
if (dynamic_use_sse==1) {
unsigned int cnt = ( ((unsigned int)count+SIMD_COEF_32-1)/SIMD_COEF_32);
for (i = 0; i < cnt; ++i)
{
for (j = 0; j < SIMD_COEF_32; ++j)
if( *((ARCH_WORD_32 *)binary) == (crypt_key[i].w[j] & MASK_4x6))
return 1;
}
return 0;
}
#endif
for (i = 0; i < count; i++) {
#if MD5_X2
if (i&1) {
if (!(((ARCH_WORD_32 *)binary)[0] - (crypt_key_X86[i>>MD5_X2].x2.w2[0]&MASK_4x6)))
return 1;
}
else
#endif
if (!(((ARCH_WORD_32 *)binary)[0] - (crypt_key_X86[i>>MD5_X2].x1.w[0]&MASK_4x6)))
return 1;
}
return 0;
}
/*********************************************************************************
* In this code, we always do exact compare, so if this function is called, it
* simply returns true.
*********************************************************************************/
static int cmp_exact(char *binary, int index)
{
return 1;
}
/*********************************************************************************
* There was 'something' that was possibly hit. Now john will ask us to check
* each one of the data items, for an 'exact' match.
*********************************************************************************/
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_32
if (dynamic_use_sse&1) {
unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
if( (((ARCH_WORD_32 *)binary)[0] == ((ARCH_WORD_32 *)&(crypt_key[idx].c))[0*SIMD_COEF_32+(index&(SIMD_COEF_32-1))]) &&
(((ARCH_WORD_32 *)binary)[1] == ((ARCH_WORD_32 *)&(crypt_key[idx].c))[1*SIMD_COEF_32+(index&(SIMD_COEF_32-1))]) &&
(((ARCH_WORD_32 *)binary)[2] == ((ARCH_WORD_32 *)&(crypt_key[idx].c))[2*SIMD_COEF_32+(index&(SIMD_COEF_32-1))]) &&
(((ARCH_WORD_32 *)binary)[3] == ((ARCH_WORD_32 *)&(crypt_key[idx].c))[3*SIMD_COEF_32+(index&(SIMD_COEF_32-1))]))
return 1;
return 0;
}
#endif
#if MD5_X2
if (index & 1) {
if ( (((ARCH_WORD_32 *)binary)[0] == crypt_key_X86[index>>MD5_X2].x2.w2[0] ) &&
(((ARCH_WORD_32 *)binary)[1] == crypt_key_X86[index>>MD5_X2].x2.w2[1] ) &&
(((ARCH_WORD_32 *)binary)[2] == crypt_key_X86[index>>MD5_X2].x2.w2[2] ) &&
(((ARCH_WORD_32 *)binary)[3] == crypt_key_X86[index>>MD5_X2].x2.w2[3] ) )
return 1;
return 0;
}
#endif
if ( (((ARCH_WORD_32 *)binary)[0] == crypt_key_X86[index>>MD5_X2].x1.w[0] ) &&
(((ARCH_WORD_32 *)binary)[1] == crypt_key_X86[index>>MD5_X2].x1.w[1] ) &&
(((ARCH_WORD_32 *)binary)[2] == crypt_key_X86[index>>MD5_X2].x1.w[2] ) &&
(((ARCH_WORD_32 *)binary)[3] == crypt_key_X86[index>>MD5_X2].x1.w[3] ) )
return 1;
return 0;
}
static int cmp_one_64_4x6(void *binary, int index)
{
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
if( (((ARCH_WORD_32 *)binary)[0] == (((ARCH_WORD_32 *)&(crypt_key[idx].c))[0*SIMD_COEF_32+(index&(SIMD_COEF_32-1))] & MASK_4x6)) &&
(((ARCH_WORD_32 *)binary)[1] == (((ARCH_WORD_32 *)&(crypt_key[idx].c))[1*SIMD_COEF_32+(index&(SIMD_COEF_32-1))] & MASK_4x6)) &&
(((ARCH_WORD_32 *)binary)[2] == (((ARCH_WORD_32 *)&(crypt_key[idx].c))[2*SIMD_COEF_32+(index&(SIMD_COEF_32-1))] & MASK_4x6)) &&
(((ARCH_WORD_32 *)binary)[3] == (((ARCH_WORD_32 *)&(crypt_key[idx].c))[3*SIMD_COEF_32+(index&(SIMD_COEF_32-1))] & MASK_4x6)))
return 1;
return 0;
}
#endif
#if MD5_X2
if (index & 1) {
if ( (((ARCH_WORD_32*)binary)[0] == (crypt_key_X86[index>>MD5_X2].x2.w2[0] & MASK_4x6)) &&
(((ARCH_WORD_32*)binary)[1] == (crypt_key_X86[index>>MD5_X2].x2.w2[1] & MASK_4x6)) &&
(((ARCH_WORD_32*)binary)[2] == (crypt_key_X86[index>>MD5_X2].x2.w2[2] & MASK_4x6)) &&
(((ARCH_WORD_32*)binary)[3] == (crypt_key_X86[index>>MD5_X2].x2.w2[3] & MASK_4x6)) )
return 1;
return 0;
}
#endif
if ( (((ARCH_WORD_32*)binary)[0] == (crypt_key_X86[index>>MD5_X2].x1.w[0] & MASK_4x6)) &&
(((ARCH_WORD_32*)binary)[1] == (crypt_key_X86[index>>MD5_X2].x1.w[1] & MASK_4x6)) &&
(((ARCH_WORD_32*)binary)[2] == (crypt_key_X86[index>>MD5_X2].x1.w[2] & MASK_4x6)) &&
(((ARCH_WORD_32*)binary)[3] == (crypt_key_X86[index>>MD5_X2].x1.w[3] & MASK_4x6)) )
return 1;
return 0;
}
/*********************************************************************************
*********************************************************************************
* This is the real 'engine'. It simply calls functions one
* at a time from the array of functions.
*********************************************************************************
*********************************************************************************/
static int crypt_all(int *pcount, struct db_salt *salt)
{
// set m_count. This is our GLOBAL value, used by ALL of the script functions to know how
// many keys are loaded, and how much work we do.
m_count = *pcount;
__nonMP_eLargeOut(eBase16);
__nonMP_nLargeOff(0);
#ifdef SIMD_COEF_32
// If this format is MMX built, but is supposed to start in X86 (but be switchable), then we
// set that value here.
if (curdat.store_keys_in_input==2)
dynamic_use_sse = 3;
else if (curdat.md5_startup_in_x86)
dynamic_use_sse = 2;
else if (dynamic_use_sse==2)
dynamic_use_sse = 1;
#endif
__nonMP_md5_unicode_convert(0);
if (curdat.dynamic_base16_upcase) {
dynamic_itoa16 = itoa16u;
itoa16_w2 = itoa16_w2_u;
}
else {
dynamic_itoa16 = itoa16;
itoa16_w2 = itoa16_w2_l;
}
// There may have to be some 'prelim' work done with the keys. This is so that if we 'know' that keys were
// loaded into the keys[] array, but that we should do something like md5 them and base-16 encode them into an
// input slot, then we do that FIRST, prior to calling the script functions. Thus for a format such as
// md5(md5($p).$s) we could md5 the pass, and base-16 encode it into an input buffer. Then when john sets the salt
// and calls crypt_all, the crypt script would simply set the input len to 32, append the salt and call a
// single crypt. That eliminates almost 1/2 of the calls to md5_crypt() for the format shown in this example.
if (keys_dirty)
{
if (curdat.store_keys_normal_but_precompute_hash_to_output2)
{
keys_dirty = 0;
__nonMP_DynamicFunc__clean_input2();
if (curdat.store_keys_in_input_unicode_convert)
__nonMP_md5_unicode_convert(1);
__nonMP_DynamicFunc__append_keys2();
__nonMP_md5_unicode_convert(0);
//if (curdat.using_flat_buffers_sse2_ok) {
if (curdat.dynamic_use_sse == 0) {
if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1) {
#ifdef _OPENMP
#define CASE(H) case MGF__##H: DynamicFunc__##H##_crypt_input2_overwrite_input1(0,m_count,0); break
#else
#define CASE(H) case MGF__##H: DynamicFunc__##H##_crypt_input2_overwrite_input1(); break
#endif
switch(curdat.store_keys_normal_but_precompute_hash_to_output2_base16_type)
{
CASE(MD5);
CASE(MD4);
CASE(SHA1);
CASE(SHA224);
CASE(SHA256);
CASE(SHA384);
CASE(SHA512);
CASE(GOST);
CASE(WHIRLPOOL);
CASE(Tiger);
CASE(RIPEMD128);
CASE(RIPEMD160);
CASE(RIPEMD256);
CASE(RIPEMD320);
CASE(HAVAL128_3);
CASE(HAVAL128_4);
CASE(HAVAL128_5);
CASE(HAVAL160_3);
CASE(HAVAL160_4);
CASE(HAVAL160_5);
CASE(HAVAL192_3);
CASE(HAVAL192_4);
CASE(HAVAL192_5);
CASE(HAVAL224_3);
CASE(HAVAL224_4);
CASE(HAVAL224_5);
CASE(HAVAL256_3);
CASE(HAVAL256_4);
CASE(HAVAL256_5);
CASE(MD2);
CASE(PANAMA);
CASE(SKEIN224);
CASE(SKEIN256);
CASE(SKEIN384);
CASE(SKEIN512);
CASE(SHA3_224);
CASE(SHA3_256);
CASE(SHA3_384);
CASE(SHA3_512);
CASE(KECCAK_256);
CASE(KECCAK_512);
// LARGE_HASH_EDIT_POINT
}
} else if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX) {
unsigned int i;
for (i = 0; i < m_count; ++i)
total_len_X86[i] = curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX;
#undef CASE
#ifdef _OPENMP
#define CASE(H) case MGF__##H: DynamicFunc__##H##_crypt_input2_append_input1(0,m_count,0); break
#else
#define CASE(H) case MGF__##H: DynamicFunc__##H##_crypt_input2_append_input1(); break
#endif
switch(curdat.store_keys_normal_but_precompute_hash_to_output2_base16_type) {
CASE(MD5);
CASE(MD4);
CASE(SHA1);
CASE(SHA224);
CASE(SHA256);
CASE(SHA384);
CASE(SHA512);
CASE(GOST);
CASE(WHIRLPOOL);
CASE(Tiger);
CASE(RIPEMD128);
CASE(RIPEMD160);
CASE(RIPEMD256);
CASE(RIPEMD320);
CASE(HAVAL128_3);
CASE(HAVAL128_4);
CASE(HAVAL128_5);
CASE(HAVAL160_3);
CASE(HAVAL160_4);
CASE(HAVAL160_5);
CASE(HAVAL192_3);
CASE(HAVAL192_4);
CASE(HAVAL192_5);
CASE(HAVAL224_3);
CASE(HAVAL224_4);
CASE(HAVAL224_5);
CASE(HAVAL256_3);
CASE(HAVAL256_4);
CASE(HAVAL256_5);
CASE(MD2);
CASE(PANAMA);
CASE(SKEIN224);
CASE(SKEIN256);
CASE(SKEIN384);
CASE(SKEIN512);
CASE(SHA3_224);
CASE(SHA3_256);
CASE(SHA3_384);
CASE(SHA3_512);
CASE(KECCAK_256);
CASE(KECCAK_512);
// LARGE_HASH_EDIT_POINT
}
} else {
// calls 'old' code (ossl, sorry :( We should FIND and remove any format
// written this way, if it is
__possMP_DynamicFunc__crypt2_md5();
}
} else {
__possMP_DynamicFunc__crypt2_md5();
if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1)
{
if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1==2)
__nonMP_DynamicFunc__SSEtoX86_switch_output2();
__nonMP_DynamicFunc__clean_input();
__nonMP_DynamicFunc__append_from_last_output2_to_input1_as_base16();
}
}
}
}
// Ok, now we 'run' the script. We simply call 1 function right after the other.
// ALL functions are void f(void). They use the globals:
// input_buf1[] input_buf2[] (requires thread safety)
// total_len1[] total_len2[] (requires thread safety)
// crypt1[] crypt2[] (requires thread safety)
// md5_unicode_convert (requires thread safety, had to change to array)
// saved_key[] (const?)
// saved_key_len[] (const)
// cursalt, cursalt2 (const)
// saltlen, saltlen2 (const)
// m_count (const)
// nConsts (const)
// Consts[], ConstsLen[] (const)
// Since this array is in a structure, we assign a simple pointer to it
// before walking. Trivial improvement, but every cycle counts :)
{
#ifdef _OPENMP
if ((curdat.pFmtMain->params.flags & FMT_OMP) == FMT_OMP) {
int j;
unsigned int inc = (m_count+m_ompt-1) / m_ompt;
//printf ("maxkeys=%d m_count=%d inc1=%d granularity=%d inc2=%d\n", curdat.pFmtMain->params.max_keys_per_crypt, m_count, inc, curdat.omp_granularity, ((inc + curdat.omp_granularity-1)/curdat.omp_granularity)*curdat.omp_granularity);
inc = ((inc + curdat.omp_granularity-1)/curdat.omp_granularity)*curdat.omp_granularity;
#pragma omp parallel for shared(curdat, inc, m_count)
for (j = 0; j < m_count; j += inc) {
unsigned int i;
unsigned int top=j+inc;
/* The last block may 'appear' to have more keys than we have in the
entire buffer space. This is due to the granularity. If so,
reduce that last one to stop at end of our buffers. NOT doing
this causes a huge buffer overflow. */
if (top > curdat.pFmtMain->params.max_keys_per_crypt)
top = curdat.pFmtMain->params.max_keys_per_crypt;
// we now run a full script in this thread, using only a subset of
// the data, from [j,top). The next thread will run from [top,top+inc).
// Each thread will take the next inc values, until we get to m_count.
for (i = 0; curdat.dynamic_FUNCTIONS[i]; ++i)
(*(curdat.dynamic_FUNCTIONS[i]))(j,top,omp_get_thread_num());
}
} else {
unsigned int i;
// same code (almost), but without the threads.
for (i = 0; curdat.dynamic_FUNCTIONS[i]; ++i)
(*(curdat.dynamic_FUNCTIONS[i]))(0,m_count,0);
}
#else
unsigned int i;
for (i = 0; curdat.dynamic_FUNCTIONS[i]; ++i) {
(*(curdat.dynamic_FUNCTIONS[i]))();
#if 0
// Dump state (for debugging help)
if (i==0) printf("\npassword=%.*s\n", saved_key_len[0], saved_key[0]);
printf ("\nState after function: %s\n", dynamic_Find_Function_Name(curdat.dynamic_FUNCTIONS[i]));
// dump input 1
#ifdef SIMD_COEF_32
dump_stuff_mmx_msg("input_buf[0]", input_buf[0].c, 64, 0);
dump_stuff_mmx_msg("input_buf[1]", input_buf[0].c, 64, 1);
dump_stuff_mmx_msg("input_buf[2]", input_buf[0].c, 64, 2);
dump_stuff_mmx_msg("input_buf[3]", input_buf[0].c, 64, 3);
#endif
printf ("input_buf86[0] : %*.*s\n", total_len_X86[0],total_len_X86[0],input_buf_X86[0].x1.b);
printf ("input_buf86[1] : %*.*s\n", total_len_X86[1],total_len_X86[1],input_buf_X86[1].x1.b);
printf ("input_buf86[2] : %*.*s\n", total_len_X86[2],total_len_X86[2],input_buf_X86[2].x1.b);
printf ("input_buf86[3] : %*.*s\n", total_len_X86[3],total_len_X86[3],input_buf_X86[3].x1.b);
// dump crypt 1
#ifdef SIMD_COEF_32
dump_stuff_mmx_msg("crypt_key[0]", crypt_key[0].c, 16, 0);
dump_stuff_mmx_msg("crypt_key[1]", crypt_key[0].c, 16, 1);
dump_stuff_mmx_msg("crypt_key[2]", crypt_key[0].c, 16, 2);
dump_stuff_mmx_msg("crypt_key[3]", crypt_key[0].c, 16, 3);
#endif
dump_stuff_be_msg("crypt_key_X86[0]", crypt_key_X86[0].x1.b, 16);
dump_stuff_be_msg("crypt_key_X86[1]", crypt_key_X86[1].x1.b, 16);
dump_stuff_be_msg("crypt_key_X86[2]", crypt_key_X86[2].x1.b, 16);
dump_stuff_be_msg("crypt_key_X86[3]", crypt_key_X86[3].x1.b, 16);
// dump input 2
#ifdef SIMD_COEF_32
dump_stuff_mmx_msg("input_buf2[0]", input_buf2[0].c, 64, 0);
dump_stuff_mmx_msg("input_buf2[1]", input_buf2[0].c, 64, 1);
dump_stuff_mmx_msg("input_buf2[2]", input_buf2[0].c, 64, 2);
dump_stuff_mmx_msg("input_buf2[3]", input_buf2[0].c, 64, 3);
#endif
printf ("input2_buf86[0] : %*.*s\n", total_len2_X86[0],total_len2_X86[0],input_buf2_X86[0].x1.b);
printf ("input2_buf86[1] : %*.*s\n", total_len2_X86[1],total_len2_X86[1],input_buf2_X86[1].x1.b);
printf ("input2_buf86[2] : %*.*s\n", total_len2_X86[2],total_len2_X86[2],input_buf2_X86[2].x1.b);
printf ("input2_buf86[3] : %*.*s\n", total_len2_X86[3],total_len2_X86[3],input_buf2_X86[3].x1.b);
// dump crypt 2
#ifdef SIMD_COEF_32
dump_stuff_mmx_msg("crypt_key2[0]", crypt_key2[0].c, 16, 0);
dump_stuff_mmx_msg("crypt_key2[1]", crypt_key2[0].c, 16, 1);
dump_stuff_mmx_msg("crypt_key2[2]", crypt_key2[0].c, 16, 2);
dump_stuff_mmx_msg("crypt_key2[3]", crypt_key2[0].c, 16, 3);
#endif
dump_stuff_be_msg("crypt_key2_X86[0]", crypt_key2_X86[0].x1.b, 16);
dump_stuff_be_msg("crypt_key2_X86[1]", crypt_key2_X86[1].x1.b, 16);
dump_stuff_be_msg("crypt_key2_X86[2]", crypt_key2_X86[2].x1.b, 16);
dump_stuff_be_msg("crypt_key2_X86[3]", crypt_key2_X86[3].x1.b, 16);
#endif
}
#endif
}
return m_count;
}
/*********************************************************************************
* 'normal' hashing functions
*********************************************************************************/
extern char *MD5_DumpHexStr(void *p);
#if !ARCH_LITTLE_ENDIAN
// the lower 8 bits are zero in the binary (but filled in on the hash), so we shift out the low 8 bits
static int binary_hash_0_64x4(void * binary) { return (((ARCH_WORD_32 *)binary)[0]>>8) & PH_MASK_0; }
static int binary_hash_1_64x4(void * binary) { return (((ARCH_WORD_32 *)binary)[0]>>8) & PH_MASK_1; }
static int binary_hash_2_64x4(void * binary) { return (((ARCH_WORD_32 *)binary)[0]>>8) & PH_MASK_2; }
static int binary_hash_3_64x4(void * binary) { return (((ARCH_WORD_32 *)binary)[0]>>8) & PH_MASK_3; }
static int binary_hash_4_64x4(void * binary) { return (((ARCH_WORD_32 *)binary)[0]>>8) & PH_MASK_4; }
static int binary_hash_5_64x4(void * binary) { return (((ARCH_WORD_32 *)binary)[0]>>8) & PH_MASK_5; }
static int get_hash_0_64x4(int index) {
#if MD5_X2
if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_0;
#endif
return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_0;}
static int get_hash_1_64x4(int index) {
#if MD5_X2
if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_1;
#endif
return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_1;}
static int get_hash_2_64x4(int index) {
#if MD5_X2
if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_2;
#endif
return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_2;}
static int get_hash_3_64x4(int index) {
#if MD5_X2
if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_3;
#endif
return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_3;}
static int get_hash_4_64x4(int index) {
#if MD5_X2
if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_4;
#endif
return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_4;}
static int get_hash_5_64x4(int index) {
#if MD5_X2
if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_5;
#endif
return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_5;}
#endif
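// The get_hash_N() functions feed john's hash tables: each returns the first 32-bit word of the
// computed hash for 'index', masked to PH_MASK_N bits, reading from the proper SIMD lane when
// dynamic_use_sse is active, or from the flat X86 buffers otherwise.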
static int get_hash_0(int index)
{
#ifdef SIMD_COEF_32
if (dynamic_use_sse&1) {
unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
return ((ARCH_WORD_32 *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_0;
}
#endif
#if MD5_X2
if (index & 1)
return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_0;
#endif
return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_0;
}
static int get_hash_1(int index)
{
#ifdef SIMD_COEF_32
if (dynamic_use_sse&1) {
unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
return ((ARCH_WORD_32 *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_1;
}
#endif
#if MD5_X2
if (index & 1)
return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_1;
#endif
return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_1;
}
static int get_hash_2(int index)
{
#ifdef SIMD_COEF_32
if (dynamic_use_sse&1) {
unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
return ((ARCH_WORD_32 *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_2;
}
#endif
#if MD5_X2
if (index & 1)
return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_2;
#endif
return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_2;
}
static int get_hash_3(int index)
{
#ifdef SIMD_COEF_32
if (dynamic_use_sse&1) {
unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
return ((ARCH_WORD_32 *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_3;
}
#endif
#if MD5_X2
if (index & 1)
return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_3;
#endif
return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_3;
}
static int get_hash_4(int index)
{
#ifdef SIMD_COEF_32
if (dynamic_use_sse&1) {
unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
return ((ARCH_WORD_32 *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_4;
}
#endif
#if MD5_X2
if (index & 1)
return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_4;
#endif
return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_4;
}
static int get_hash_5(int index)
{
#ifdef SIMD_COEF_32
if (dynamic_use_sse&1) {
unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
return ((ARCH_WORD_32 *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_5;
}
#endif
#if MD5_X2
if (index & 1)
return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_5;
#endif
return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_5;
}
static int get_hash_6(int index)
{
#ifdef SIMD_COEF_32
if (dynamic_use_sse&1) {
unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
return ((ARCH_WORD_32 *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_6;
}
#endif
#if MD5_X2
if (index & 1)
return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_6;
#endif
return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_6;
}
/************************************************************************
* We now fully handle all hashing of salts, here in the format. We
* return a pointer to an allocated salt record. Thus, we search all
* of the salt records, looking for the same salt. If we find it, we
* want to return THAT pointer, and not allocate a new pointer.
* This works great, but forces us to do salt comparison here.
***********************************************************************/
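// The dedupe machinery below is three pieces: a hash table of list heads (SaltHashTab, indexed
// by the CRC32 of the normalized salt), a pool of list entries grabbed 25000 at a time, and a
// byte pool (0x60000 at a time) that the actual salt bytes are copied into. FindSaltHash()
// either returns a previously stored identical salt, or appends the new one via AddSaltHash().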
#define DYNA_SALT_HASH_BITS SALT_HASH_LOG
#define DYNA_SALT_HASH_SIZE (1<<DYNA_SALT_HASH_BITS)
#define DYNA_SALT_HASH_MOD (DYNA_SALT_HASH_SIZE-1)
typedef struct dyna_salt_list_entry {
struct dyna_salt_list_entry *next;
unsigned len;
unsigned char *salt;
} dyna_salt_list_entry;
typedef struct {
dyna_salt_list_entry *head, *tail;
int count;
} dyna_salt_list_main;
typedef struct {
dyna_salt_list_main List;
} SaltHashTab_t;
static SaltHashTab_t *SaltHashTab=NULL;
static dyna_salt_list_entry *pSaltHashData=NULL, *pSaltHashDataNext=NULL;
static int dyna_salt_list_count=0;
static unsigned char *pSaltDataBuf=NULL, *pNextSaltDataBuf=NULL;
static int nSaltDataBuf=0;
static unsigned char *AddSaltHash(unsigned char *salt, unsigned int len, unsigned int idx)
{
unsigned char *pRet;
if (dyna_salt_list_count == 0) {
pSaltHashDataNext = pSaltHashData = mem_calloc_tiny(sizeof(dyna_salt_list_entry) * 25000, MEM_ALIGN_WORD);
dyna_salt_list_count = 25000;
}
if (nSaltDataBuf < len) {
pSaltDataBuf = pNextSaltDataBuf = mem_alloc_tiny(0x60000, MEM_ALIGN_NONE);
nSaltDataBuf = 0x60000;
}
pRet = pNextSaltDataBuf;
pSaltHashDataNext->salt = pNextSaltDataBuf;
memcpy(pSaltHashDataNext->salt, salt, len);
pSaltHashDataNext->len = len;
pNextSaltDataBuf += len;
nSaltDataBuf -= len;
if (SaltHashTab[idx].List.count == 0)
SaltHashTab[idx].List.tail = SaltHashTab[idx].List.head = pSaltHashDataNext;
else {
SaltHashTab[idx].List.tail->next = pSaltHashDataNext;
SaltHashTab[idx].List.tail = pSaltHashDataNext;
}
++SaltHashTab[idx].List.count;
++pSaltHashDataNext;
--dyna_salt_list_count;
return pRet;
}
static unsigned char *FindSaltHash(unsigned char *salt, unsigned int len, CRC32_t crc)
{
unsigned int idx = crc & DYNA_SALT_HASH_MOD;
dyna_salt_list_entry *p;
if (!SaltHashTab)
SaltHashTab = mem_calloc_tiny(sizeof(SaltHashTab_t) * DYNA_SALT_HASH_SIZE, MEM_ALIGN_WORD);
if (!SaltHashTab[idx].List.count) {
return AddSaltHash(salt, len, idx);
}
// Ok, we have some salts in this hash list. Now walk the list, searching for an EQUAL salt.
p = SaltHashTab[idx].List.head;
while (p) {
if (len == p->len && !memcmp((char*)salt, (char*)p->salt, len)) {
return p->salt; // found it! return this one, so we do not allocate another.
}
p = p->next;
}
return AddSaltHash(salt, len, idx);
}
static unsigned char *HashSalt(unsigned char *salt, unsigned int len)
{
CRC32_t crc = 0xffffffff, i;
unsigned char *ret_hash;
// compute the hash.
for (i = 0; i < len; ++i)
crc = jtr_crc32(crc,salt[i]);
crc = ~crc;
ret_hash = FindSaltHash(salt, len, crc);
return ret_hash;
}
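// ConvertFromHex decodes an in-place "HEX$..." value (note: no leading '$' at this point).
// For illustration: p = "HEX$73616c74" with len 12 comes back as "salt" with a returned length of 4.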
static int ConvertFromHex(unsigned char *p, int len)
{
unsigned char *cp;
unsigned int i, x;
if (!p || memcmp(p, "HEX$", 4))
return len;
// Ok, do a convert, and return 'new' len.
len -= 4;
len >>= 1;
cp = p;
x = len;
for (i=4; x; --x, i+= 2) {
*cp++ = atoi16[ARCH_INDEX(p[i])]*16 + atoi16[ARCH_INDEX(p[i+1])];
}
*cp = 0;
return len;
}
static unsigned int salt_external_to_internal_convert(unsigned char *extern_salt, unsigned char *Buffer)
{
// Ok, we get this: extern_salt = salt_data$$2salt2$$Uuser ... where anything can be missing or in any order.
// The one exception to 'any order' is that salt_data MUST be first. So if we get $$2salt2, then we know there is no salt-1 value.
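// For illustration, with curdat.nUserName set, extern_salt "mysalt$$Ujoe" becomes: '0','6'
// (salt length 6 in base-8), '0','0','0','2' (bit map with only the username bit), the 6 bytes
// "mysalt", then a raw length byte of 3 followed by "joe"; the_real_len comes out to 16.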
unsigned char *salt2=0, *userid=0, *Flds[10];
int i, nsalt2=0, nuserid=0, nFlds[10]={0,0,0,0,0,0,0,0,0,0};
unsigned int len = strlen((char*)extern_salt), bit;
unsigned int bit_array=0;
unsigned int the_real_len = 6; // 2 bytes base-8 length, and 4 bytes base-8 bitmap.
// work from back of string to front, looking for the $$X signatures.
for (i = len-3; i >= 0; --i) {
if (extern_salt[i] == '$' && extern_salt[i+1] == '$') {
// a 'likely' extra salt value.
switch(extern_salt[i+2]) {
case '2':
if (curdat.b2Salts) {
salt2 = &extern_salt[i+3];
nsalt2 = strlen((char*)salt2);
nsalt2 = ConvertFromHex(salt2, nsalt2);
extern_salt[i] = 0;
bit_array |= 1;
the_real_len += (nsalt2+1);
}
break;
case 'U':
if (curdat.nUserName) {
userid = &extern_salt[i+3];
nuserid = strlen((char*)userid);
nuserid = ConvertFromHex(userid, nuserid);
extern_salt[i] = 0;
bit_array |= 2;
the_real_len += (nuserid+1);
}
break;
case 'F': {
if (extern_salt[i+3] >= '0' && extern_salt[i+3] <= '9') {
if (curdat.FldMask && (curdat.FldMask & (MGF_FLDx_BIT<<(extern_salt[i+3]-'0'))) == (MGF_FLDx_BIT<<(extern_salt[i+3]-'0'))) {
Flds[extern_salt[i+3]-'0'] = &extern_salt[i+4];
nFlds[extern_salt[i+3]-'0'] = strlen((char*)(Flds[extern_salt[i+3]-'0']));
nFlds[extern_salt[i+3]-'0'] = ConvertFromHex(Flds[extern_salt[i+3]-'0'], nFlds[extern_salt[i+3]-'0']);
extern_salt[i] = 0;
bit_array |= (1<<(2+extern_salt[i+3]-'0'));
the_real_len += (nFlds[extern_salt[i+3]-'0']+1);
}
break;
}
}
}
}
}
// We have now ripped the data apart. Now put it into Buffer, in proper ORDER.
// Length of salt (salt1) and the bit array below are stored as base-8 numbers.
len = strlen((char*)extern_salt);
len = ConvertFromHex(extern_salt, len);
the_real_len += len;
*Buffer++ = (len>>3) + '0';
*Buffer++ = (len&7) + '0';
// bit array
*Buffer++ = (bit_array>>9) + '0';
*Buffer++ = ((bit_array>>6)&7) + '0';
*Buffer++ = ((bit_array>>3)&7) + '0';
*Buffer++ = (bit_array&7) + '0';
memcpy((char*)Buffer, (char*)extern_salt, len);
Buffer += len;
if (!bit_array)
return the_real_len;
if (nsalt2) {
*Buffer++ = nsalt2;
memcpy((char*)Buffer, (char*)salt2, nsalt2);
Buffer += nsalt2;
bit_array &= ~1;
if (!bit_array)
return the_real_len;
}
if (nuserid) {
*Buffer++ = nuserid;
memcpy((char*)Buffer, (char*)userid, nuserid);
if (curdat.nUserName==2) {
Buffer[nuserid] = 0;
strupr((char*)Buffer);
} else if (curdat.nUserName==3) {
Buffer[nuserid] = 0;
strlwr((char*)Buffer);
}
Buffer += nuserid;
bit_array &= ~2;
if (!bit_array)
return the_real_len;
}
bit = 4;
for (i = 0; i < 10; ++i, bit<<=1) {
if (nFlds[i]) {
*Buffer++ = nFlds[i];
memcpy((char*)Buffer, (char*)(Flds[i]), nFlds[i]);
Buffer += nFlds[i];
bit_array &= ~bit;
if (!bit_array)
return the_real_len;
}
}
return the_real_len;
}
/*********************************************************************************
* This salt function has been TOTALLY re-written. Now, we do these things:
* 1. convert from external format ($salt$$Uuser$$2HEX$salt2_in_hex, etc, into
* our internal format. Our internal format is 2 base-8 numbers (2 digit and 4
* digit), followed by the 'raw' salt bytes, followed by pascal strings of any
* other special salt values (salt2, user, fields 0 to 9). The first 2 digit
* base 8 number is the length of the binary bytes of the 'real' salt. The
* 2nd base-8 4 digit number, is a bit mask of what 'extra' salt types are
* contained.
* 2. We allocate and 'own' the salt buffers here, so that:
* 3. We detect duplicate salts. NOTE, we have normalized the salts, so 2 salts that
* appear different (external format) appear exactly the same in internal format.
* Thus, we dupe-remove them here.
* 4. We allocate storage for the salts. The ONLY thing we return to john, is
* a 4 (or 8 byte in 64 bit builds) pointer to the salt. Thus, when we find
* a dupe, we do not have to allocate ANY memory, and simply return the pointer
* to the original salt (which is the same as the one we are working on now).
*
* This is much more complex; however, it allows us to use much less memory and to
* have the set_salt function operate VERY quickly (all processing is done here).
* It also allows john's load time to be FASTER (yes faster) than it was before,
* due to the smaller memory footprint, and john's external salt collision handling has
* less work to do. The memory footprint was also reduced, because now we store
* JUST the required memory, and a pointer. Before, we often stored a LOT of memory
* for many format types. For a few types, we do use more memory with this method
* than before, but for most the memory usage is way down.
*********************************************************************************/
static void *get_salt(char *ciphertext)
{
char Salt[SALT_SIZE+1], saltIntBuf[SALT_SIZE+1];
int off, possible_neg_one=0;
unsigned char *saltp;
unsigned int the_real_len;
static union x {
unsigned char salt_p[sizeof(unsigned char*)];
ARCH_WORD p[1];
} union_x;
if ( (curdat.pSetup->flags&MGF_SALTED) == 0) {
memset(union_x.salt_p, 0, sizeof(union_x.salt_p));
return union_x.salt_p;
}
memset(Salt, 0, SALT_SIZE+1);
// Ok, see if the wrong dynamic type is loaded (such as the 'last' dynamic type).
if (!strncmp(ciphertext, "$dynamic_", 9)) {
char *cp1 = &ciphertext[9];
char *cp2 = &curdat.dynamic_WHICH_TYPE_SIG[9];
while (*cp2 && *cp2 == *cp1) {
++cp1; ++cp2;
}
if (*cp2) {
char subformat[17];
struct fmt_main *pFmtLocal;
int nFmtNum;
memcpy(subformat, ciphertext, 16);
subformat[16] = 0;
cp2 = &subformat[9];
while (*cp2 && *cp2 != '$')
++cp2;
*cp2 = 0;
nFmtNum = -1;
sscanf(subformat, "$dynamic_%d", &nFmtNum);
if (nFmtNum==-1)
return union_x.salt_p;
pFmtLocal = dynamic_Get_fmt_main(nFmtNum);
memcpy(&curdat, pFmtLocal->private.data, sizeof(private_subformat_data));
}
}
if (curdat.dynamic_FIXED_SALT_SIZE==0 && !curdat.nUserName && !curdat.FldMask)
return union_x.salt_p;
if (!strncmp(ciphertext, "$dynamic_", 9))
off=curdat.dynamic_SALT_OFFSET;
else
off=curdat.dynamic_SALT_OFFSET-strlen(curdat.dynamic_WHICH_TYPE_SIG);
if (ciphertext[off] == '$') {
if (ciphertext[off+1]=='U' && curdat.nUserName)
possible_neg_one = -1;
else if (ciphertext[off+1]=='2' && curdat.b2Salts)
possible_neg_one = -1;
else if (ciphertext[off+1]=='F' && ciphertext[off+2]>='0' && ciphertext[off+2]<='9' && curdat.FldMask) {
if ((curdat.FldMask & (MGF_FLDx_BIT<<(ciphertext[off+2]-'0'))) == (MGF_FLDx_BIT<<(ciphertext[off+2]-'0')))
possible_neg_one = -1;
}
}
strnzcpy(Salt, &ciphertext[off + possible_neg_one], SALT_SIZE);
if (curdat.dynamic_salt_as_hex)
{
unsigned char Buf[128];
unsigned int slen=strlen(Salt);
switch (curdat.dynamic_salt_as_hex_format_type) {
// TODO: Come up with some way to put these into a CASE(HASH) #define
#define SPH_CASE(H,F,S) case MGF__##H: {sph_##F##_context c;sph_##F##_init(&c);sph_##F(&c,(const unsigned char*)Salt,slen);sph_##F##_close(&c,Buf); \
memset(Salt,0,SALT_SIZE+1);base64_convert(Buf,e_b64_raw,S,Salt,e_b64_hex,SALT_SIZE, 0);break; }
#define OSSL_CASE(H,C,S) case MGF__##H: {C##_CTX c;H##_Init(&c);H##_Update(&c,Salt,slen);H##_Final(Buf,&c); \
memset(Salt,0,SALT_SIZE+1);base64_convert(Buf,e_b64_raw,S,Salt,e_b64_hex,SALT_SIZE, 0);break; }
#define KECCAK_CASE(H,S) case MGF__##H: {KECCAK_CTX c;H##_Init(&c);KECCAK_Update(&c,(BitSequence*)Salt,slen);KECCAK_Final(Buf,&c); \
memset(Salt,0,SALT_SIZE+1);base64_convert(Buf,e_b64_raw,S,Salt,e_b64_hex,SALT_SIZE, 0);break; }
case MGF__MD5:
{
// Do not 'worry' about SSE/MMX; only do 'generic' md5. This is ONLY done
// at the start of the run. We will NEVER see this code run again once john starts cracking.
MD5_CTX ctx;
int i;
char *cpo;
MD5_Init(&ctx);
if (curdat.dynamic_salt_as_hex & 0x100)
{
char *s2 = mem_alloc(slen*2+1);
for (i = 0; i < slen; ++i)
{
s2[i<<1] = Salt[i];
s2[(i<<1)+1] = 0;
}
MD5_Update(&ctx, s2, slen*2);
MEM_FREE(s2);
}
else
MD5_Update(&ctx, Salt, slen);
MD5_Final(Buf, &ctx);
if ( (curdat.dynamic_salt_as_hex&3) == 2) {
strcat(Salt, "$$2");
cpo = &Salt[slen+3];
}
else {
cpo = Salt;
memset(Salt, 0, SALT_SIZE+1);
}
base64_convert(Buf, e_b64_raw, 16, cpo, e_b64_hex, SALT_SIZE, 0);
break;
}
OSSL_CASE(MD4,MD4,16)
OSSL_CASE(SHA1,SHA,20)
OSSL_CASE(SHA224,SHA256,28)
OSSL_CASE(SHA256,SHA256,32)
OSSL_CASE(SHA384,SHA512,48)
OSSL_CASE(SHA512,SHA512,64)
OSSL_CASE(WHIRLPOOL,WHIRLPOOL,64)
case MGF__GOST:
{
gost_ctx ctx;
john_gost_init(&ctx);
john_gost_update(&ctx, (const unsigned char*)Salt, slen);
john_gost_final(&ctx, (unsigned char*)Buf);
memset(Salt, 0, SALT_SIZE+1);
base64_convert(Buf, e_b64_raw, 32, Salt, e_b64_hex, SALT_SIZE, 0);
break;
}
SPH_CASE(Tiger,tiger,24)
SPH_CASE(RIPEMD128,ripemd128,16)
SPH_CASE(RIPEMD160,ripemd160,20)
SPH_CASE(RIPEMD256,ripemd256,32)
SPH_CASE(RIPEMD320,ripemd320,40)
SPH_CASE(HAVAL128_3,haval128_3,16)
SPH_CASE(HAVAL128_4,haval128_4,16)
SPH_CASE(HAVAL128_5,haval128_5,16)
SPH_CASE(HAVAL160_3,haval160_3,20)
SPH_CASE(HAVAL160_4,haval160_4,20)
SPH_CASE(HAVAL160_5,haval160_5,20)
SPH_CASE(HAVAL192_3,haval192_3,24)
SPH_CASE(HAVAL192_4,haval192_4,24)
SPH_CASE(HAVAL192_5,haval192_5,24)
SPH_CASE(HAVAL224_3,haval224_3,28)
SPH_CASE(HAVAL224_4,haval224_4,28)
SPH_CASE(HAVAL224_5,haval224_5,28)
SPH_CASE(HAVAL256_3,haval256_3,32)
SPH_CASE(HAVAL256_4,haval256_4,32)
SPH_CASE(HAVAL256_5,haval256_5,32)
SPH_CASE(MD2,md2,16)
SPH_CASE(PANAMA,panama,32)
SPH_CASE(SKEIN224,skein224,28)
SPH_CASE(SKEIN256,skein256,32)
SPH_CASE(SKEIN384,skein384,48)
SPH_CASE(SKEIN512,skein512,64)
KECCAK_CASE(SHA3_224,28)
KECCAK_CASE(SHA3_256,32)
KECCAK_CASE(SHA3_384,48)
KECCAK_CASE(SHA3_512,64)
KECCAK_CASE(KECCAK_256,32)
KECCAK_CASE(KECCAK_512,64)
// LARGE_HASH_EDIT_POINT
default:
{
error_msg("Invalid dynamic flags seen. Data type not yet defined\n");
}
}
}
the_real_len = salt_external_to_internal_convert((unsigned char*)Salt, (unsigned char*)saltIntBuf);
// Now convert this into a stored salt, or find the 'already' stored same salt.
saltp = HashSalt((unsigned char*)saltIntBuf, the_real_len);
memcpy(union_x.salt_p, &saltp, sizeof(saltp));
return union_x.salt_p;
}
/*********************************************************************************
* Now our salt is returned only as a pointer. We hash the pointer value itself, not the salt data it points to.
*********************************************************************************/
static int salt_hash(void *salt)
{
unsigned long H;
if (!salt) return 0;
if ( (curdat.pSetup->flags&MGF_SALTED) == 0)
return 0;
// salt is now a pointer, but WORD aligned. We remove that word alignment, and simply use the next bits
H = *((unsigned long*)salt);
// Mix up the pointer value (H^(H>>9)) so that if we have a fixed sized allocation
// that things do get 'stirred' up better.
return ( (H^(H>>9)) & (SALT_HASH_SIZE-1) );
}
static unsigned dynamic_this_salt_length(const void *v) {
const unsigned char *s = (unsigned char*)v;
unsigned l = *s++ - '0';
unsigned bits;
l <<= 3;
l += *s++ - '0';
#if ARCH_ALLOWS_UNALIGNED
if (*((ARCH_WORD_32*)s) == 0x30303030)
#else
if (!memcmp(s, "0000", 4))
#endif
return l;
bits = *s++ - '0';
bits <<= 3;
bits += *s++ - '0';
bits <<= 3;
bits += *s++ - '0';
bits <<= 3;
bits += *s++ - '0';
s += l;
while(bits) {
if (bits & 1) {
l += *s;
s += *s;
++s;
}
bits >>= 1;
}
return l;
}
/*
* dyna compare is required to get all the shortest
* salt strings first, then the next longer, then the
* next, and finally the longest. Without this ordering,
* many dyna formats would miss hashes, because stale
* salt data left over in the buffers corrupts the next
* runs. Many formats try not to clear buffers when they
* do not need to, BUT this only works if salts are taken
* shortest to longest. This sort builds the list of
* salts that way.
*/
static int salt_compare(const void *x, const void *y)
{
/* this is all that is needed in dyna salt_compare().
Dyna is a pointer to a string, NOT the actual string.
The first 2 bytes of string are length (base 8 ascii) */
const char *X = *((const char**)x);
const char *Y = *((const char**)y);
int l1, l2, l;
if (*X<*Y) return -1;
if (*X>*Y) return 1;
if (X[1]<Y[1]) return -1;
if (X[1]>Y[1]) return 1;
// we had to make the salt order 100% deterministic, so that inter-salt session restore behaves consistently
l = l1 = dynamic_this_salt_length(X);
l2 = dynamic_this_salt_length(Y);
if (l2 < l) l = l2;
l = memcmp(&X[6], &Y[6], l);
if (l) return l;
if (l1==l2) return 0;
if (l1 > l2) return 1;
return -1;
}
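/* Minimal usage sketch (john's loader performs the real sort): ordering an
 * array of pointers to internal-format salts with salt_compare() above, so
 * that the shortest salts come first as required. Array elements are char*
 * (salt pointers); the comparator receives pointers to those elements. */
static void example_sort_salt_pointers(char **salts, unsigned int count)
{
	unsigned int i, j;
	for (i = 1; i < count; ++i) {   /* simple insertion sort, for illustration */
		char *key = salts[i];
		j = i;
		while (j > 0 && salt_compare(&salts[j - 1], &key) > 0) {
			salts[j] = salts[j - 1];
			--j;
		}
		salts[j] = key;
	}
}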
void dynamic_salt_md5(struct db_salt *s) {
MD5_CTX ctx;
int len;
const char *S = *((const char**)s->salt);
MD5_Init(&ctx);
len = dynamic_this_salt_length(S);
MD5_Update(&ctx, S + 6, len);
MD5_Final((unsigned char*)(s->salt_md5), &ctx);
}
/*********************************************************************************
* Gets the binary value from a base-16 hash.
*********************************************************************************/
static void *get_binary(char *_ciphertext)
{
static char *realcipher;
unsigned int i;
char *ciphertext = _ciphertext;
if (!realcipher) realcipher = mem_alloc_tiny(BINARY_SIZE_SHA, MEM_ALIGN_WORD);
if (!strncmp(_ciphertext, "$dynamic_", 9)) {
ciphertext += 9;
while (*ciphertext++ != '$')
;
}
for(i=0;i<BINARY_SIZE;i++)
{
realcipher[i] =
atoi16[ARCH_INDEX(ciphertext[i*2])]*16 +
atoi16[ARCH_INDEX(ciphertext[i*2+1])];
}
return (void *)realcipher;
}
// NOTE NOTE NOTE, we have currently ONLY implemented a non-salted function!!!
static char *source(char *source, void *binary)
{
static char Buf[256];
unsigned char *cpi= (unsigned char*)(binary);
char *cpo = Buf;
unsigned int i;
cpo += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG);
for (i = 0; i < 16; ++i) {
*cpo++ = itoa16[(*cpi)>>4];
*cpo++ = itoa16[*cpi&0xF];
++cpi;
}
*cpo = 0;
return Buf;
}
static char *source_20_hex(char *source, void *binary)
{
static char Buf[256];
unsigned char *cpi= (unsigned char*)(binary);
char *cpo = Buf;
unsigned int i;
cpo += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG);
for (i = 0; i < 20; ++i) {
*cpo++ = itoa16[(*cpi)>>4];
*cpo++ = itoa16[*cpi&0xF];
++cpi;
}
*cpo = 0;
return Buf;
}
static char *source_28_hex(char *source, void *binary)
{
static char Buf[256];
unsigned char *cpi= (unsigned char*)(binary);
char *cpo = Buf;
unsigned int i;
cpo += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG);
for (i = 0; i < 28; ++i) {
*cpo++ = itoa16[(*cpi)>>4];
*cpo++ = itoa16[*cpi&0xF];
++cpi;
}
*cpo = 0;
return Buf;
}
static char *source_32_hex(char *source, void *binary)
{
static char Buf[256];
unsigned char *cpi= (unsigned char*)(binary);
char *cpo = Buf;
unsigned int i;
cpo += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG);
for (i = 0; i < 32; ++i) {
*cpo++ = itoa16[(*cpi)>>4];
*cpo++ = itoa16[*cpi&0xF];
++cpi;
}
*cpo = 0;
return Buf;
}
static char *source_40_hex(char *source, void *binary)
{
static char Buf[256];
unsigned char *cpi= (unsigned char*)(binary);
char *cpo = Buf;
unsigned int i;
cpo += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG);
for (i = 0; i < 40; ++i) {
*cpo++ = itoa16[(*cpi)>>4];
*cpo++ = itoa16[*cpi&0xF];
++cpi;
}
*cpo = 0;
return Buf;
}
static char *source_48_hex(char *source, void *binary)
{
static char Buf[256];
unsigned char *cpi= (unsigned char*)(binary);
char *cpo = Buf;
unsigned int i;
cpo += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG);
for (i = 0; i < 48; ++i) {
*cpo++ = itoa16[(*cpi)>>4];
*cpo++ = itoa16[*cpi&0xF];
++cpi;
}
*cpo = 0;
return Buf;
}
static char *source_64_hex(char *source, void *binary)
{
static char Buf[256];
unsigned char *cpi= (unsigned char*)(binary);
char *cpo = Buf;
unsigned int i;
cpo += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG);
for (i = 0; i < 64; ++i) {
*cpo++ = itoa16[(*cpi)>>4];
*cpo++ = itoa16[*cpi&0xF];
++cpi;
}
*cpo = 0;
return Buf;
}
/*********************************************************************************
* Gets the binary value from a base-64 hash
*********************************************************************************/
static void * binary_b64m(char *ciphertext)
{
unsigned int i;
static unsigned char *b;
char *pos;
if (!b) b = mem_alloc_tiny(64+3, MEM_ALIGN_WORD);
pos = ciphertext;
if (!strncmp(pos, "$dynamic_", 9)) {
pos += 9;
while (*pos++ != '$')
;
}
i = base64_valid_length(pos, e_b64_mime, 0);
base64_convert(pos, e_b64_mime, i, b, e_b64_raw, 64+3, 0);
//printf("\nciphertext=%s\n", ciphertext);
//dump_stuff_msg("binary", b, 16);
return b;
}
static void * binary_b64(char *ciphertext)
{
unsigned int i;
static unsigned char *b;
char *pos;
if (!b) b = mem_alloc_tiny(64+3, MEM_ALIGN_WORD);
pos = ciphertext;
if (!strncmp(pos, "$dynamic_", 9)) {
pos += 9;
while (*pos++ != '$')
;
}
i = base64_valid_length(pos, e_b64_crypt, 0);
base64_convert(pos, e_b64_cryptBS, i, b, e_b64_raw, 64+3, 0);
//printf("\nciphertext=%s\n", ciphertext);
//dump_stuff_msg("binary", b, 16);
return b;
}
static void * binary_b64b(char *ciphertext)
{
unsigned int i;
static unsigned char *b;
char *pos;
if (!b) b = mem_alloc_tiny(64+3, MEM_ALIGN_WORD);
pos = ciphertext;
if (!strncmp(pos, "$dynamic_", 9)) {
pos += 9;
while (*pos++ != '$')
;
}
i = base64_valid_length(pos, e_b64_crypt, 0);
base64_convert(pos, e_b64_crypt, i, b, e_b64_raw, 64+3, 0);
//printf("\nciphertext=%s\n", ciphertext);
//dump_stuff_msg("binary", b, 16);
return b;
}
#define TO_BINARY(b1, b2, b3) \
value = \
(MD5_word)atoi64[ARCH_INDEX(pos[0])] | \
((MD5_word)atoi64[ARCH_INDEX(pos[1])] << 6) | \
((MD5_word)atoi64[ARCH_INDEX(pos[2])] << 12) | \
((MD5_word)atoi64[ARCH_INDEX(pos[3])] << 18); \
pos += 4; \
b[b1] = value >> 16; \
b[b2] = value >> 8; \
b[b3] = value;
static void * binary_b64a(char *ciphertext)
{
static unsigned char *b;
char *pos;
MD5_word value;
if (!b) b = mem_alloc_tiny(16, MEM_ALIGN_WORD);
pos = ciphertext;
if (!strncmp(pos, "$dynamic_", 9)) {
pos += 9;
while (*pos++ != '$')
;
}
TO_BINARY(0, 6, 12);
TO_BINARY(1, 7, 13);
TO_BINARY(2, 8, 14);
TO_BINARY(3, 9, 15);
TO_BINARY(4, 10, 5);
b[11] =
(MD5_word)atoi64[ARCH_INDEX(pos[0])] |
((MD5_word)atoi64[ARCH_INDEX(pos[1])] << 6);
MD5_swap((MD5_word*)b,(MD5_word*)b, 4);
return b;
}
/*********************************************************************************
* Gets the binary value from a base-64 hash (such as cisco PIX)
*********************************************************************************/
static void * binary_b64_4x6(char *ciphertext)
{
static ARCH_WORD_32 *b;
unsigned int i;
char *pos;
if (!b) b = mem_alloc_tiny(16, MEM_ALIGN_WORD);
pos = ciphertext;
if (!strncmp(pos, "$dynamic_", 9)) {
pos += 9;
while (*pos++ != '$')
;
}
for(i = 0; i < 4; i++) {
b[i] =
atoi64[ARCH_INDEX(pos[i*4 + 0])] +
(atoi64[ARCH_INDEX(pos[i*4 + 1])] << 6) +
(atoi64[ARCH_INDEX(pos[i*4 + 2])] << 12) +
(atoi64[ARCH_INDEX(pos[i*4 + 3])] << 18);
}
MD5_swap(b,b, 4);
return (void *)b;
}
/*********************************************************************************
* Here is the main mdg_generic fmt_main. NOTE in its default settings, it is
* ready to handle base-16 hashes.
*********************************************************************************/
static struct fmt_main fmt_Dynamic =
{
{
FORMAT_LABEL,
FORMAT_NAME,
#ifdef SIMD_COEF_32
ALGORITHM_NAME,
#else
ALGORITHM_NAME_X86,
#endif
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
#ifdef SIMD_COEF_32
PLAINTEXT_LENGTH,
#else
PLAINTEXT_LENGTH_X86,
#endif
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
#ifdef SIMD_COEF_32
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
#else
MIN_KEYS_PER_CRYPT_X86,
MAX_KEYS_PER_CRYPT_X86,
#endif
#ifdef _OPENMP
FMT_OMP | FMT_OMP_BAD |
#endif
FMT_CASE | FMT_8_BIT,
{ NULL },
dynamic_tests
}, {
init,
done,
fmt_default_reset,
prepare,
valid,
split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
salt_compare,
set_salt,
set_key,
get_key,
clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
/**************************************************************
**************************************************************
**************************************************************
**************************************************************
* These are the md5 'primitive' functions that are used by
* the built-in expressions, and by the expression generator.
* They load passwords, salts, user ids, do crypts, convert
* crypts into base-16, etc. They are pretty encompassing,
* and have been found to be able to do most anything with
* a standard 'base-16' md5 hash, salted or unsalted, that
* fits a 'simple' php-style expression.
**************************************************************
**************************************************************
**************************************************************
*************************************************************/
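/* Illustration only (not part of the format): a 'simple' php-style expression
 * such as md5($s.$p) is realised as a short script of these primitives, e.g.
 * clean the input, append the salt, append the keys, then run the MD5 crypt
 * primitive. The append/clean helpers appear below in this file; the crypt
 * primitive name used here is an assumption for the sketch. */
static const char *example_md5_s_dot_p_script[] = {
	"DynamicFunc__clean_input",  /* empty input buffer 1                   */
	"DynamicFunc__append_salt",  /* input1 = $s                            */
	"DynamicFunc__append_keys",  /* input1 = $s.$p                         */
	"DynamicFunc__crypt_md5",    /* crypt_out = md5($s.$p); name assumed   */
};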
static void Dynamic_Load_itoa16_w2()
{
char buf[3];
unsigned int i;
for (i = 0; i < 256; ++i)
{
sprintf(buf, "%X%X", i>>4, i&0xF);
memcpy(&(itoa16_w2_u[i]), buf, 2);
sprintf(buf, "%x%x", i>>4, i&0xF);
memcpy(&(itoa16_w2_l[i]), buf, 2);
}
}
#ifdef SIMD_COEF_32
/**************************************************************
**************************************************************
* Here are some 'helpers' to our helpers, when it comes to
* loading data into the mmx/sse buffers. We have several
* of these common helper functions, and use them in 'most'
* of the helper primitives, instead of inlining the same
* code in each of them.
**************************************************************
*************************************************************/
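/* Sketch of the interleaved SIMD buffer layout implied by the pointer math in
 * the helpers below (little-endian case): dword i of lane k lives at dword
 * offset i*SIMD_COEF_32 + k, so bytes of one candidate are contiguous only
 * within a 4 byte group. The GETPOS macro defined earlier in this file is
 * authoritative; this is only an illustration of the same idea. */
static inline unsigned int example_getpos_le(unsigned int i, unsigned int lane)
{
	return (i & ~3U) * SIMD_COEF_32 + lane * 4 + (i & 3);
}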
static void __SSE_append_output_base16_to_input(ARCH_WORD_32 *IPBdw, unsigned char *CRY, unsigned int idx_mod)
{
// #3
// 5955K (core2, $dynamic_2$)
// 1565K (core2, $dynamic_1006$)
// 3381K (ath64, $dynamic_2$)
// 824.7k (ath64, $dynamic_1006$)
#undef inc
#define inc ((SIMD_COEF_32-1) * 2)
unsigned short *IPBw = (unsigned short*)IPBdw;
IPBw += (idx_mod<<1);
CRY += (idx_mod<<2);
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
CRY += (inc<<1);
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
CRY += (inc<<1);
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
CRY += (inc<<1);
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
*IPBw = 0x80;
#undef inc
}
static void __SSE_overwrite_output_base16_to_input(ARCH_WORD_32 *IPBdw, unsigned char *CRY, unsigned int idx_mod)
{
// #3
// 5955K (core2, $dynamic_2$)
// 1565K (core2, $dynamic_1006$)
// 3381K (ath64, $dynamic_2$)
// 824.7k (ath64, $dynamic_1006$)
#undef inc
#define inc ((SIMD_COEF_32-1) * 2)
unsigned short *IPBw = (unsigned short *)IPBdw;
IPBw += (idx_mod<<1);
CRY += (idx_mod<<2);
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
CRY += (inc<<1);
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
CRY += (inc<<1);
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
CRY += (inc<<1);
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
*IPBw++ = itoa16_w2[*CRY++];
*IPBw++ = itoa16_w2[*CRY++];
IPBw += inc;
#undef inc
}
static void __SSE_append_output_base16_to_input_semi_aligned_2(unsigned int ip, ARCH_WORD_32 *IPBdw, unsigned char *CRY, unsigned int idx_mod)
{
// #1
// 9586k/4740k (core2, $dynamic_9$)
// 5113k/4382k (core2,$dynamic_10$)
// (ath64, $dynamic_9$)
// (ath64, $dynamic_10$)
# define inc SIMD_COEF_32
# define incCRY ((SIMD_COEF_32 - 1) * 4)
// Ok, here we are 1/2 off. We are starting in the 'middle' of a DWORD (and end
// in the middle of the last one).
// start our pointers out at the right 32 bit offset into the first MMX/SSE buffer
IPBdw += idx_mod;
IPBdw += (ip>>2)*SIMD_COEF_32;
CRY += (idx_mod<<2);
// first byte handled here.
*IPBdw &= 0xFFFF;
*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
IPBdw += inc;
*IPBdw = (itoa16_w2[*CRY++]);
*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
IPBdw += inc;
*IPBdw = (itoa16_w2[*CRY++]);
CRY += incCRY;
*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
IPBdw += inc;
*IPBdw = (itoa16_w2[*CRY++]);
*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
IPBdw += inc;
*IPBdw = (itoa16_w2[*CRY++]);
CRY += incCRY;
*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
IPBdw += inc;
*IPBdw = (itoa16_w2[*CRY++]);
*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
IPBdw += inc;
*IPBdw = (itoa16_w2[*CRY++]);
CRY += incCRY;
*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
IPBdw += inc;
*IPBdw = (itoa16_w2[*CRY++]);
*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
IPBdw += inc;
*IPBdw = (itoa16_w2[*CRY++]);
// Add the 0x80 at the proper location (offset 0x21)
*IPBdw |= 0x800000;
#undef inc
#undef incCRY
}
static void __SSE_append_output_base16_to_input_semi_aligned_0(unsigned int ip, ARCH_WORD_32 *IPBdw, unsigned char *CRY, unsigned int idx_mod)
{
// #2
// 6083k (core2, $dynamic_2$)
// 1590K (core2, $dynamic_1006$)
// 3537K (ath64, $dynamic_2$)
// 890.3K (ath64, $dynamic_1006$)
#undef inc
#define inc SIMD_COEF_32
#define incCRY (4*SIMD_COEF_32-2)
// start our pointers out at the right 32 bit offset into the first MMX/SSE buffer
IPBdw += idx_mod;
IPBdw += (ip>>2)*SIMD_COEF_32;
CRY += (idx_mod<<2);
*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
IPBdw += inc;
CRY += 2;
*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
IPBdw += inc;
// CRY += (inc*3)+2;
CRY += incCRY;
*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
IPBdw += inc;
CRY += 2;
*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
IPBdw += inc;
// CRY += (inc*3)+2;
CRY += incCRY;
*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
IPBdw += inc;
CRY += 2;
*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
IPBdw += inc;
// CRY += (inc*3)+2;
CRY += incCRY;
*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
IPBdw += inc;
CRY += 2;
*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
// Add the 0x80 at the proper location (offset 0x21)
IPBdw += inc;
*IPBdw = 0x80;
#undef inc
#undef incCRY
}
static void __SSE_append_string_to_input_unicode(unsigned char *IPB, unsigned int idx_mod, unsigned char *cp, unsigned int len, unsigned int bf_ptr, unsigned int bUpdate0x80)
{
unsigned char *cpO;
#if ARCH_LITTLE_ENDIAN
// if big-endian, we gain nothing from this function (since we would have to byte swap)
if (len>1&&!(bf_ptr&1))
{
unsigned int w32_cnt;
if(bf_ptr&2) {
cpO = &IPB[GETPOS(bf_ptr, idx_mod)];
bf_ptr += 2;
*cpO = *cp++;
cpO[1] = 0;
--len;
}
w32_cnt = len>>1;
if (w32_cnt)
{
ARCH_WORD_32 *wpO;
wpO = (ARCH_WORD_32*)&IPB[GETPOS(bf_ptr, idx_mod)];
len -= (w32_cnt<<1);
bf_ptr += (w32_cnt<<2);
do
{
ARCH_WORD_32 x = 0;
x = cp[1];
x <<= 16;
x += cp[0];
*wpO = x;
cp += 2;
wpO += SIMD_COEF_32;
}
while (--w32_cnt);
}
}
#endif
cpO = &IPB[GETPOS(bf_ptr, idx_mod)];
while (len--)
{
*cpO++ = *cp++;
if ( ((++bf_ptr)&3) == 0)
cpO += ((SIMD_COEF_32-1)*4);
*cpO++ = 0;
if ( ((++bf_ptr)&3) == 0)
cpO += ((SIMD_COEF_32-1)*4);
}
if (bUpdate0x80)
*cpO = 0x80;
}
static void __SSE_append_string_to_input(unsigned char *IPB, unsigned int idx_mod, unsigned char *cp, unsigned int len, unsigned int bf_ptr, unsigned int bUpdate0x80)
{
unsigned char *cpO;
// if our insertion point is on an 'even' DWORD, then we copy a DWORD at a time, as long as we can.
// This provides quite a nice speedup.
#if ARCH_LITTLE_ENDIAN
// if big-endian, we gain nothing from this function (since we would have to byte swap)
if (len>3&&(bf_ptr&3)) {
cpO = &IPB[GETPOS(bf_ptr, idx_mod)];
while (len--)
{
*cpO++ = *cp++;
if ( ((++bf_ptr)&3) == 0) {
if (!len) {
if (bUpdate0x80)
*cpO = 0x80;
return;
}
break;
}
}
}
if (len>3&&!(bf_ptr&3))
{
unsigned int w32_cnt = len>>2;
if (w32_cnt)
{
ARCH_WORD_32 *wpO;
wpO = (ARCH_WORD_32*)&IPB[GETPOS(bf_ptr, idx_mod)];
len -= (w32_cnt<<2);
bf_ptr += (w32_cnt<<2);
do
{
*wpO = *((ARCH_WORD_32*)cp);
cp += 4;
wpO += SIMD_COEF_32;
}
while (--w32_cnt);
}
if (!len) {
if (bUpdate0x80)
IPB[GETPOS(bf_ptr, idx_mod)] = 0x80;
return;
}
}
#endif
cpO = &IPB[GETPOS(bf_ptr, idx_mod)];
while (len--)
{
*cpO++ = *cp++;
if ( ((++bf_ptr)&3) == 0)
cpO += ((SIMD_COEF_32-1)*4);
}
if (bUpdate0x80)
*cpO = 0x80;
}
#endif // #ifdef SIMD_COEF_32 from way above.
static inline void __append_string(DYNA_OMP_PARAMSm unsigned char *Str, unsigned int len)
{
unsigned int j;
unsigned int til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
if (!md5_unicode_convert_get(tid)) {
for (; j < til; ++j) {
unsigned int idx = j/SIMD_COEF_32;
unsigned int idx_mod = j&(SIMD_COEF_32-1);
unsigned int bf_ptr = total_len[idx][idx_mod];
total_len[idx][idx_mod] += len;
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,Str,len,bf_ptr,1);
}
} else {
if (options.target_enc != ASCII && options.target_enc != ISO_8859_1) {
UTF16 utf16Str[27+1]; // 27 chars is the max that fits in SSE without overflow, so that is where we limit it for now
int outlen;
outlen = enc_to_utf16(utf16Str, 27, Str, len) * sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
for (; j < til; ++j) {
unsigned int idx = j/SIMD_COEF_32;
unsigned int idx_mod = j&(SIMD_COEF_32-1);
unsigned int bf_ptr = total_len[idx][idx_mod];
total_len[idx][idx_mod] += outlen;
// note we use the non-unicode variant, since we have already converted to unicode and computed the length properly
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)utf16Str,outlen,bf_ptr,1);
}
} else {
for (; j < til; ++j) {
unsigned int idx = j/SIMD_COEF_32;
unsigned int idx_mod = j&(SIMD_COEF_32-1);
unsigned int bf_ptr = total_len[idx][idx_mod];
total_len[idx][idx_mod] += len << 1;
__SSE_append_string_to_input_unicode(input_buf[idx].c,idx_mod,Str,len,bf_ptr,1);
}
}
}
return;
}
#endif
if (md5_unicode_convert_get(tid)) {
if (options.target_enc != ASCII && options.target_enc != ISO_8859_1) {
UTF16 utf16Str[EFFECTIVE_MAX_LENGTH / 3 + 1];
int outlen;
outlen = enc_to_utf16(utf16Str, EFFECTIVE_MAX_LENGTH / 3, Str, len) * sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
for (; j < til; ++j) {
unsigned int z;
unsigned char *cp;
unsigned char *cpi = (unsigned char*)utf16Str;
#if MD5_X2
if (j&1)
cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]);
else
#endif
cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]);
for (z = 0; z < outlen; ++z) {
*cp++ = *cpi++;
}
total_len_X86[j] += outlen;
}
} else {
for (; j < til; ++j) {
unsigned int z;
unsigned char *cp;
unsigned char *cpi = Str;
#if MD5_X2
if (j&1)
cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]);
else
#endif
cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]);
for (z = 0; z < len; ++z) {
*cp++ = *cpi++;
*cp++ = 0;
}
total_len_X86[j] += (len<<1);
}
}
} else {
for (; j < til; ++j) {
#if MD5_X2
if (j&1)
memcpy(&(input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]]), Str, len);
else
#endif
memcpy(&(input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]]), Str, len);
total_len_X86[j] += len;
}
}
}
static inline void __append2_string(DYNA_OMP_PARAMSm unsigned char *Str, unsigned int len)
{
unsigned int j;
unsigned int til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
if (!md5_unicode_convert_get(tid)) {
for (; j < til; ++j) {
unsigned int idx = j/SIMD_COEF_32;
unsigned int idx_mod = j&(SIMD_COEF_32-1);
unsigned int bf_ptr = total_len2[idx][idx_mod];
total_len2[idx][idx_mod] += len;
__SSE_append_string_to_input(input_buf2[idx].c,idx_mod,Str,len,bf_ptr,1);
}
} else {
if (options.target_enc != ASCII && options.target_enc != ISO_8859_1) {
UTF16 utf16Str[27+1]; // 27 chars is the max that fits in SSE without overflow, so that is where we limit it for now
int outlen;
outlen = enc_to_utf16(utf16Str, 27, Str, len) * sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
for (; j < til; ++j) {
unsigned int idx = j/SIMD_COEF_32;
unsigned int idx_mod = j&(SIMD_COEF_32-1);
unsigned int bf_ptr = total_len2[idx][idx_mod];
total_len2[idx][idx_mod] += outlen;
// note we use the non-unicode variant of __SSE_append_string_to_input(), since the data is already unicode and the length already computed
__SSE_append_string_to_input(input_buf2[idx].c,idx_mod,(unsigned char*)utf16Str,outlen,bf_ptr,1);
}
} else {
for (; j < til; ++j) {
unsigned int idx = j/SIMD_COEF_32;
unsigned int idx_mod = j&(SIMD_COEF_32-1);
unsigned int bf_ptr = total_len2[idx][idx_mod];
total_len2[idx][idx_mod] += len << 1;
__SSE_append_string_to_input_unicode(input_buf2[idx].c,idx_mod,Str,len,bf_ptr,1);
}
}
}
return;
}
#endif
if (md5_unicode_convert_get(tid)) {
if (options.target_enc != ASCII && options.target_enc != ISO_8859_1) {
UTF16 utf16Str[EFFECTIVE_MAX_LENGTH / 3 + 1];
int outlen;
outlen = enc_to_utf16(utf16Str, EFFECTIVE_MAX_LENGTH / 3, Str, len) * sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
for (; j < til; ++j) {
unsigned int z;
unsigned char *cp;
unsigned char *cpi = (unsigned char*)utf16Str;
#if MD5_X2
if (j&1)
cp = &(input_buf2_X86[j>>MD5_X2].x2.B2[total_len2_X86[j]]);
else
#endif
cp = &(input_buf2_X86[j>>MD5_X2].x1.B[total_len2_X86[j]]);
for (z = 0; z < outlen; ++z) {
*cp++ = *cpi++;
}
total_len2_X86[j] += outlen;
}
} else {
for (; j < til; ++j) {
unsigned int z;
unsigned char *cp;
unsigned char *cpi = Str;
#if MD5_X2
if (j&1)
cp = &(input_buf2_X86[j>>MD5_X2].x2.B2[total_len2_X86[j]]);
else
#endif
cp = &(input_buf2_X86[j>>MD5_X2].x1.B[total_len2_X86[j]]);
for (z = 0; z < len; ++z) {
*cp++ = *cpi++;
*cp++ = 0;
}
total_len2_X86[j] += (len<<1);
}
}
} else {
for (; j < til; ++j) {
#if MD5_X2
if (j&1)
memcpy(&(input_buf2_X86[j>>MD5_X2].x2.b2[total_len2_X86[j]]), Str, len);
else
#endif
memcpy(&(input_buf2_X86[j>>MD5_X2].x1.b[total_len2_X86[j]]), Str, len);
total_len2_X86[j] += len;
}
}
}
void DynamicFunc__setmode_unicode(DYNA_OMP_PARAMS) // DYNA_OMP_PARAMS not used. We use omp_thread_num() instead.
{
md5_unicode_convert_set(1,tid);
}
void DynamicFunc__setmode_normal (DYNA_OMP_PARAMS) // DYNA_OMP_PARAMS not used. We use omp_thread_num() instead.
{
md5_unicode_convert_set(0,tid);
}
/**************************************************************
* DYNAMIC primitive helper function
* Clears the input variable, and input 'lengths'
*************************************************************/
void DynamicFunc__clean_input(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
__nonMP_DynamicFunc__clean_input();
#else
unsigned int i=0;
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int x = first / SIMD_COEF_32;
unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
while (x < y) {
memset(input_buf[x].c, 0, sizeof(input_buf[0]));
memset(total_len[x], 0, SIMD_COEF_32 * sizeof(total_len[0][0]));
++x;
}
return;
}
#endif
for (i = first; i < last; ++i) {
#if MD5_X2
if (i&1)
memset(input_buf_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len_X86[i]));
else
#endif
memset(input_buf_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len_X86[i]));
total_len_X86[i] = 0;
}
#endif
}
void DynamicFunc__clean_input2(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
__nonMP_DynamicFunc__clean_input2();
#else
unsigned int i=0;
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int x = first / SIMD_COEF_32;
unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
while (x < y) {
memset(input_buf2[x].c, 0, sizeof(input_buf2[0]));
memset(total_len2[x], 0, SIMD_COEF_32 * sizeof(total_len2[0][0]));
++x;
}
return;
}
#endif
for (i = first; i < last; ++i) {
#if MD5_X2
if (i&1)
memset(input_buf2_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
else
#endif
memset(input_buf2_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
total_len2_X86[i] = 0;
}
#endif
}
void DynamicFunc__clean_input_full(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
__nonMP_DynamicFunc__clean_input_full();
#else
unsigned int i;
#ifdef SIMD_COEF_32
unsigned int x = first / SIMD_COEF_32;
unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
while (x < y) {
memset(input_buf[x].c, 0, sizeof(input_buf[0]));
memset(total_len[x], 0, SIMD_COEF_32 * sizeof(total_len[0][0]));
++x;
}
#endif
for (i = first; i < last; ++i) {
#if MD5_X2
if (i&1)
memset(input_buf_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len_X86[i]));
else
#endif
memset(input_buf_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len_X86[i]));
total_len_X86[i] = 0;
}
#endif
}
void DynamicFunc__clean_input2_full(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
__nonMP_DynamicFunc__clean_input2_full();
#else
unsigned int i;
#ifdef SIMD_COEF_32
unsigned int x = first / SIMD_COEF_32;
unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
while (x < y) {
memset(input_buf2[x].c, 0, sizeof(input_buf2[0]));
memset(total_len2[x], 0, SIMD_COEF_32 * sizeof(total_len2[0][0]));
++x;
}
#endif
for (i = first; i < last; ++i) {
#if MD5_X2
if (i&1)
memset(input_buf2_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
else
#endif
memset(input_buf2_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
total_len2_X86[i] = 0;
}
#endif
}
void DynamicFunc__clean_input_kwik(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
__nonMP_DynamicFunc__clean_input_kwik();
#else
#ifdef SIMD_COEF_32
unsigned int i;
if (dynamic_use_sse==1) {
unsigned int x = first / SIMD_COEF_32;
unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
while (x < y)
memset(total_len[x++], 0, SIMD_COEF_32 * sizeof(total_len[0][0]));
return;
}
#else
unsigned int i;
#endif
for (i = first; i < last; ++i) {
#if !ARCH_LITTLE_ENDIAN
#if MD5_X2
if (i&1)
memset(input_buf_X86[i>>MD5_X2].x2.b2, 0, total_len_X86[i]+5);
else
#endif
memset(input_buf_X86[i>>MD5_X2].x1.b, 0, total_len_X86[i]+5);
#endif
total_len_X86[i] = 0;
}
#endif
}
void DynamicFunc__clean_input2_kwik(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
__nonMP_DynamicFunc__clean_input2_kwik();
#else
#ifdef SIMD_COEF_32
unsigned int i;
if (dynamic_use_sse==1) {
unsigned int x = first / SIMD_COEF_32;
unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
while (x < y)
memset(total_len2[x++], 0, SIMD_COEF_32 * sizeof(total_len2[0][0]));
return;
}
#else
unsigned int i;
#endif
for (i = first; i < last; ++i) {
#if !ARCH_LITTLE_ENDIAN
#if MD5_X2
if (i&1)
memset(input_buf2_X86[i>>MD5_X2].x2.b2, 0, total_len2_X86[i]+5);
else
#endif
memset(input_buf2_X86[i>>MD5_X2].x1.b, 0, total_len2_X86[i]+5);
#endif
total_len2_X86[i] = 0;
}
#endif
}
/**************************************************************
* DYNAMIC primitive helper function
* Appends all keys to the end of the input variables, and
* updates lengths
*************************************************************/
void DynamicFunc__append_keys(DYNA_OMP_PARAMS)
{
unsigned int j;
unsigned int til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
for (; j < til; ++j) {
unsigned int idx = j/SIMD_COEF_32;
unsigned int idx_mod = j&(SIMD_COEF_32-1);
unsigned int bf_ptr = total_len[idx][idx_mod];
if (md5_unicode_convert_get(tid)) {
if (options.target_enc != ASCII && options.target_enc != ISO_8859_1) {
UTF16 utf16Str[27+1]; // 27 chars is the max that fits in SSE without overflow, so that is where we limit it for now
int outlen;
int maxlen=27;
if (curdat.pSetup->MaxInputLen < maxlen)
maxlen = curdat.pSetup->MaxInputLen;
outlen = enc_to_utf16(utf16Str, maxlen, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
if (outlen <= 0) {
saved_key_len[j] = -outlen / sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
}
total_len[idx][idx_mod] += outlen;
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)utf16Str,outlen,bf_ptr,1);
} else {
total_len[idx][idx_mod] += (saved_key_len[j] << 1);
__SSE_append_string_to_input_unicode(input_buf[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
}
} else {
total_len[idx][idx_mod] += saved_key_len[j];
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
}
}
return;
}
#endif
if (md5_unicode_convert_get(tid)) {
if (options.target_enc != ASCII && options.target_enc != ISO_8859_1) {
for (; j < til; ++j) {
unsigned int z;
unsigned char *cp, *cpi;
UTF16 utf16Str[EFFECTIVE_MAX_LENGTH / 3 + 1];
int outlen;
outlen = enc_to_utf16(utf16Str, EFFECTIVE_MAX_LENGTH / 3, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
if (outlen <= 0) {
saved_key_len[j] = -outlen / sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
}
#if MD5_X2
if (j&1)
cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]);
else
#endif
cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]);
for (cpi = (unsigned char*)utf16Str, z = 0; z < outlen; ++z)
*cp++ = *cpi++;
total_len_X86[j] += outlen;
}
} else {
for (; j < til; ++j) {
unsigned int z;
unsigned char *cp, *cpi = (unsigned char*)saved_key[j];
#if MD5_X2
if (j&1)
cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]);
else
#endif
cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]);
for (z = 0; z < saved_key_len[j]; ++z) {
*cp++ = *cpi++;
*cp++ = 0;
}
total_len_X86[j] += (saved_key_len[j]<<1);
}
}
} else {
for (; j < til; ++j) {
#if MD5_X2
if (j&1)
memcpy(&(input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]]), saved_key[j], saved_key_len[j]);
else
#endif
memcpy(&(input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]]), saved_key[j], saved_key_len[j]);
total_len_X86[j] += saved_key_len[j];
}
}
}
// DynamicFunc__append_keys_pad16
// append the array of keys to the array input1[], padding with nulls to 16 bytes if the input is shorter.
// Needed for net-md5 and net-sha1 formats.
void DynamicFunc__append_keys_pad16(DYNA_OMP_PARAMS)
{
unsigned int j;
unsigned int til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
for (; j < til; ++j) {
unsigned int idx = j/SIMD_COEF_32;
unsigned int idx_mod = j&(SIMD_COEF_32-1);
unsigned int bf_ptr = total_len[idx][idx_mod];
saved_key[j][saved_key_len[j]] = 0; // so strncpy 'works'
if (saved_key_len[j] < 16) {
char buf[24];
strncpy(buf, saved_key[j], 18);
total_len[idx][idx_mod] += 16;
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)buf,16,bf_ptr,1);
} else {
total_len[idx][idx_mod] += saved_key_len[j];
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
}
}
return;
}
#endif
for (; j < til; ++j) {
saved_key[j][saved_key_len[j]] = 0; // so strncpy 'works'
#if MD5_X2
if (j&1)
strncpy(&(input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]]), saved_key[j], 17);
else
#endif
strncpy(&(input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]]), saved_key[j], 17);
total_len_X86[j] += 16;
}
}
void DynamicFunc__append_keys_pad20(DYNA_OMP_PARAMS)
{
unsigned int j;
unsigned int til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
for (; j < til; ++j) {
unsigned int idx = j/SIMD_COEF_32;
unsigned int idx_mod = j&(SIMD_COEF_32-1);
unsigned int bf_ptr = total_len[idx][idx_mod];
saved_key[j][saved_key_len[j]] = 0; // so strncpy 'works'
if (saved_key_len[j] < 20) {
char buf[28];
strncpy(buf, saved_key[j], 22);
total_len[idx][idx_mod] += 20;
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)buf,20,bf_ptr,1);
} else {
total_len[idx][idx_mod] += saved_key_len[j];
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
}
}
return;
}
#endif
for (; j < til; ++j) {
saved_key[j][saved_key_len[j]] = 0; // so strncpy 'works'
#if MD5_X2
if (j&1)
strncpy(&(input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]]), saved_key[j], 21);
else
#endif
strncpy(&(input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]]), saved_key[j], 21);
total_len_X86[j] += 20;
}
}
/**************************************************************
* DYNAMIC primitive helper function
* Appends all keys to the end of the 2nd input variables, and
* updates lengths
*************************************************************/
void DynamicFunc__append_keys2(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
for (; j < til; ++j) {
unsigned int idx = j/SIMD_COEF_32;
unsigned int idx_mod = j&(SIMD_COEF_32-1);
unsigned int bf_ptr = total_len2[idx][idx_mod];
if (md5_unicode_convert_get(tid)) {
if (options.target_enc != ASCII && options.target_enc != ISO_8859_1) {
UTF16 utf16Str[27+1]; // 27 chars is the max that fits in SSE without overflow, so that is where we limit it for now
int outlen;
int maxlen=27;
if (curdat.pSetup->MaxInputLen < maxlen)
maxlen = curdat.pSetup->MaxInputLen;
outlen = enc_to_utf16(utf16Str, maxlen, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
if (outlen <= 0) {
saved_key_len[j] = -outlen / sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
}
total_len2[idx][idx_mod] += outlen;
__SSE_append_string_to_input(input_buf2[idx].c,idx_mod,(unsigned char*)utf16Str,outlen,bf_ptr,1);
} else {
total_len2[idx][idx_mod] += (saved_key_len[j] << 1);
__SSE_append_string_to_input_unicode(input_buf2[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
}
} else {
total_len2[idx][idx_mod] += saved_key_len[j];
__SSE_append_string_to_input(input_buf2[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
}
}
return;
}
#endif
if (md5_unicode_convert_get(tid)) {
if (options.target_enc != ASCII && options.target_enc != ISO_8859_1) {
for (; j < til; ++j) {
unsigned int z;
unsigned char *cp, *cpi;
UTF16 utf16Str[EFFECTIVE_MAX_LENGTH / 3 + 1];
int outlen;
outlen = enc_to_utf16(utf16Str, EFFECTIVE_MAX_LENGTH / 3, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
if (outlen <= 0) {
saved_key_len[j] = -outlen / sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
}
#if MD5_X2
if (j&1)
cp = &(input_buf2_X86[j>>MD5_X2].x2.B2[total_len2_X86[j]]);
else
#endif
cp = &(input_buf2_X86[j>>MD5_X2].x1.B[total_len2_X86[j]]);
for (cpi = (unsigned char*)utf16Str, z = 0; z < outlen; ++z)
*cp++ = *cpi++;
total_len2_X86[j] += outlen;
}
} else {
for (; j < til; ++j) {
unsigned int z;
unsigned char *cp, *cpi = (unsigned char*)saved_key[j];
#if MD5_X2
if (j&1)
cp = &(input_buf2_X86[j>>MD5_X2].x2.B2[total_len2_X86[j]]);
else
#endif
cp = &(input_buf2_X86[j>>MD5_X2].x1.B[total_len2_X86[j]]);
for (z = 0; z < saved_key_len[j]; ++z) {
*cp++ = *cpi++;
*cp++ = 0;
}
total_len2_X86[j] += (saved_key_len[j]<<1);
}
}
} else {
for (; j < til; ++j) {
#if MD5_X2
if (j&1)
memcpy(&(input_buf2_X86[j>>MD5_X2].x2.b2[total_len2_X86[j]]), saved_key[j], saved_key_len[j]);
else
#endif
memcpy(&(input_buf2_X86[j>>MD5_X2].x1.b[total_len2_X86[j]]), saved_key[j], saved_key_len[j]);
total_len2_X86[j] += saved_key_len[j];
}
}
}
void DynamicFunc__set_input_len_16(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int k;
j /= SIMD_COEF_32;
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
for (; j < til; ++j)
{
// If length is < 16, then remove existing end of buffer marker, and then set
// one at offset 16
for (k = 0; k < SIMD_COEF_32; ++k) {
unsigned int this_item_len = total_len[j][k];
if (this_item_len < 16)
input_buf[j].c[GETPOS(this_item_len, k&(SIMD_COEF_32-1))] = 0x00;
input_buf[j].c[GETPOS(16, k&(SIMD_COEF_32-1))] = 0x80;
total_len[j][k] = 16;
}
}
return;
}
#endif
for (; j < til; ++j)
{
// TODO: this code MAY need buffer cleaned up if we are using md5_go code!!!
#if MD5_X2
if (j&1) {
while (total_len_X86[j] < 16)
input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]++] = 0;
}
else
#endif
{while (total_len_X86[j] < 16)
input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]++] = 0;}
total_len_X86[j] = 16;
}
}
void DynamicFunc__set_input2_len_16(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int k;
j /= SIMD_COEF_32;
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
for (; j < til; ++j)
{
// If length is < 16, then remove existing end of buffer marker, and then set
// one at offset 16
for (k = 0; k < SIMD_COEF_32; ++k) {
unsigned int this_item_len = total_len2[j][k];
if (this_item_len < 16)
input_buf2[j].c[GETPOS(this_item_len, k&(SIMD_COEF_32-1))] = 0x00;
input_buf2[j].c[GETPOS(16, k&(SIMD_COEF_32-1))] = 0x80;
total_len2[j][k] = 16;
}
}
return;
}
#endif
for (; j < til; ++j)
{
// TODO: this code MAY need buffer cleaned up if we are using md5_go code!!!
#if MD5_X2
if (j&1) {
while (total_len2_X86[j] < 16)
input_buf2_X86[j>>MD5_X2].x2.b2[total_len2_X86[j]++] = 0;
}
else
#endif
{while (total_len2_X86[j] < 16)
input_buf2_X86[j>>MD5_X2].x1.b[total_len2_X86[j]++] = 0;}
total_len2_X86[j] = 16;
}
}
void DynamicFunc__set_input_len_20(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int k;
j /= SIMD_COEF_32;
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
for (; j < til; ++j)
{
// If length is < 20, then remove existing end of buffer marker, and then set
// one at offset 20
for (k = 0; k < SIMD_COEF_32; ++k) {
unsigned int this_item_len = total_len[j][k];
if (this_item_len < 20)
input_buf[j].c[GETPOS(this_item_len, k&(SIMD_COEF_32-1))] = 0x00;
input_buf[j].c[GETPOS(20, k&(SIMD_COEF_32-1))] = 0x80;
total_len[j][k] = 20;
}
}
return;
}
#endif
for (; j < til; ++j)
{
#if MD5_X2
if (j&1) {
while (total_len_X86[j] < 20)
input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]++] = 0;
}
else
#endif
{while (total_len_X86[j] < 20)
input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]++] = 0;}
total_len_X86[j] = 20;
}
}
void DynamicFunc__set_input2_len_20(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int k;
j /= SIMD_COEF_32;
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
for (; j < til; ++j)
{
// If length is < 20, then remove existing end of buffer marker, and then set
// one at offset 20
for (k = 0; k < SIMD_COEF_32; ++k) {
unsigned int this_item_len = total_len2[j][k];
if (this_item_len < 20)
input_buf2[j].c[GETPOS(this_item_len, k&(SIMD_COEF_32-1))] = 0x00;
input_buf2[j].c[GETPOS(20, k&(SIMD_COEF_32-1))] = 0x80;
total_len2[j][k] = 20;
}
}
return;
}
#endif
for (; j < til; ++j)
{
#if MD5_X2
if (j&1) {
while (total_len2_X86[j] < 20)
input_buf2_X86[j>>MD5_X2].x2.b2[total_len2_X86[j]++] = 0;
}
else
#endif
{while (total_len2_X86[j] < 20)
input_buf2_X86[j>>MD5_X2].x1.b[total_len2_X86[j]++] = 0;}
total_len2_X86[j] = 20;
}
}
void DynamicFunc__set_input_len_32(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
for (; j < til; ++j)
total_len_X86[j] = 32;
}
void DynamicFunc__set_input_len_32_cleartop(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
j /= SIMD_COEF_32;
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
for (; j < til; ++j)
{
unsigned int k;
for (k = 0; k < SIMD_COEF_32; ++k) {
input_buf[j].c[GETPOS(32, k&(SIMD_COEF_32-1))] = 0x80;
total_len[j][k] = 32;
}
}
return;
}
#endif
for (; j < til; ++j) {
total_len_X86[j] = 32;
#if !ARCH_LITTLE_ENDIAN
#if MD5_X2
if (j&1) {
//MD5_swap(input_buf_X86[j>>MD5_X2].x2.w2, input_buf2_X86[j>>MD5_X2].x2.w2, 8);
memset(&(input_buf_X86[j>>MD5_X2].x2.B2[32]), 0, 24);
}
else
#endif
{
//MD5_swap(input_buf_X86[j>>MD5_X2].x1.w, input_buf2_X86[j>>MD5_X2].x1.w, 8);
memset(&(input_buf_X86[j>>MD5_X2].x1.B[32]), 0, 24);
}
#endif
}
}
void DynamicFunc__set_input2_len_32(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
for (; j < til; ++j)
total_len2_X86[j] = 32;
}
void DynamicFunc__set_input2_len_32_cleartop(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
j /= SIMD_COEF_32;
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
for (; j < til; ++j)
{
unsigned int k;
for (k = 0; k < SIMD_COEF_32; ++k) {
input_buf2[j].c[GETPOS(32, k&(SIMD_COEF_32-1))] = 0x80;
total_len2[j][k] = 32;
}
}
return;
}
#endif
for (; j < til; ++j)
{
total_len2_X86[j] = 32;
#if !ARCH_LITTLE_ENDIAN
#if MD5_X2
if (j&1) {
//MD5_swap(input_buf2_X86[j>>MD5_X2].x2.w2, input_buf2_X86[j>>MD5_X2].x2.w2, 8);
memset(&(input_buf2_X86[j>>MD5_X2].x2.B2[32]), 0, 24);
}
else
#endif
{
//MD5_swap(input_buf2_X86[j>>MD5_X2].x1.w, input_buf2_X86[j>>MD5_X2].x1.w, 8);
memset(&(input_buf2_X86[j>>MD5_X2].x1.B[32]), 0, 24);
}
#endif
}
}
void DynamicFunc__set_input_len_40(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
for (; j < til; ++j)
total_len_X86[j] = 40;
}
void DynamicFunc__set_input2_len_40(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
for (; j < til; ++j)
total_len2_X86[j] = 40;
}
void DynamicFunc__set_input2_len_40_cleartop(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
j /= SIMD_COEF_32;
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
for (; j < til; ++j)
{
unsigned int k;
for (k = 0; k < SIMD_COEF_32; ++k) {
input_buf2[j].c[GETPOS(40, k&(SIMD_COEF_32-1))] = 0x80;
total_len2[j][k] = 40;
}
}
return;
}
#endif
for (; j < til; ++j)
{
total_len2_X86[j] = 40;
#if !ARCH_LITTLE_ENDIAN
#if MD5_X2
if (j&1) {
memset(&(input_buf2_X86[j>>MD5_X2].x2.B2[40]), 0, 16);
}
else
#endif
{
memset(&(input_buf2_X86[j>>MD5_X2].x1.B[40]), 0, 16);
}
#endif
}
}
void DynamicFunc__set_input_len_64(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_64 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len_X86[j] = 64;
}
void DynamicFunc__set_input2_len_64(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_64 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len2_X86[j] = 64;
}
void DynamicFunc__set_input_len_100(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_100 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j) {
unsigned char *cp;
#if MD5_X2
if (j&1)
cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]);
else
#endif
cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]);
while (*cp)
*cp++ = 0;
total_len_X86[j] = 100;
}
}
void DynamicFunc__set_input_len_24(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_24 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len_X86[j] = 24;
}
void DynamicFunc__set_input_len_28(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_28 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len_X86[j] = 28;
}
void DynamicFunc__set_input_len_48(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_48 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len_X86[j] = 48;
}
void DynamicFunc__set_input_len_56(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_56 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len_X86[j] = 56;
}
void DynamicFunc__set_input_len_80(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_80 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len_X86[j] = 80;
}
void DynamicFunc__set_input_len_96(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_96 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len_X86[j] = 96;
}
void DynamicFunc__set_input_len_112(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_112 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len_X86[j] = 112;
}
void DynamicFunc__set_input_len_128(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_128 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len_X86[j] = 128;
}
void DynamicFunc__set_input_len_160(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_160 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len_X86[j] = 160;
}
void DynamicFunc__set_input_len_192(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_192 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len_X86[j] = 192;
}
void DynamicFunc__set_input_len_256(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_256 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len_X86[j] = 256;
}
void DynamicFunc__set_input2_len_24(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_24 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len2_X86[j] = 24;
}
void DynamicFunc__set_input2_len_28(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_28 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len2_X86[j] = 28;
}
void DynamicFunc__set_input2_len_48(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_48 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len2_X86[j] = 48;
}
void DynamicFunc__set_input2_len_56(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_56 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len2_X86[j] = 56;
}
void DynamicFunc__set_input2_len_80(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_80 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len2_X86[j] = 80;
}
void DynamicFunc__set_input2_len_96(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_96 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len2_X86[j] = 96;
}
void DynamicFunc__set_input2_len_112(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_112 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len2_X86[j] = 112;
}
void DynamicFunc__set_input2_len_128(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_128 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len2_X86[j] = 128;
}
void DynamicFunc__set_input2_len_160(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_160 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len2_X86[j] = 160;
}
void DynamicFunc__set_input2_len_192(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_192 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len2_X86[j] = 192;
}
void DynamicFunc__set_input2_len_256(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse == 1)
error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_256 in SSE2/MMX mode\n");
#endif
for (; j < til; ++j)
total_len2_X86[j] = 256;
}
/**************************************************************
* DYNAMIC primitive helper function
* Appends the salt to the end of the input variables, and
* updates lengths
*************************************************************/
void DynamicFunc__append_salt(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm cursalt, saltlen);
}
/**************************************************************
* DYNAMIC primitive helper function
* Appends the salt to the end of the 2nd input variables, and
* updates lengths
*************************************************************/
void DynamicFunc__append_salt2(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm cursalt, saltlen);
}
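/**************************************************************
* DYNAMIC primitive helper function
* Appends the contents of input 2 to the end of input 1 and
* updates the input 1 lengths.
*************************************************************/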
void DynamicFunc__append_input_from_input2(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
til = last;
i = first;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int j, k;
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
for (; i < til; ++i)
{
for (j = 0; j < SIMD_COEF_32; ++j)
{
unsigned int start_len = total_len[i][j];
unsigned int len1 = total_len2[i][j];
for (k = 0; k < len1; ++k)
input_buf[i].c[GETPOS((k+start_len), j)] = input_buf2[i].c[GETPOS(k,j)];
input_buf[i].c[GETPOS((len1+start_len), j)] = 0x80;
total_len[i][j] += len1;
}
}
return;
}
#endif
for (; i < til; ++i)
{
#if MD5_X2
if (i&1)
memcpy(&(input_buf_X86[i>>MD5_X2].x2.b2[total_len_X86[i]]), input_buf2_X86[i>>MD5_X2].x2.b2, total_len2_X86[i]);
else
#endif
memcpy(&(input_buf_X86[i>>MD5_X2].x1.b[total_len_X86[i]]), input_buf2_X86[i>>MD5_X2].x1.b, total_len2_X86[i]);
total_len_X86[i] += total_len2_X86[i];
}
}
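/**************************************************************
* DYNAMIC primitive helper function
* Appends the contents of input 1 to the end of input 2 and
* updates the input 2 lengths.
*************************************************************/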
void DynamicFunc__append_input2_from_input(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
til = last;
i = first;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int j, k;
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
for (; i < til; ++i)
{
for (j = 0; j < SIMD_COEF_32; ++j)
{
unsigned int start_len = total_len2[i][j];
unsigned int len1 = total_len[i][j];
for (k = 0; k < len1; ++k)
input_buf2[i].c[GETPOS((k+start_len), j)] = input_buf[i].c[GETPOS(k,j)];
input_buf2[i].c[GETPOS((len1+start_len), j)] = 0x80;
total_len2[i][j] += len1;
}
}
return;
}
#endif
for (; i < til; ++i)
{
#if MD5_X2
if (i&1)
memcpy(&(input_buf2_X86[i>>MD5_X2].x2.b2[total_len2_X86[i]]), input_buf_X86[i>>MD5_X2].x2.b2, total_len_X86[i]);
else
#endif
memcpy(&(input_buf2_X86[i>>MD5_X2].x1.b[total_len2_X86[i]]), input_buf_X86[i>>MD5_X2].x1.b, total_len_X86[i]);
total_len2_X86[i] += total_len_X86[i];
}
}
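/**************************************************************
* DYNAMIC primitive helper function
* Appends input 1 to itself (doubling the string), so the stored
* length ends up twice its original value.
*************************************************************/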
void DynamicFunc__append_input_from_input(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
til = last;
i = first;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int j, k;
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
for (; i < til; ++i)
{
for (j = 0; j < SIMD_COEF_32; ++j)
{
unsigned int start_len = total_len[i][j];
for (k = 0; k < start_len; ++k)
input_buf[i].c[GETPOS((k+start_len), j)] = input_buf[i].c[GETPOS(k,j)];
input_buf[i].c[GETPOS((start_len+start_len), j)] = 0x80;
total_len[i][j] += start_len;
}
}
return;
}
#endif
for (; i < til; ++i)
{
#if MD5_X2
if (i&1)
memcpy(&(input_buf_X86[i>>MD5_X2].x2.b2[total_len_X86[i]]), input_buf_X86[i>>MD5_X2].x2.b2, total_len_X86[i]);
else
#endif
memcpy(&(input_buf_X86[i>>MD5_X2].x1.b[total_len_X86[i]]), input_buf_X86[i>>MD5_X2].x1.b, total_len_X86[i]);
total_len_X86[i] <<= 1;
}
}
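/**************************************************************
* DYNAMIC primitive helper function
* Appends input 2 to itself (doubling the string), so the stored
* length ends up twice its original value.
*************************************************************/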
void DynamicFunc__append_input2_from_input2(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
til = last;
i = first;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int j, k;
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
for (; i < til; ++i)
{
for (j = 0; j < SIMD_COEF_32; ++j)
{
unsigned int start_len = total_len2[i][j];
for (k = 0; k < start_len; ++k)
input_buf2[i].c[GETPOS((k+start_len), j)] = input_buf2[i].c[GETPOS(k,j)];
input_buf2[i].c[GETPOS((start_len+start_len), j)] = 0x80;
total_len2[i][j] += start_len;
}
}
return;
}
#endif
for (; i < til; ++i)
{
#if MD5_X2
if (i&1)
memcpy(&(input_buf2_X86[i>>MD5_X2].x2.b2[total_len2_X86[i]]), input_buf2_X86[i>>MD5_X2].x2.b2, total_len2_X86[i]);
else
#endif
memcpy(&(input_buf2_X86[i>>MD5_X2].x1.b[total_len2_X86[i]]), input_buf2_X86[i>>MD5_X2].x1.b, total_len2_X86[i]);
total_len2_X86[i] <<= 1;
}
}
#ifdef SIMD_PARA_MD5
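// Loads the message bit length of each SIMD lane into word 14 of the
// interleaved MD5 block (byte length << 3); side 0 is input_buf,
// side 1 is input_buf2.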
static void SSE_Intrinsics_LoadLens_md5(int side, int i)
{
ARCH_WORD_32 *p;
unsigned int j, k;
if (side == 0)
{
for (j = 0; j < SIMD_PARA_MD5; j++)
{
p = input_buf[i+j].w;
for (k = 0; k < SIMD_COEF_32; k++)
p[14*SIMD_COEF_32+k] = total_len[i+j][k] << 3;
}
}
else
{
for (j = 0; j < SIMD_PARA_MD5; j++)
{
p = input_buf2[i+j].w;
for (k = 0; k < SIMD_COEF_32; k++)
p[14*SIMD_COEF_32+k] = total_len2[i+j][k] << 3;
}
}
}
#endif
#ifdef SIMD_PARA_MD4
static void SSE_Intrinsics_LoadLens_md4(int side, int i)
{
ARCH_WORD_32 *p;
unsigned int j, k;
if (side == 0)
{
for (j = 0; j < SIMD_PARA_MD4; j++)
{
p = input_buf[i+j].w;
for (k = 0; k < SIMD_COEF_32; k++)
p[14*SIMD_COEF_32+k] = total_len[i+j][k] << 3;
}
}
else
{
for (j = 0; j < SIMD_PARA_MD4; j++)
{
p = input_buf2[i+j].w;
for (k = 0; k < SIMD_COEF_32; k++)
p[14*SIMD_COEF_32+k] = total_len2[i+j][k] << 3;
}
}
}
#endif
/**************************************************************
* DYNAMIC primitive helper function
* Encrypts the data in the first input field. The data is
* still in the binary encrypted format, in the crypt_key.
* We do not yet convert to base-16. This is so we can output
* as base-16 now or, if we later add base-64, output in that
* format instead.
*************************************************************/
void DynamicFunc__crypt_md5(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
til = last;
i = first;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
if (curdat.store_keys_in_input) {
for (; i < til; i += SIMD_PARA_MD5) {
SIMDmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
}
} else {
for (; i < til; i += SIMD_PARA_MD5) {
SSE_Intrinsics_LoadLens_md5(0, i);
SIMDmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
}
}
return;
}
#endif
for (; i < til; ++i) {
#if MD5_X2
unsigned int len[2];
len[0] = total_len_X86[i++];
if (i == m_count)
len[1] = 0;
else
len[1] = total_len_X86[i];
#else
unsigned int len = total_len_X86[i];
#endif
DoMD5(input_buf_X86[i>>MD5_X2], len, crypt_key_X86[i>>MD5_X2]);
}
}
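/**************************************************************
* DYNAMIC primitive helper function
* Same as DynamicFunc__crypt_md5, but runs MD4 over the first
* input field, leaving the binary result in crypt_key.
*************************************************************/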
void DynamicFunc__crypt_md4(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
til = last;
i = first;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
if (curdat.store_keys_in_input) {
for (; i < til; i += SIMD_PARA_MD4) {
SIMDmd4body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
}
} else {
for (; i < til; i += SIMD_PARA_MD4) {
SSE_Intrinsics_LoadLens_md4(0, i);
SIMDmd4body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
}
}
return;
}
#endif
for (; i < til; ++i) {
// MD5_X2 sets our input buffers and crypt keys up in 'double' format. Thus, we HAVE
// to treat them just like we do in MD5. The macro hides the details.
#if MD5_X2
unsigned int len[2];
len[0] = total_len_X86[i++];
if (i == m_count)
len[1] = 0;
else
len[1] = total_len_X86[i];
#else
unsigned int len = total_len_X86[i];
#endif
DoMD4(input_buf_X86[i>>MD5_X2], len, crypt_key_X86[i>>MD5_X2]);
}
}
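// Special-purpose crypt used by the 'PO' style dynamic format: builds
// salt.'Y'.password.0xF7.salt directly in the input buffer (the commented
// script steps below show the equivalent primitives), then MD5s it into
// crypt_key.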
void DynamicFunc__POCrypt(DYNA_OMP_PARAMS)
{
unsigned int i, j;
unsigned int til, len;
unsigned char *pBuf;
#if MD5_X2
unsigned char *pBuf2;
unsigned int lens[2];
#endif
#ifdef _OPENMP
til = last;
i = first;
#else
i = 0;
til = m_count;
#endif
//DynamicFunc__clean_input_kwik();
//DynamicFunc__append_salt,
//DynamicFunc__append_input1_from_CONST1,
//DynamicFunc__append_keys,
//DynamicFunc__append_input1_from_CONST2,
//DynamicFunc__append_salt,
//DynamicFunc__crypt_md5,
pBuf = input_buf_X86[i>>MD5_X2].x1.B;
#if MD5_X2
pBuf2 = input_buf_X86[i>>MD5_X2].x2.B2;
memset(pBuf2, 0, sizeof(input_buf_X86[i>>MD5_X2].x2.B2));
memcpy(pBuf2, cursalt, 32);
pBuf2[32] = 'Y';
#endif
memset(pBuf, 0, sizeof(input_buf_X86[i>>MD5_X2].x1.b));
memcpy(pBuf, cursalt, 32);
pBuf[32] = 'Y';
for (j = i; j < til; ++j) {
len = saved_key_len[j];
memcpy(&pBuf[33], saved_key[j], len);
pBuf[33+len] = 0xf7;
memcpy(&pBuf[34+len], cursalt, 32);
#if MD5_X2
lens[0] = len+66; // len from the 'first'
++j;
if (j < m_count) {
len = saved_key_len[j];
memcpy(&pBuf2[33], saved_key[j], len);
pBuf2[33+len] = 0xf7;
memcpy(&pBuf2[34+len], cursalt, 32);
lens[1] = len+66;
} else {
lens[1] = 0;
}
DoMD5(input_buf_X86[i>>MD5_X2], lens, crypt_key_X86[j>>MD5_X2]);
#else
DoMD5(input_buf_X86[i>>MD5_X2], (len+66), crypt_key_X86[j]);
#endif
}
}
/**************************************************************
* DYNAMIC primitive helper function
* Encrypts the data in the 2nd input field into crypt_keys2.
*************************************************************/
void DynamicFunc__crypt2_md5(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
for (; i < til; i += SIMD_PARA_MD5) {
SSE_Intrinsics_LoadLens_md5(1, i);
SIMDmd5body(input_buf2[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
}
return;
}
#endif
for (; i < til; ++i) {
#if MD5_X2
unsigned int len[2];
len[0] = total_len2_X86[i++];
if (i < m_count)
len[1] = total_len2_X86[i];
else
len[1] = 0;
#else
unsigned int len = total_len2_X86[i];
#endif
DoMD5(input_buf2_X86[i>>MD5_X2], len, crypt_key2_X86[i>>MD5_X2]);
}
}
void DynamicFunc__crypt2_md4(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
for (; i < til; i += SIMD_PARA_MD4) {
SSE_Intrinsics_LoadLens_md4(1, i);
SIMDmd4body(input_buf2[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
}
return;
}
#endif
for (; i < til; ++i) {
// MD5_X2 sets our input buffers and crypt keys up in 'double' format. Thus, we HAVE
// to treat them just like we do in MD5. The macro hides the details.
#if MD5_X2
unsigned int len[2];
len[0] = total_len2_X86[i++];
if (i == m_count)
len[1] = 0;
else
len[1] = total_len2_X86[i];
#else
unsigned int len = total_len2_X86[i];
#endif
DoMD4(input_buf2_X86[i>>MD5_X2], len, crypt_key2_X86[i>>MD5_X2]);
}
}
/**************************************************************
* DYNAMIC primitive helper function
* Encrypts the data in the 1st input field into crypt_keys2.
*************************************************************/
void DynamicFunc__crypt_md5_in1_to_out2(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
if (curdat.store_keys_in_input) {
for (; i < til; i += SIMD_PARA_MD5) {
SIMDmd5body(input_buf[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
}
} else {
for (; i < til; i += SIMD_PARA_MD5) {
SSE_Intrinsics_LoadLens_md5(0, i);
SIMDmd5body(input_buf[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
}
}
return;
}
#endif
for (; i < til; ++i) {
#if MD5_X2
unsigned int len[2];
len[0] = total_len_X86[i++];
if (i == m_count)
len[1] = 0;
else
len[1] = total_len_X86[i];
#else
unsigned int len = total_len_X86[i];
#endif
DoMD5(input_buf_X86[i>>MD5_X2], len, crypt_key2_X86[i>>MD5_X2]);
}
}
void DynamicFunc__crypt_md4_in1_to_out2(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
if (curdat.store_keys_in_input) {
for (; i < til; i += SIMD_PARA_MD4) {
SIMDmd4body(input_buf[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
}
} else {
for (; i < til; i += SIMD_PARA_MD4) {
SSE_Intrinsics_LoadLens_md4(0, i);
SIMDmd4body(input_buf[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
}
}
return;
}
#endif
for (; i < til; ++i) {
// MD5_X2 sets our input buffers and crypt keys up in 'double' format. Thus, we HAVE
// to treat them just like we do in MD5. The macro hides the details.
#if MD5_X2
unsigned int len[2];
len[0] = total_len_X86[i++];
if (i == m_count)
len[1] = 0;
else
len[1] = total_len_X86[i];
#else
unsigned int len = total_len_X86[i];
#endif
DoMD4(input_buf_X86[i>>MD5_X2], len, crypt_key2_X86[i>>MD5_X2]);
}
}
/**************************************************************
* DYNAMIC primitive helper function
* Encrypts the data in the 2nd input field into crypt_keys.
*************************************************************/
void DynamicFunc__crypt_md5_in2_to_out1(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
for (; i < til; i += SIMD_PARA_MD5)
{
SSE_Intrinsics_LoadLens_md5(1, i);
SIMDmd5body(input_buf2[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
//dump_stuff_mmx_msg("DynamicFunc__crypt_md5_in2_to_out1", input_buf2[i].c,64,m_count-1);
}
return;
}
#endif
for (; i < til; ++i) {
#if MD5_X2
unsigned int len[2];
len[0] = total_len2_X86[i++];
if (i == m_count)
len[1] = 0;
else
len[1] = total_len2_X86[i];
#else
unsigned int len = total_len2_X86[i];
#endif
DoMD5(input_buf2_X86[i>>MD5_X2], len, crypt_key_X86[i>>MD5_X2]);
}
}
void DynamicFunc__crypt_md4_in2_to_out1(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
for (; i < til; i += SIMD_PARA_MD4)
{
SSE_Intrinsics_LoadLens_md4(1, i);
SIMDmd4body(input_buf2[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
}
return;
}
#endif
for (; i < til; ++i) {
// MD5_X2 sets our input buffers and crypt keys up in 'double' format. Thus, we HAVE
// to treat them just like we do in MD5. The macro hides the details.
#if MD5_X2
unsigned int len[2];
len[0] = total_len2_X86[i++];
if (i == m_count)
len[1] = 0;
else
len[1] = total_len2_X86[i];
#else
unsigned int len = total_len2_X86[i];
#endif
DoMD4(input_buf2_X86[i>>MD5_X2], len, crypt_key_X86[i>>MD5_X2]);
}
}
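/**************************************************************
* DYNAMIC primitive helper function
* MD5s input 1 and stores the 16 raw (binary) result bytes back
* into input 1, resetting each length to 16.
*************************************************************/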
void DynamicFunc__crypt_md5_to_input_raw(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
for (; i < til; i += SIMD_PARA_MD5)
{
unsigned int j, k;
SSE_Intrinsics_LoadLens_md5(0, i);
// NOTE, since the crypt_key blocks are 16 bytes each, and the input_buf
// blocks are 64 bytes each, and we process SIMD_PARA_MD5 blocks at a time,
// we can NOT write directly to the input buffer; we have to use the
// crypt_key buffer, and then memcpy when done.
SIMDmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
for (j = 0; j < SIMD_PARA_MD5; ++j)
{
memset(input_buf[i+j].c, 0, sizeof(input_buf[0]));
memcpy(input_buf[i+j].c, crypt_key[i+j].c, 16*SIMD_COEF_32);
for (k = 0; k < SIMD_COEF_32; k++)
total_len[i+j][k] = 16;
}
}
return;
}
#endif
for (; i < til; ++i) {
#if MD5_X2
unsigned int len[2];
len[0] = total_len_X86[i];
total_len_X86[i++] = 0x10;
if (i == m_count)
len[1] = 0;
else
len[1] = total_len_X86[i];
#else
unsigned int len = total_len_X86[i];
#endif
DoMD5(input_buf_X86[i>>MD5_X2], len, input_buf_X86[i>>MD5_X2]);
total_len_X86[i] = 0x10;
}
}
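// Same as DynamicFunc__crypt_md5_to_input_raw, except the stored lengths
// are not reset to 16 afterwards; in SIMD mode the current lengths are
// still loaded into the hash blocks before crypting.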
void DynamicFunc__crypt_md5_to_input_raw_Overwrite_NoLen_but_setlen_in_SSE(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
for (; i < til; i += SIMD_PARA_MD5)
{
unsigned int j;
SSE_Intrinsics_LoadLens_md5(0, i);
// NOTE, since the crypt_key blocks are 16 bytes each, and the input_buf
// blocks are 64 bytes each, and we process SIMD_PARA_MD5 blocks at a time,
// we can NOT write directly to the input buffer; we have to use the
// crypt_key buffer, and then memcpy when done.
SIMDmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
for (j = 0; j < SIMD_PARA_MD5; ++j)
memcpy(input_buf[i+j].c, crypt_key[i+j].c, 16*SIMD_COEF_32);
}
return;
}
#endif
for (; i < til; ++i) {
#if MD5_X2
unsigned int len[2];
len[0] = total_len_X86[i++];
if (i == m_count)
len[1] = 0;
else
len[1] = total_len_X86[i];
#else
unsigned int len = total_len_X86[i];
#endif
DoMD5(input_buf_X86[i>>MD5_X2], len, input_buf_X86[i>>MD5_X2]);
}
}
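// Like the function above, but in SIMD mode the lengths are not loaded
// into the hash blocks at all (they are assumed to already be present
// in the input buffers).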
void DynamicFunc__crypt_md5_to_input_raw_Overwrite_NoLen(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
i /= SIMD_COEF_32;
for (; i < til; i += SIMD_PARA_MD5)
{
unsigned int j;
// NOTE, since the crypt_key blocks are 16 bytes each, and the input_buf
// blocks are 64 bytes each, and we process SIMD_PARA_MD5 blocks at a time,
// we can NOT write directly to the input buffer; we have to use the
// crypt_key buffer, and then memcpy when done.
SIMDmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
for (j = 0; j < SIMD_PARA_MD5; ++j)
memcpy(input_buf[i+j].c, crypt_key[i+j].c, 16*SIMD_COEF_32);
}
return;
}
#endif
for (; i < til; ++i) {
#if MD5_X2
unsigned int len[2];
len[0] = total_len_X86[i++];
if (i == m_count)
len[1] = 0;
else
len[1] = total_len_X86[i];
#else
unsigned int len = total_len_X86[i];
#endif
// we call DoMD5o so as to 'not' change the length (it was already set)
DoMD5o(input_buf_X86[i>>MD5_X2], len, input_buf_X86[i>>MD5_X2]);
}
}
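/**************************************************************
* DYNAMIC primitive helper function
* Overwrites the start of input 1 with the salt (converted to
* UTF-16 when unicode conversion is active), WITHOUT adjusting
* the stored lengths.
*************************************************************/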
void DynamicFunc__overwrite_salt_to_input1_no_size_fix(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
j = first;
til = last;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
if (md5_unicode_convert_get(tid)) {
if (options.target_enc != ASCII && options.target_enc != ISO_8859_1) {
UTF16 utf16Str[27+1]; // 27 chars is the max that fits in SSE without overflow, so that is where we limit it for now
int outlen;
outlen = enc_to_utf16(utf16Str, 27, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
for (; j < til; ++j) {
__SSE_append_string_to_input(input_buf[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),(unsigned char*)utf16Str,outlen,0,0);
}
} else {
for (; j < til; ++j)
__SSE_append_string_to_input_unicode(input_buf[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),(unsigned char*)cursalt,saltlen,0,0);
}
return;
}
for (; j < til; ++j)
__SSE_append_string_to_input(input_buf[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),cursalt,saltlen,0,0);
return;
}
#endif
if (md5_unicode_convert_get(tid)) {
if (options.target_enc != ASCII && options.target_enc != ISO_8859_1) {
UTF16 utf16Str[EFFECTIVE_MAX_LENGTH / 3 + 1];
int outlen;
outlen = enc_to_utf16(utf16Str, EFFECTIVE_MAX_LENGTH / 3, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
for (; j < til; ++j) {
unsigned int z;
unsigned char *cp, *cpi = (unsigned char*)utf16Str;
#if MD5_X2
if (j&1)
cp = input_buf_X86[j>>MD5_X2].x2.B2;
else
#endif
cp = input_buf_X86[j>>MD5_X2].x1.B;
for (z = 0; z < outlen; ++z)
*cp++ = *cpi++;
}
} else {
for (; j < til; ++j) {
unsigned int z;
unsigned char *cp, *cpi = (unsigned char*)cursalt;
#if MD5_X2
if (j&1)
cp = input_buf_X86[j>>MD5_X2].x2.B2;
else
#endif
cp = input_buf_X86[j>>MD5_X2].x1.B;
for (z = 0; z < saltlen; ++z) {
*cp++ = *cpi++;
*cp++ = 0;
}
}
}
return;
}
for (; j < til; ++j) {
#if MD5_X2
if (j&1)
memcpy(input_buf_X86[j>>MD5_X2].x2.b2, cursalt, saltlen);
else
#endif
memcpy(input_buf_X86[j>>MD5_X2].x1.b, cursalt, saltlen);
}
}
void DynamicFunc__overwrite_salt_to_input2_no_size_fix(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
j = first;
til = last;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
if (md5_unicode_convert_get(tid)) {
if (options.target_enc != ASCII && options.target_enc != ISO_8859_1) {
UTF16 utf16Str[27+1]; // 27 chars is the max that fits in SSE without overflow, so that is where we limit it for now
int outlen;
outlen = enc_to_utf16(utf16Str, 27, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
for (; j < til; ++j) {
__SSE_append_string_to_input(input_buf2[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),(unsigned char*)utf16Str,outlen,0,0);
}
} else {
for (; j < til; ++j)
__SSE_append_string_to_input_unicode(input_buf2[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),(unsigned char*)cursalt,saltlen,0,0);
}
return;
}
for (; j < til; ++j)
__SSE_append_string_to_input(input_buf2[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),cursalt,saltlen,0,0);
return;
}
#endif
if (md5_unicode_convert_get(tid)) {
if (options.target_enc != ASCII && options.target_enc != ISO_8859_1) {
UTF16 utf16Str[EFFECTIVE_MAX_LENGTH / 3 + 1];
int outlen;
outlen = enc_to_utf16(utf16Str, EFFECTIVE_MAX_LENGTH / 3, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
for (; j < til; ++j) {
unsigned int z;
unsigned char *cp, *cpi = (unsigned char*)utf16Str;
#if MD5_X2
if (j&1)
cp = input_buf2_X86[j>>MD5_X2].x2.B2;
else
#endif
cp = input_buf2_X86[j>>MD5_X2].x1.B;
for (z = 0; z < outlen; ++z)
*cp++ = *cpi++;
}
} else {
for (; j < til; ++j) {
unsigned int z;
unsigned char *cp, *cpi = (unsigned char*)cursalt;
#if MD5_X2
if (j&1)
cp = input_buf2_X86[j>>MD5_X2].x2.B2;
else
#endif
cp = input_buf2_X86[j>>MD5_X2].x1.B;
for (z = 0; z < saltlen; ++z) {
*cp++ = *cpi++;
*cp++ = 0;
}
}
}
return;
}
for (; j < til; ++j) {
#if MD5_X2
if (j&1)
memcpy(input_buf2_X86[j>>MD5_X2].x2.b2, cursalt, saltlen);
else
#endif
memcpy(input_buf2_X86[j>>MD5_X2].x1.b, cursalt, saltlen);
}
}
/**************************************************************
* DYNAMIC primitive helper function
* overwrites start of input1 from the output2 data using base-16
*************************************************************/
void DynamicFunc__overwrite_from_last_output2_to_input1_as_base16_no_size_fix(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
j = first;
til = last;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int idx;
for (; j < til; ++j)
{
idx = ( ((unsigned int)j)/SIMD_COEF_32);
__SSE_overwrite_output_base16_to_input(input_buf[idx].w, crypt_key2[idx].c, j&(SIMD_COEF_32-1));
}
return;
}
#endif
for (; j < til; ++j)
{
unsigned char *cpo, *cpi;
unsigned int i;
/* MD5_word *w; */
#if MD5_X2
if (j&1)
{cpo = input_buf_X86[j>>MD5_X2].x2.B2; cpi = crypt_key2_X86[j>>MD5_X2].x2.B2; /* w=input_buf_X86[j>>MD5_X2].x2.w2; */}
else
#endif
{cpo = input_buf_X86[j>>MD5_X2].x1.B; cpi = crypt_key2_X86[j>>MD5_X2].x1.B; /* w=input_buf_X86[j>>MD5_X2].x1.w; */ }
for (i = 0; i < 16; ++i, ++cpi)
{
*cpo++ = dynamic_itoa16[*cpi>>4];
*cpo++ = dynamic_itoa16[*cpi&0xF];
}
//MD5_swap(w,w,4);
}
}
/**************************************************************
* DYNAMIC primitive helper function
* overwrites start of input1 from the output1 data using base-16
*************************************************************/
void DynamicFunc__overwrite_from_last_output_as_base16_no_size_fix(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
j = first;
til = last;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int idx;
for (; j < til; ++j)
{
idx = ( ((unsigned int)j)/SIMD_COEF_32);
__SSE_overwrite_output_base16_to_input(input_buf[idx].w, crypt_key[idx].c, j&(SIMD_COEF_32-1));
}
return;
}
#endif
for (; j < til; ++j)
{
unsigned char *cpo, *cpi;
unsigned int i;
/* MD5_word *w; */
#if MD5_X2
if (j&1)
{cpo = input_buf_X86[j>>MD5_X2].x2.B2; cpi = crypt_key_X86[j>>MD5_X2].x2.B2; /* w=input_buf_X86[j>>MD5_X2].x2.w2; */}
else
#endif
{cpo = input_buf_X86[j>>MD5_X2].x1.B; cpi = crypt_key_X86[j>>MD5_X2].x1.B; /* w=input_buf_X86[j>>MD5_X2].x1.w; */ }
for (i = 0; i < 16; ++i, ++cpi)
{
*cpo++ = dynamic_itoa16[*cpi>>4];
*cpo++ = dynamic_itoa16[*cpi&0xF];
}
//MD5_swap(w,w,4);
}
}
/**************************************************************
* DYNAMIC primitive helper function
* This will take the data stored in crypt_keys (the encrypted
* 'first' key variable), format it as base-16 text, and append
* it to the first input buffer (adjusting the lengths).
*************************************************************/
void DynamicFunc__append_from_last_output_as_base16(DYNA_OMP_PARAMS)
{
unsigned int j, til;
#ifdef _OPENMP
j = first;
til = last;
#else
j = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int idx;
for (; j < til; ++j)
{
unsigned int ip;
idx = ( ((unsigned int)j)/SIMD_COEF_32);
// This is the 'actual' work.
ip = total_len[idx][j & (SIMD_COEF_32 - 1)];
total_len[idx][j & (SIMD_COEF_32 - 1)] += 32;
if (!ip)
__SSE_append_output_base16_to_input(input_buf[idx].w, crypt_key[idx].c, j&(SIMD_COEF_32-1));
else if (ip&1)
{
// Note we are 100% unaligned, and it seems fastest to handle byte/byte (at this time).
unsigned int k;
for (k = 0; k < 16; ++k)
{
unsigned char v = crypt_key[idx].c[GETPOS(k, j&(SIMD_COEF_32-1))];
input_buf[idx].c[GETPOS(ip+(k<<1), j&(SIMD_COEF_32-1))] = dynamic_itoa16[v>>4];
input_buf[idx].c[GETPOS(ip+(k<<1)+1, j&(SIMD_COEF_32-1))] = dynamic_itoa16[v&0xF];
}
input_buf[idx].c[GETPOS(ip+32, j&(SIMD_COEF_32-1))] = 0x80;
}
else if ((ip&3)==0)
__SSE_append_output_base16_to_input_semi_aligned_0(ip, input_buf[idx].w, crypt_key[idx].c, j&(SIMD_COEF_32-1));
else
__SSE_append_output_base16_to_input_semi_aligned_2(ip, input_buf[idx].w, crypt_key[idx].c, j&(SIMD_COEF_32-1));
}
return;
}
#endif
for (; j < til; ++j)
{
unsigned char *cp, *cpi;
unsigned int i;
#if MD5_X2
if (j&1)
{cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]); cpi = crypt_key_X86[j>>MD5_X2].x2.B2; }
else
#endif
{cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]); cpi = crypt_key_X86[j>>MD5_X2].x1.B; }
for (i = 0; i < 16; ++i)
{
#if ARCH_ALLOWS_UNALIGNED
*((unsigned short*)cp) = itoa16_w2[*cpi++];
cp += 2;
#else
unsigned char b = *cpi++;
*cp++ = dynamic_itoa16[b>>4];
*cp++ = dynamic_itoa16[b&0xF];
#endif
}
*cp = 0;
total_len_X86[j] += 32;
}
}
/**************************************************************
* DYNAMIC primitive helper function
* This will take the data stored in crypt_keys2 (the encrypted
* 'second' key variable) and append it as base-16 to the 2nd input.
*************************************************************/
void DynamicFunc__append_from_last_output2_as_base16(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int idx;
for (; i < til; ++i)
{
unsigned int ip, j;
idx = ( ((unsigned int)i)/SIMD_COEF_32);
// This is the 'actual' work.
ip = total_len2[idx][i&(SIMD_COEF_32-1)];
total_len2[idx][i&(SIMD_COEF_32-1)] += 32;
if (!ip)
__SSE_append_output_base16_to_input(input_buf2[idx].w, crypt_key2[idx].c, i&(SIMD_COEF_32-1));
else if (ip&1)
{
// Note we are 100% unaligned, and it seems fastest to handle byte/byte (at this time).
for (j = 0; j < 16; ++j)
{
unsigned char v = crypt_key2[idx].c[GETPOS(j, i&(SIMD_COEF_32-1))];
input_buf2[idx].c[GETPOS(ip+(j<<1), i&(SIMD_COEF_32-1))] = dynamic_itoa16[v>>4];
input_buf2[idx].c[GETPOS(ip+(j<<1)+1, i&(SIMD_COEF_32-1))] = dynamic_itoa16[v&0xF];
}
input_buf2[idx].c[GETPOS(ip+32, i&(SIMD_COEF_32-1))] = 0x80;
}
else if ((ip&3)==0)
__SSE_append_output_base16_to_input_semi_aligned_0(ip, input_buf2[idx].w, crypt_key2[idx].c, i&(SIMD_COEF_32-1));
else
__SSE_append_output_base16_to_input_semi_aligned_2(ip, input_buf2[idx].w, crypt_key2[idx].c, i&(SIMD_COEF_32-1));
}
return;
}
#endif
for (; i < til; ++i)
{
unsigned int j;
unsigned char *cp, *cpi;
#if MD5_X2
if (i&1)
{cp = &(input_buf2_X86[i>>MD5_X2].x2.B2[total_len2_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x2.B2; }
else
#endif
{cp = &(input_buf2_X86[i>>MD5_X2].x1.B[total_len2_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x1.B; }
for (j = 0; j < 16; ++j)
{
#if ARCH_ALLOWS_UNALIGNED
*((unsigned short*)cp) = itoa16_w2[*cpi++];
cp += 2;
#else
unsigned char b = *cpi++;
*cp++ = dynamic_itoa16[b>>4];
*cp++ = dynamic_itoa16[b&0xF];
#endif
}
*cp = 0;
total_len2_X86[i] += 32;
}
}
/**************************************************************
* DYNAMIC primitive helper function
* Overwrites the start of input2 from the output1 data using base-16.
* This is an optimization for when the same thing is done over and
* over again, such as md5(md5(md5(md5($p)))). There, we only call
* the copy-and-set-length variant once, then simply call this copy.
*************************************************************/
void DynamicFunc__overwrite_from_last_output_to_input2_as_base16_no_size_fix(DYNA_OMP_PARAMS)
{
unsigned int i, til, j;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int idx;
for (; i < til; ++i)
{
idx = ( ((unsigned int)i)/SIMD_COEF_32);
__SSE_overwrite_output_base16_to_input(input_buf2[idx].w, crypt_key[idx].c, i&(SIMD_COEF_32-1));
}
return;
}
#endif
j = i;
for (; j < til; ++j)
{
unsigned char *cpo, *cpi;
/* MD5_word *w; */
#if MD5_X2
if (j&1)
{cpo = input_buf2_X86[j>>MD5_X2].x2.B2; cpi = crypt_key_X86[j>>MD5_X2].x2.B2; /* w=input_buf_X86[j>>MD5_X2].x2.w2; */}
else
#endif
{cpo = input_buf2_X86[j>>MD5_X2].x1.B; cpi = crypt_key_X86[j>>MD5_X2].x1.B; /* w=input_buf_X86[j>>MD5_X2].x1.w; */ }
for (i = 0; i < 16; ++i, ++cpi)
{
*cpo++ = dynamic_itoa16[*cpi>>4];
*cpo++ = dynamic_itoa16[*cpi&0xF];
}
//MD5_swap(w,w,4);
}
}
void DynamicFunc__overwrite_from_last_output2_to_input2_as_base16_no_size_fix(DYNA_OMP_PARAMS)
{
unsigned int i, til, j;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int idx;
for (; i < til; ++i)
{
idx = ( ((unsigned int)i)/SIMD_COEF_32);
__SSE_overwrite_output_base16_to_input(input_buf2[idx].w, crypt_key2[idx].c, i&(SIMD_COEF_32-1));
}
return;
}
#endif
j = i;
for (; j < til; ++j)
{
unsigned char *cpo, *cpi;
/* MD5_word *w; */
#if MD5_X2
if (j&1)
{cpo = input_buf2_X86[j>>MD5_X2].x2.B2; cpi = crypt_key2_X86[j>>MD5_X2].x2.B2; /* w=input_buf2_X86[j>>MD5_X2].x2.w2; */}
else
#endif
{cpo = input_buf2_X86[j>>MD5_X2].x1.B; cpi = crypt_key2_X86[j>>MD5_X2].x1.B; /* w=input_buf2_X86[j>>MD5_X2].x1.w; */ }
for (i = 0; i < 16; ++i, ++cpi)
{
*cpo++ = dynamic_itoa16[*cpi>>4];
*cpo++ = dynamic_itoa16[*cpi&0xF];
}
//MD5_swap(w,w,4);
}
}
/**************************************************************
* DYNAMIC primitive helper function
* overwrites start of input2 from the output2 data using base-16
*************************************************************/
void DynamicFunc__overwrite_from_last_output2_as_base16_no_size_fix(DYNA_OMP_PARAMS)
{
unsigned int i, til, j;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int idx;
for (; i < til; ++i)
{
idx = ( ((unsigned int)i)/SIMD_COEF_32);
__SSE_overwrite_output_base16_to_input(input_buf2[idx].w, crypt_key2[idx].c, i&(SIMD_COEF_32-1));
}
return;
}
#endif
j=i;
for (; j < til; ++j)
{
unsigned char *cpo, *cpi;
/* MD5_word *w; */
#if MD5_X2
if (j&1)
{cpo = input_buf2_X86[j>>MD5_X2].x2.B2; cpi = crypt_key2_X86[j>>MD5_X2].x2.B2; /* w=input_buf_X86[j>>MD5_X2].x2.w2; */}
else
#endif
{cpo = input_buf2_X86[j>>MD5_X2].x1.B; cpi = crypt_key2_X86[j>>MD5_X2].x1.B; /* w=input_buf_X86[j>>MD5_X2].x1.w; */ }
for (i = 0; i < 16; ++i, ++cpi)
{
*cpo++ = dynamic_itoa16[*cpi>>4];
*cpo++ = dynamic_itoa16[*cpi&0xF];
}
//MD5_swap(w,w,4);
}
}
/**************************************************************
* DYNAMIC primitive helper function
* This will take the data stored in crypt_keys (the encrypted
* 'first' key variable) and append it as base-16 to the 2nd input.
*************************************************************/
void DynamicFunc__append_from_last_output_to_input2_as_base16(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int index=i, idx;
for (; index < til; ++index)
{
unsigned int ip;
idx = ( ((unsigned int)index)/SIMD_COEF_32);
// This is the 'actual' work.
ip = total_len2[idx][index&(SIMD_COEF_32-1)];
total_len2[idx][index&(SIMD_COEF_32-1)] += 32;
if (!ip)
__SSE_append_output_base16_to_input(input_buf2[idx].w, crypt_key[idx].c, index&(SIMD_COEF_32-1));
else if (ip&1)
{
// Note we are 100% unaligned, and it seems fastest to handle byte/byte (at this time).
for (i = 0; i < 16; ++i)
{
unsigned char v = crypt_key[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
input_buf2[idx].c[GETPOS(ip+(i<<1), index&(SIMD_COEF_32-1))] = dynamic_itoa16[v>>4];
input_buf2[idx].c[GETPOS(ip+(i<<1)+1, index&(SIMD_COEF_32-1))] = dynamic_itoa16[v&0xF];
}
input_buf2[idx].c[GETPOS(ip+32, index&(SIMD_COEF_32-1))] = 0x80;
}
else if ((ip&3)==0)
__SSE_append_output_base16_to_input_semi_aligned_0(ip, input_buf2[idx].w, crypt_key[idx].c, index&(SIMD_COEF_32-1));
else
__SSE_append_output_base16_to_input_semi_aligned_2(ip, input_buf2[idx].w, crypt_key[idx].c, index&(SIMD_COEF_32-1));
}
return;
}
#endif
for (; i < til; ++i)
{
unsigned int j;
unsigned char *cp, *cpi;
#if MD5_X2
if (i&1)
{cpi = crypt_key_X86[i>>MD5_X2].x2.B2; cp = &(input_buf2_X86[i>>MD5_X2].x2.B2[total_len2_X86[i]]); }
else
#endif
{cpi = crypt_key_X86[i>>MD5_X2].x1.B; cp = &(input_buf2_X86[i>>MD5_X2].x1.B[total_len2_X86[i]]);}
for (j = 0; j < 16; ++j)
{
#if ARCH_ALLOWS_UNALIGNED
*((unsigned short*)cp) = itoa16_w2[*cpi++];
cp += 2;
#else
unsigned char b = *cpi++;
*cp++ = dynamic_itoa16[b>>4];
*cp++ = dynamic_itoa16[b&0xF];
#endif
}
*cp = 0;
total_len2_X86[i] += 32;
}
}
/**************************************************************
* DYNAMIC primitive helper function
* This will take the data stored in crypt_keys2 (the encrypted
* 'second' key variable) and append it as base-16 to the 1st input.
*************************************************************/
void DynamicFunc__append_from_last_output2_to_input1_as_base16(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int index=i, idx;
for (; index < til; ++index)
{
unsigned int ip;
idx = ( ((unsigned int)index)/SIMD_COEF_32);
// This is the 'actual' work.
ip = total_len[idx][index&(SIMD_COEF_32-1)];
total_len[idx][index&(SIMD_COEF_32-1)] += 32;
if (!ip)
__SSE_append_output_base16_to_input(input_buf[idx].w, crypt_key2[idx].c, index&(SIMD_COEF_32-1));
else if (ip&1)
{
// Note we are 100% unaligned, and it seems fastest to handle byte/byte (at this time).
for (i = 0; i < 16; ++i)
{
unsigned char v = crypt_key2[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
input_buf[idx].c[GETPOS(ip+(i<<1), index&(SIMD_COEF_32-1))] = dynamic_itoa16[v>>4];
input_buf[idx].c[GETPOS(ip+(i<<1)+1, index&(SIMD_COEF_32-1))] = dynamic_itoa16[v&0xF];
}
input_buf[idx].c[GETPOS(ip+32, index&(SIMD_COEF_32-1))] = 0x80;
}
else if ((ip&3)==0)
__SSE_append_output_base16_to_input_semi_aligned_0(ip, input_buf[idx].w, crypt_key2[idx].c, index&(SIMD_COEF_32-1));
else
__SSE_append_output_base16_to_input_semi_aligned_2(ip, input_buf[idx].w, crypt_key2[idx].c, index&(SIMD_COEF_32-1));
}
return;
}
#endif
for (; i < til; ++i)
{
unsigned int j;
unsigned char *cp, *cpi;
#if MD5_X2
if (i&1)
{cp = &(input_buf_X86[i>>MD5_X2].x2.B2[total_len_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x2.B2; }
else
#endif
{cp = &(input_buf_X86[i>>MD5_X2].x1.B[total_len_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x1.B; }
for (j = 0; j < 16; ++j)
{
#if ARCH_ALLOWS_UNALIGNED
*((unsigned short*)cp) = itoa16_w2[*cpi++];
cp += 2;
#else
unsigned char b = *cpi++;
*cp++ = dynamic_itoa16[b>>4];
*cp++ = dynamic_itoa16[b&0xF];
#endif
}
*cp = 0;
total_len_X86[i] += 32;
}
}
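/**************************************************************
* DYNAMIC primitive helper function
* Appends the 16 raw (binary) bytes of crypt_key2 to input 1 and
* increases the input 1 length by 16.
*************************************************************/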
void DynamicFunc__append_from_last_output2_as_raw(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int index=i, idx;
for (; index < til; ++index)
{
unsigned int ip;
idx = ( ((unsigned int)index)/SIMD_COEF_32);
// This is the 'actual' work.
ip = total_len[idx][index&(SIMD_COEF_32-1)];
if (!ip)
{
ARCH_WORD_32 *po = input_buf[idx].w;
ARCH_WORD_32 *pi = crypt_key2[idx].w;
po += (index&(SIMD_COEF_32-1));
pi += (index&(SIMD_COEF_32-1));
for (i = 0; i < 4; i++)
{
*po = *pi;
po += SIMD_COEF_32;
pi += SIMD_COEF_32;
}
input_buf[idx].c[GETPOS(16, index&(SIMD_COEF_32-1))] = 0x80;
}
else
{
for (i = 0; i < 16; ++i)
input_buf[idx].c[GETPOS(ip+i, index&(SIMD_COEF_32-1))] = crypt_key2[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
input_buf[idx].c[GETPOS(ip+16, index&(SIMD_COEF_32-1))] = 0x80;
}
total_len[idx][index&(SIMD_COEF_32-1)] += 16;
}
return;
}
#endif
for (; i < til; ++i)
{
unsigned int j;
unsigned char *cp, *cpi;
#if MD5_X2
if (i&1)
{cp = &(input_buf_X86[i>>MD5_X2].x2.B2[total_len_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x2.B2; }
else
#endif
{cp = &(input_buf_X86[i>>MD5_X2].x1.B[total_len_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x1.B; }
for (j = 0; j < 16; ++j)
*cp++ = *cpi++;
*cp = 0;
total_len_X86[i] += 16;
}
}
void DynamicFunc__append2_from_last_output2_as_raw(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int index=i, idx;
for (; index < til; ++index)
{
unsigned int ip;
idx = ( ((unsigned int)index)/SIMD_COEF_32);
// This is the 'actual' work.
ip = total_len2[idx][index&(SIMD_COEF_32-1)];
if (!ip)
{
ARCH_WORD_32 *po = input_buf2[idx].w;
ARCH_WORD_32 *pi = crypt_key2[idx].w;
po += (index&(SIMD_COEF_32-1));
pi += (index&(SIMD_COEF_32-1));
for (i = 0; i < 4; i++)
{
*po = *pi;
po += SIMD_COEF_32;
pi += SIMD_COEF_32;
}
input_buf2[idx].c[GETPOS(16, index&(SIMD_COEF_32-1))] = 0x80;
}
else
{
for (i = 0; i < 16; ++i)
input_buf2[idx].c[GETPOS(ip+i, index&(SIMD_COEF_32-1))] = crypt_key2[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
input_buf2[idx].c[GETPOS(ip+16, index&(SIMD_COEF_32-1))] = 0x80;
}
total_len2[idx][index&(SIMD_COEF_32-1)] += 16;
}
return;
}
#endif
for (; i < til; ++i)
{
unsigned int j;
unsigned char *cp, *cpi;
#if MD5_X2
if (i&1)
{cp = &(input_buf2_X86[i>>MD5_X2].x2.B2[total_len2_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x2.B2; }
else
#endif
{cp = &(input_buf2_X86[i>>MD5_X2].x1.B[total_len2_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x1.B; }
for (j = 0; j < 16; ++j)
*cp++ = *cpi++;
*cp = 0;
total_len2_X86[i] += 16;
}
}
void DynamicFunc__append_from_last_output1_as_raw(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int index, idx;
for (index = i; index < til; ++index)
{
unsigned int ip;
idx = ( ((unsigned int)index)/SIMD_COEF_32);
// This is the 'actual' work.
ip = total_len[idx][index&(SIMD_COEF_32-1)];
if (!ip)
{
ARCH_WORD_32 *po = input_buf[idx].w;
ARCH_WORD_32 *pi = crypt_key[idx].w;
po += (index&(SIMD_COEF_32-1));
pi += (index&(SIMD_COEF_32-1));
for (i = 0; i < 4; i++)
{
*po = *pi;
po += SIMD_COEF_32;
pi += SIMD_COEF_32;
}
input_buf[idx].c[GETPOS(16, index&(SIMD_COEF_32-1))] = 0x80;
}
else
{
for (i = 0; i < 16; ++i)
input_buf[idx].c[GETPOS(ip+i, index&(SIMD_COEF_32-1))] = crypt_key[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
input_buf[idx].c[GETPOS(ip+16, index&(SIMD_COEF_32-1))] = 0x80;
}
total_len[idx][index&(SIMD_COEF_32-1)] += 16;
}
return;
}
#endif
for (; i < til; ++i)
{
unsigned int j;
unsigned char *cp, *cpi;
#if MD5_X2
if (i&1)
{cp = &(input_buf_X86[i>>MD5_X2].x2.B2[total_len_X86[i]]); cpi = crypt_key_X86[i>>MD5_X2].x2.B2; }
else
#endif
{cp = &(input_buf_X86[i>>MD5_X2].x1.B[total_len_X86[i]]); cpi = crypt_key_X86[i>>MD5_X2].x1.B; }
for (j = 0; j < 16; ++j)
*cp++ = *cpi++;
*cp = 0;
total_len_X86[i] += 16;
}
}
void DynamicFunc__append2_from_last_output1_as_raw(DYNA_OMP_PARAMS)
{
unsigned int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef SIMD_COEF_32
if (dynamic_use_sse==1) {
unsigned int index, idx;
for (index = i; index < til; ++index)
{
unsigned int ip;
idx = ( ((unsigned int)index)/SIMD_COEF_32);
// This is the 'actual' work.
ip = total_len2[idx][index&(SIMD_COEF_32-1)];
if (!ip)
{
ARCH_WORD_32 *po = input_buf2[idx].w;
ARCH_WORD_32 *pi = crypt_key[idx].w;
po += (index&(SIMD_COEF_32-1));
pi += (index&(SIMD_COEF_32-1));
for (i = 0; i < 4; i++)
{
*po = *pi;
po += SIMD_COEF_32;
pi += SIMD_COEF_32;
}
input_buf2[idx].c[GETPOS(16, index&(SIMD_COEF_32-1))] = 0x80;
}
else
{
for (i = 0; i < 16; ++i)
input_buf2[idx].c[GETPOS(ip+i, index&(SIMD_COEF_32-1))] = crypt_key[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
input_buf2[idx].c[GETPOS(ip+16, index&(SIMD_COEF_32-1))] = 0x80;
}
total_len2[idx][index&(SIMD_COEF_32-1)] += 16;
}
return;
}
#endif
for (; i < til; ++i)
{
unsigned int j;
unsigned char *cp, *cpi;
#if MD5_X2
if (i&1)
{cp = &(input_buf2_X86[i>>MD5_X2].x2.B2[total_len2_X86[i]]); cpi = crypt_key_X86[i>>MD5_X2].x2.B2; }
else
#endif
{cp = &(input_buf2_X86[i>>MD5_X2].x1.B[total_len2_X86[i]]); cpi = crypt_key_X86[i>>MD5_X2].x1.B; }
for (j = 0; j < 16; ++j)
*cp++ = *cpi++;
*cp = 0;
total_len2_X86[i] += 16;
}
}
/**************************************************************
* DYNAMIC primitive helper function
* Append salt #2 into input 1
*************************************************************/
void DynamicFunc__append_2nd_salt(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm cursalt2, saltlen2);
}
/**************************************************************
* DYNAMIC primitive helper function
* Append salt #2 into input 2
*************************************************************/
void DynamicFunc__append_2nd_salt2(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm cursalt2, saltlen2);
}
/**************************************************************
* DYNAMIC primitive helper function
* Append UserID into input 1
*************************************************************/
void DynamicFunc__append_userid(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm username, usernamelen);
}
/**************************************************************
* DYNAMIC primitive helper function
* Append UserID into input 2
*************************************************************/
void DynamicFunc__append_userid2(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm username, usernamelen);
}
void DynamicFunc__append_input1_from_CONST1(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm curdat.Consts[0], curdat.ConstsLen[0]);
}
void DynamicFunc__append_input1_from_CONST2(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm curdat.Consts[1], curdat.ConstsLen[1]);
}
void DynamicFunc__append_input1_from_CONST3(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm curdat.Consts[2], curdat.ConstsLen[2]);
}
void DynamicFunc__append_input1_from_CONST4(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm curdat.Consts[3], curdat.ConstsLen[3]);
}
void DynamicFunc__append_input1_from_CONST5(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm curdat.Consts[4], curdat.ConstsLen[4]);
}
void DynamicFunc__append_input1_from_CONST6(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm curdat.Consts[5], curdat.ConstsLen[5]);
}
void DynamicFunc__append_input1_from_CONST7(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm curdat.Consts[6], curdat.ConstsLen[6]);
}
void DynamicFunc__append_input1_from_CONST8(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm curdat.Consts[7], curdat.ConstsLen[7]);
}
void DynamicFunc__append_input2_from_CONST1(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[0], curdat.ConstsLen[0]);
}
void DynamicFunc__append_input2_from_CONST2(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[1], curdat.ConstsLen[1]);
}
void DynamicFunc__append_input2_from_CONST3(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[2], curdat.ConstsLen[2]);
}
void DynamicFunc__append_input2_from_CONST4(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[3], curdat.ConstsLen[3]);
}
void DynamicFunc__append_input2_from_CONST5(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[4], curdat.ConstsLen[4]);
}
void DynamicFunc__append_input2_from_CONST6(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[5], curdat.ConstsLen[5]);
}
void DynamicFunc__append_input2_from_CONST7(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[6], curdat.ConstsLen[6]);
}
void DynamicFunc__append_input2_from_CONST8(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[7], curdat.ConstsLen[7]);
}
void DynamicFunc__append_fld0(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[0], fld_lens[0]);
}
void DynamicFunc__append_fld1(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[1], fld_lens[1]);
}
void DynamicFunc__append_fld2(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[2], fld_lens[2]);
}
void DynamicFunc__append_fld3(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[3], fld_lens[3]);
}
void DynamicFunc__append_fld4(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[4], fld_lens[4]);
}
void DynamicFunc__append_fld5(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[5], fld_lens[5]);
}
void DynamicFunc__append_fld6(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[6], fld_lens[6]);
}
void DynamicFunc__append_fld7(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[7], fld_lens[7]);
}
void DynamicFunc__append_fld8(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[8], fld_lens[8]);
}
void DynamicFunc__append_fld9(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[9], fld_lens[9]);
}
void DynamicFunc__append2_fld0(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[0], fld_lens[0]);
}
void DynamicFunc__append2_fld1(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[1], fld_lens[1]);
}
void DynamicFunc__append2_fld2(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[2], fld_lens[2]);
}
void DynamicFunc__append2_fld3(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[3], fld_lens[3]);
}
void DynamicFunc__append2_fld4(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[4], fld_lens[4]);
}
void DynamicFunc__append2_fld5(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[5], fld_lens[5]);
}
void DynamicFunc__append2_fld6(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[6], fld_lens[6]);
}
void DynamicFunc__append2_fld7(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[7], fld_lens[7]);
}
void DynamicFunc__append2_fld8(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[8], fld_lens[8]);
}
void DynamicFunc__append2_fld9(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[9], fld_lens[9]);
}
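/**************************************************************
* The DynamicFunc__SSEtoX86_* and DynamicFunc__X86toSSE_* helpers
* switch the working mode between the interleaved SIMD buffers and
* the flat x86 buffers, transposing (de-interleaving) the data and
* lengths as they copy. This one copies input 1 from SIMD to x86.
*************************************************************/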
void DynamicFunc__SSEtoX86_switch_input1(DYNA_OMP_PARAMS)
{
#ifdef SIMD_COEF_32
unsigned int i, j, k, idx, max;
if (dynamic_use_sse == 0)
return;
dynamic_use_sse = 2;
for (j = 0; j < m_count; j += SIMD_COEF_32)
{
ARCH_WORD_32 *cpi;
ARCH_WORD_32 *cpo[SIMD_COEF_32];
#if (MD5_X2)
for (i = 0; i < SIMD_COEF_32; i += 2) {
cpo[i ] = input_buf_X86[(j>>1)+(i>>1)].x1.w;
cpo[i+1] = input_buf_X86[(j>>1)+(i>>1)].x2.w2;
}
#else
for (i = 0; i < SIMD_COEF_32; i++)
cpo[i] = input_buf_X86[j+i].x1.w;
#endif
idx = j / SIMD_COEF_32;
cpi = input_buf[idx].w;
max = total_len_X86[j] = (total_len[idx][0]);
for (i = 1; i < SIMD_COEF_32; i++)
if (max < (total_len_X86[j+i] = total_len[idx][i]))
max = total_len_X86[j+i];
max = (max+3)>>2;
for (k = 0; k < max; ++k) {
for (i = 0; i < SIMD_COEF_32; i++)
*cpo[i]++ = *cpi++;
}
#if (MD5_X2)
for (i = 0; i < SIMD_COEF_32; i += 2) {
input_buf_X86[(j>>1)+(i>>1)].x1.b[total_len_X86[j+i]] = 0;
input_buf_X86[(j>>1)+(i>>1)].x2.b2[total_len_X86[j+i+1]] = 0;
}
#else
for (i = 0; i < SIMD_COEF_32; i++)
input_buf_X86[j+i].x1.b[total_len_X86[j+i]] = 0;
#endif
}
#endif
}
void DynamicFunc__SSEtoX86_switch_input2(DYNA_OMP_PARAMS)
{
#ifdef SIMD_COEF_32
unsigned int i, j, k, idx, max;
if (dynamic_use_sse == 0)
return;
dynamic_use_sse = 2;
for (j = 0; j < m_count; j += SIMD_COEF_32)
{
ARCH_WORD_32 *cpi;
ARCH_WORD_32 *cpo[SIMD_COEF_32];
#if (MD5_X2)
for (i = 0; i < SIMD_COEF_32; i += 2) {
cpo[i ] = input_buf2_X86[(j>>1)+(i>>1)].x1.w;
cpo[i+1] = input_buf2_X86[(j>>1)+(i>>1)].x2.w2;
}
#else
for (i = 0; i < SIMD_COEF_32; i++)
cpo[i] = input_buf2_X86[j+i].x1.w;
#endif
idx = j / SIMD_COEF_32;
cpi = input_buf2[idx].w;
max = total_len2_X86[j] = (total_len2[idx][0]);
for (i = 1; i < SIMD_COEF_32; i++)
if (max < (total_len2_X86[j+i] = total_len2[idx][i]))
max = total_len2_X86[j+i];
max = (max+3)>>2;
for (k = 0; k < max; ++k) {
for (i = 0; i < SIMD_COEF_32; i++)
*cpo[i]++ = *cpi++;
}
// get rid of the 0x80
#if (MD5_X2)
for (i = 0; i < SIMD_COEF_32; i += 2) {
input_buf2_X86[(j>>1)+(i>>1)].x1.b[total_len2_X86[j+i]] = 0;
input_buf2_X86[(j>>1)+(i>>1)].x2.b2[total_len2_X86[j+i+1]] = 0;
}
#else
for (i = 0; i < SIMD_COEF_32; i++)
input_buf2_X86[j+i].x1.b[total_len2_X86[j+i]] = 0;
#endif
}
#endif
}
void DynamicFunc__SSEtoX86_switch_output1(DYNA_OMP_PARAMS)
{
#ifdef SIMD_COEF_32
unsigned int i, j, k, idx;
if (dynamic_use_sse == 0)
return;
dynamic_use_sse = 2;
for (j = 0; j < m_count; j += SIMD_COEF_32)
{
ARCH_WORD_32 *cpi;
ARCH_WORD_32 *cpo[SIMD_COEF_32];
#if MD5_X2
for (i = 0; i < SIMD_COEF_32; i += 2) {
cpo[i ] = crypt_key_X86[(j>>1)+(i>>1)].x1.w;
cpo[i+1] = crypt_key_X86[(j>>1)+(i>>1)].x2.w2;
}
#else
for (i = 0; i < SIMD_COEF_32; i++)
cpo[i] = crypt_key_X86[j+i].x1.w;
#endif
idx = j/SIMD_COEF_32;
cpi = (void*)crypt_key[idx].c;
for (k = 0; k < 4; ++k) {
for (i = 0; i < SIMD_COEF_32; i++)
*cpo[i]++ = *cpi++;
}
}
#endif
}
void DynamicFunc__SSEtoX86_switch_output2(DYNA_OMP_PARAMS)
{
#ifdef SIMD_COEF_32
unsigned int i, j, k, idx;
if (dynamic_use_sse == 0)
return;
dynamic_use_sse = 2;
for (j = 0; j < m_count; j += SIMD_COEF_32)
{
ARCH_WORD_32 *cpi;
ARCH_WORD_32 *cpo[SIMD_COEF_32];
#if (MD5_X2)
for (i = 0; i < SIMD_COEF_32; i += 2) {
cpo[i ] = crypt_key2_X86[(j>>1)+(i>>1)].x1.w;
cpo[i+1] = crypt_key2_X86[(j>>1)+(i>>1)].x2.w2;
}
#else
for (i = 0; i < SIMD_COEF_32; i++)
cpo[i] = crypt_key2_X86[j+i].x1.w;
#endif
idx = j / SIMD_COEF_32;
cpi = crypt_key2[idx].w;
for (k = 0; k < 4; ++k) {
for (i = 0; i < SIMD_COEF_32; i++)
*cpo[i]++ = *cpi++;
}
}
#endif
}
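// Switches back to SIMD mode and re-interleaves the flat x86 input 1
// buffers into the SIMD input blocks (the SIMD buffers are cleaned first).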
void DynamicFunc__X86toSSE_switch_input1(DYNA_OMP_PARAMS)
{
#ifdef SIMD_COEF_32
unsigned int j, idx, idx_mod;
if (dynamic_use_sse == 0)
return;
dynamic_use_sse = 1;
__nonMP_DynamicFunc__clean_input();
for (j = 0; j < m_count; ++j) {
idx = j/SIMD_COEF_32;
idx_mod = j&(SIMD_COEF_32-1);
total_len[idx][idx_mod] += total_len_X86[j];
#if (MD5_X2)
if (j & 1)
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,input_buf_X86[j>>1].x2.B2,total_len_X86[j],0,1);
else
#endif
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,input_buf_X86[j>>MD5_X2].x1.B,total_len_X86[j],0,1);
}
#endif
}
void DynamicFunc__X86toSSE_switch_input2(DYNA_OMP_PARAMS)
{
#ifdef SIMD_COEF_32
unsigned int j, idx, idx_mod;
if (dynamic_use_sse == 0)
return;
dynamic_use_sse = 1;
__nonMP_DynamicFunc__clean_input2();
for (j = 0; j < m_count; ++j) {
idx = j/SIMD_COEF_32;
idx_mod = j&(SIMD_COEF_32-1);
total_len2[idx][idx_mod] += total_len2_X86[j];
#if (MD5_X2)
if (j & 1)
__SSE_append_string_to_input(input_buf2[idx].c,idx_mod,input_buf2_X86[j>>1].x2.B2,total_len2_X86[j],0,1);
else
#endif
__SSE_append_string_to_input(input_buf2[idx].c,idx_mod,input_buf2_X86[j>>MD5_X2].x1.B,total_len2_X86[j],0,1);
}
#endif
}
void DynamicFunc__X86toSSE_switch_output1(DYNA_OMP_PARAMS)
{
#ifdef SIMD_COEF_32
unsigned int i, j, k, idx;
if (dynamic_use_sse == 0)
return;
dynamic_use_sse = 1;
for (j = 0; j < m_count; j += SIMD_COEF_32)
{
ARCH_WORD_32 *cpi;
ARCH_WORD_32 *cpo[SIMD_COEF_32];
#if (MD5_X2)
for (i = 0; i < SIMD_COEF_32; i += 2) {
cpo[i ] = crypt_key_X86[(j>>1)+(i>>1)].x1.w;
cpo[i+1] = crypt_key_X86[(j>>1)+(i>>1)].x2.w2;
}
#else
for (i = 0; i < SIMD_COEF_32; i++)
cpo[i] = crypt_key_X86[j+i].x1.w;
#endif
idx = j / SIMD_COEF_32;
cpi = (void*)crypt_key[idx].c;
for (k = 0; k < 4; ++k) {
for (i = 0; i < SIMD_COEF_32; i++)
*cpi++ = *cpo[i]++;
}
}
#endif
}
void DynamicFunc__X86toSSE_switch_output2(DYNA_OMP_PARAMS)
{
#ifdef SIMD_COEF_32
unsigned int i, j, k, idx;
if (dynamic_use_sse == 0)
return;
dynamic_use_sse = 1;
for (j = 0; j < m_count; j += SIMD_COEF_32)
{
ARCH_WORD_32 *cpi;
ARCH_WORD_32 *cpo[SIMD_COEF_32];
#if (MD5_X2)
for (i = 0; i < SIMD_COEF_32; i += 2) {
cpo[i ] = crypt_key2_X86[(j>>1)+(i>>1)].x1.w;
cpo[i+1] = crypt_key2_X86[(j>>1)+(i>>1)].x2.w2;
}
#else
for (i = 0; i < SIMD_COEF_32; i++)
cpo[i] = crypt_key2_X86[j+i].x1.w;
#endif
idx = j / SIMD_COEF_32;
cpi = crypt_key2[idx].w;
for (k = 0; k < 4; ++k) {
for (i = 0; i < SIMD_COEF_32; i++)
*cpi++ = *cpo[i]++;
}
}
#endif
}
// This function simply 'switches' back to SSE mode. It does NOT copy any data from X86 to SSE
void DynamicFunc__ToSSE(DYNA_OMP_PARAMS)
{
if (dynamic_use_sse == 0)
return;
dynamic_use_sse = 1;
}
// This function simply 'switches' to X86 mode. It does NOT copy any data from SSE to X86
void DynamicFunc__ToX86(DYNA_OMP_PARAMS)
{
if (dynamic_use_sse == 0)
return;
dynamic_use_sse = 2;
}
void DynamicFunc__base16_convert_locase(DYNA_OMP_PARAMS)
{
dynamic_itoa16 = itoa16;
itoa16_w2=itoa16_w2_l;
}
void DynamicFunc__base16_convert_upcase(DYNA_OMP_PARAMS)
{
dynamic_itoa16 = itoa16u;
itoa16_w2=itoa16_w2_u;
}
/**************************************************************
* DEPRECATED functions. These are the older pseudo functions
* which we now have flags for. We keep them, so that we can
* add the proper flags, even if the user is running an older
* script.
*************************************************************/
void DynamicFunc__InitialLoadKeysToInput(DYNA_OMP_PARAMS) {}
void DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2(DYNA_OMP_PARAMS) {}
void DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1(DYNA_OMP_PARAMS) {}
void DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1_offset32(DYNA_OMP_PARAMS) {}
/**************************************************************
**************************************************************
**************************************************************
**************************************************************
* DYNAMIC primitive helper function
* This is the END of the primitives.
**************************************************************
**************************************************************
**************************************************************
*************************************************************/
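// ConvertFuncs maps one primitive from a dynamic script onto the list of
// primitives actually used in this build. Deprecated and not-applicable
// primitives are dropped (*count stays 0); everything else passes through
// unchanged.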
static DYNAMIC_primitive_funcp *ConvertFuncs(DYNAMIC_primitive_funcp p, unsigned int *count)
{
static DYNAMIC_primitive_funcp fncs[20];
*count = 0;
if (p==DynamicFunc__InitialLoadKeysToInput ||
p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2 ||
p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1 ||
p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1_offset32)
return fncs; // ignore these
#ifndef SIMD_COEF_32
if (p==DynamicFunc__SSEtoX86_switch_input1 || p==DynamicFunc__SSEtoX86_switch_input2 ||
p==DynamicFunc__SSEtoX86_switch_output1 || p==DynamicFunc__SSEtoX86_switch_output2 ||
p==DynamicFunc__X86toSSE_switch_input1 || p==DynamicFunc__X86toSSE_switch_input2 ||
p==DynamicFunc__X86toSSE_switch_output1 || p==DynamicFunc__X86toSSE_switch_output2 ||
p==DynamicFunc__ToSSE || p==DynamicFunc__ToX86)
return fncs; // we ignore these functions 100% in x86 mode.
#endif
// if (p==DynamicFunc__append_input2_from_CONST1) {
// fncs[0] = DynamicFunc__set_input2;
// fncs[1] = DynamicFunc__set_CONST1;
// fncs[2] = DynamicFunc__append_CONST;
// *count = 3;
// }
/* LOOK INTO THIS!!!!! This may not be valid, now that SHA1 is handled 100% outside of the SSE2 code.
But I am not sure just what this is supposed to do anyway, since non-LE builds should be using CTX only??? */
#if !ARCH_LITTLE_ENDIAN
if (/*p==DynamicFunc__SHA1_crypt_input1_append_input2_base16 ||*/ p==DynamicFunc__SHA1_crypt_input1_append_input2 ||
/*p==DynamicFunc__SHA1_crypt_input2_append_input1_base16 ||*/ p==DynamicFunc__SHA1_crypt_input2_append_input1 ||
/*p==DynamicFunc__SHA1_crypt_input1_overwrite_input1_base16 ||*/ p==DynamicFunc__SHA1_crypt_input1_overwrite_input1 ||
/*p==DynamicFunc__SHA1_crypt_input2_overwrite_input2_base16 ||*/ p==DynamicFunc__SHA1_crypt_input2_overwrite_input2 ||
/*p==DynamicFunc__SHA1_crypt_input1_overwrite_input2_base16 ||*/ p==DynamicFunc__SHA1_crypt_input1_overwrite_input2 ||
/*p==DynamicFunc__SHA1_crypt_input2_overwrite_input1_base16 ||*/ p==DynamicFunc__SHA1_crypt_input2_overwrite_input1 ||
p==DynamicFunc__SHA1_crypt_input1_to_output1_FINAL ||
p==DynamicFunc__SHA1_crypt_input2_to_output1_FINAL)
curdat.force_md5_ctx = 0;
#endif
*count = 1;
fncs[0] = p;
return fncs;
}
#ifdef _OPENMP
static int isBadOMPFunc(DYNAMIC_primitive_funcp p)
{
// If ANY of these functions are seen, we can NOT use OMP for this single format.
#if SIMD_COEF_32
if (p==DynamicFunc__SSEtoX86_switch_input1 || p==DynamicFunc__SSEtoX86_switch_input2 ||
p==DynamicFunc__SSEtoX86_switch_output1 || p==DynamicFunc__SSEtoX86_switch_output2 ||
p==DynamicFunc__X86toSSE_switch_input1 || p==DynamicFunc__X86toSSE_switch_input2 ||
p==DynamicFunc__X86toSSE_switch_output1 || p==DynamicFunc__X86toSSE_switch_output2 ||
p==DynamicFunc__ToSSE || p==DynamicFunc__ToX86)
return 1;
#endif
if (p==DynamicFunc__base16_convert_locase || p==DynamicFunc__base16_convert_upcase)
return 1;
return 0;
}
#endif
#define RETURN_TRUE_IF_BIG_FUNC(H) if(p==DynamicFunc__##H##_crypt_input1_append_input2 || \
p==DynamicFunc__##H##_crypt_input2_append_input1 || \
p==DynamicFunc__##H##_crypt_input1_overwrite_input1 || \
p==DynamicFunc__##H##_crypt_input2_overwrite_input2 || \
p==DynamicFunc__##H##_crypt_input1_overwrite_input2 || \
p==DynamicFunc__##H##_crypt_input2_overwrite_input1 || \
p==DynamicFunc__##H##_crypt_input1_to_output1_FINAL || \
p==DynamicFunc__##H##_crypt_input2_to_output1_FINAL) \
return 1
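/* Illustrative expansion only: RETURN_TRUE_IF_BIG_FUNC(MD4) turns into a single
   if() that compares p against the eight generated DynamicFunc__MD4_crypt_*
   primitives, e.g.
       if (p==DynamicFunc__MD4_crypt_input1_append_input2 || ... ||
           p==DynamicFunc__MD4_crypt_input2_to_output1_FINAL)
           return 1;
*/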
static int isMD4Func(DYNAMIC_primitive_funcp p)
{
// handle flats
RETURN_TRUE_IF_BIG_FUNC(MD4);
// handle older mmx_coef variants
if (p==DynamicFunc__crypt_md4 || p==DynamicFunc__crypt_md4_in1_to_out2 ||
p==DynamicFunc__crypt2_md4 || p==DynamicFunc__crypt_md4_in2_to_out1)
return 1;
return 0;
}
#ifdef _OPENMP
// Only used in OMP code, to compute LCM granularity. So we #ifdef it out to avoid compiler warnings.
#ifdef SIMD_COEF_32
// otherwise unused
static int isMD5Func(DYNAMIC_primitive_funcp p)
{
// handle flats
RETURN_TRUE_IF_BIG_FUNC(MD5);
// handle older mmx_coef variants
if (p==DynamicFunc__crypt_md5 || p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1 ||
p==DynamicFunc__crypt_md5_in1_to_out2 || p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2 ||
p==DynamicFunc__crypt_md5_to_input_raw || p==DynamicFunc__crypt_md5_to_input_raw_Overwrite_NoLen ||
p==DynamicFunc__crypt_md5_in2_to_out1 || p==DynamicFunc__crypt_md5_to_input_raw_Overwrite_NoLen_but_setlen_in_SSE ||
p==DynamicFunc__crypt2_md5 || p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1_offset32)
return 1;
return 0;
}
#endif
#endif
static int isSHA1Func(DYNAMIC_primitive_funcp p) {
RETURN_TRUE_IF_BIG_FUNC(SHA1);
return 0;
}
static int isSHA2_256Func(DYNAMIC_primitive_funcp p) {
RETURN_TRUE_IF_BIG_FUNC(SHA224); RETURN_TRUE_IF_BIG_FUNC(SHA256);
return 0;
}
static int isSHA2_512Func(DYNAMIC_primitive_funcp p) {
RETURN_TRUE_IF_BIG_FUNC(SHA384); RETURN_TRUE_IF_BIG_FUNC(SHA512);
return 0;
}
static int isGOSTFunc(DYNAMIC_primitive_funcp p) {
RETURN_TRUE_IF_BIG_FUNC(GOST);
return 0;
}
static int isTigerFunc(DYNAMIC_primitive_funcp p) {
RETURN_TRUE_IF_BIG_FUNC(Tiger);
return 0;
}
static int isWHIRLFunc(DYNAMIC_primitive_funcp p) {
RETURN_TRUE_IF_BIG_FUNC(WHIRLPOOL);
return 0;
}
static int isRIPEMDFunc(DYNAMIC_primitive_funcp p) {
RETURN_TRUE_IF_BIG_FUNC(RIPEMD128); RETURN_TRUE_IF_BIG_FUNC(RIPEMD160);
RETURN_TRUE_IF_BIG_FUNC(RIPEMD256); RETURN_TRUE_IF_BIG_FUNC(RIPEMD320);
return 0;
}
static int isHAVALFunc(DYNAMIC_primitive_funcp p) {
RETURN_TRUE_IF_BIG_FUNC(HAVAL128_3); RETURN_TRUE_IF_BIG_FUNC(HAVAL128_4); RETURN_TRUE_IF_BIG_FUNC(HAVAL128_5);
RETURN_TRUE_IF_BIG_FUNC(HAVAL160_3); RETURN_TRUE_IF_BIG_FUNC(HAVAL160_4); RETURN_TRUE_IF_BIG_FUNC(HAVAL160_5);
RETURN_TRUE_IF_BIG_FUNC(HAVAL192_3); RETURN_TRUE_IF_BIG_FUNC(HAVAL192_4); RETURN_TRUE_IF_BIG_FUNC(HAVAL192_5);
RETURN_TRUE_IF_BIG_FUNC(HAVAL224_3); RETURN_TRUE_IF_BIG_FUNC(HAVAL224_4); RETURN_TRUE_IF_BIG_FUNC(HAVAL224_5);
RETURN_TRUE_IF_BIG_FUNC(HAVAL256_3); RETURN_TRUE_IF_BIG_FUNC(HAVAL256_4); RETURN_TRUE_IF_BIG_FUNC(HAVAL256_5);
return 0;
}
static int isMD2Func(DYNAMIC_primitive_funcp p) {
RETURN_TRUE_IF_BIG_FUNC(MD2);
return 0;
}
static int isPANAMAFunc(DYNAMIC_primitive_funcp p) {
RETURN_TRUE_IF_BIG_FUNC(PANAMA);
return 0;
}
static int isSKEINFunc(DYNAMIC_primitive_funcp p) {
RETURN_TRUE_IF_BIG_FUNC(SKEIN224); RETURN_TRUE_IF_BIG_FUNC(SKEIN256);
RETURN_TRUE_IF_BIG_FUNC(SKEIN384); RETURN_TRUE_IF_BIG_FUNC(SKEIN512);
return 0;
}
static int isKECCAKFunc(DYNAMIC_primitive_funcp p) {
RETURN_TRUE_IF_BIG_FUNC(SHA3_224); RETURN_TRUE_IF_BIG_FUNC(SHA3_256); RETURN_TRUE_IF_BIG_FUNC(SHA3_384);
RETURN_TRUE_IF_BIG_FUNC(SHA3_512); RETURN_TRUE_IF_BIG_FUNC(KECCAK_256); RETURN_TRUE_IF_BIG_FUNC(KECCAK_512);
return 0;
}
// LARGE_HASH_EDIT_POINT (Add a new IsXXXFunc() type function)
static int isLargeHashFinalFunc(DYNAMIC_primitive_funcp p)
{
#undef IF
#define IF(H) p==DynamicFunc__##H##_crypt_input1_to_output1_FINAL||p==DynamicFunc__##H##_crypt_input2_to_output1_FINAL
if (IF(SHA1)||IF(SHA224)||IF(SHA256)||IF(SHA384)||IF(SHA512)||IF(GOST)||IF(WHIRLPOOL)||IF(Tiger)||IF(RIPEMD128)||
IF(RIPEMD160)||IF(RIPEMD256)||IF(RIPEMD320)||
IF(HAVAL128_3)||IF(HAVAL128_4)||IF(HAVAL128_5)||IF(HAVAL160_3)||IF(HAVAL160_4)||IF(HAVAL160_5)||
IF(HAVAL192_3)||IF(HAVAL192_4)||IF(HAVAL192_5)||IF(HAVAL224_3)||IF(HAVAL224_4)||IF(HAVAL224_5)||
IF(HAVAL256_3)||IF(HAVAL256_4)||IF(HAVAL256_5)||IF(MD2)||IF(PANAMA)||IF(SKEIN224)||IF(SKEIN256)||
IF(SKEIN384)||IF(SKEIN512)||IF(SHA3_224)||IF(SHA3_256)||IF(SHA3_384)||IF(SHA3_512)||
IF(KECCAK_256)||IF(KECCAK_512))
// LARGE_HASH_EDIT_POINT
return 1;
return 0;
}
#ifdef _OPENMP
#ifdef SIMD_COEF_32
// Simple euclid algorithm for GCD
static int GCD (int a, int b)
{
while (b) {
int t = b;
b = a % b;
a = t;
}
return a;
}
// simple algorithm for LCM is (a*b)/GCD(a,b)
static int LCM(int a, int b)
{
a/=GCD(a,b);
return a*b;
}
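// Worked example with illustrative values only: if SIMD_PARA_MD5*SIMD_COEF_32 == 12
// and SIMD_PARA_SHA1*SIMD_COEF_32 == 8, then GCD(12,8) == 4 and the OMP granularity
// computed below becomes LCM(12,8) == 12/4*8 == 24 keys per work unit.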
#endif
static void dyna_setupOMP(DYNAMIC_Setup *Setup, struct fmt_main *pFmt)
{
unsigned int i;
#ifndef SIMD_COEF_32
curdat.omp_granularity=OMP_INC;
#else
if ((curdat.pSetup->flags& MGF_NOTSSE2Safe) == MGF_NOTSSE2Safe)
curdat.omp_granularity=OMP_INC;
else {
curdat.omp_granularity = 1;
for (i=0; Setup->pFuncs[i]; ++i) {
if (isMD5Func(Setup->pFuncs[i]))
curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_PARA_MD5*SIMD_COEF_32);
else if (isMD4Func(Setup->pFuncs[i]))
curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_PARA_MD4*SIMD_COEF_32);
else if (isSHA1Func(Setup->pFuncs[i]))
curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_PARA_SHA1*SIMD_COEF_32);
else if (isSHA2_256Func(Setup->pFuncs[i]))
#if SIMD_COEF_32
#if SIMD_PARA_SHA256
curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_PARA_SHA256*SIMD_COEF_32);
#else
curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_COEF_32);
#endif
#else
curdat.omp_granularity=LCM(curdat.omp_granularity, OMP_INC);
#endif
else if (isSHA2_512Func(Setup->pFuncs[i]))
#if SIMD_COEF_64
#if SIMD_PARA_SHA512
curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_PARA_SHA512*SIMD_COEF_64);
#else
curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_COEF_64);
#endif
#else
curdat.omp_granularity=LCM(curdat.omp_granularity, OMP_INC);
#endif
}
}
#endif
for (i=0; Setup->pFuncs[i]; ++i) {
if (isBadOMPFunc(Setup->pFuncs[i]))
pFmt->params.flags &= (~(FMT_OMP|FMT_OMP_BAD));
}
if ((pFmt->params.flags&FMT_OMP)==FMT_OMP && (curdat.pSetup->startFlags&MGF_POOR_OMP)==MGF_POOR_OMP)
pFmt->params.flags |= FMT_OMP_BAD;
}
#endif
int dynamic_SETUP(DYNAMIC_Setup *Setup, struct fmt_main *pFmt)
{
unsigned int i, j, cnt, cnt2, x;
DYNAMIC_primitive_funcp *pFuncs;
if (Setup->flags & MGF_ColonNOTValid)
{
extern struct options_main options;
if (options.loader.field_sep_char == ':')
{
return 0;
}
}
// Deal with deprecated 1st functions. Convert them to proper 'flags'
if (Setup->pFuncs[0] == DynamicFunc__InitialLoadKeysToInput)
Setup->startFlags |= MGF_KEYS_INPUT;
if (Setup->pFuncs[0] == DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2)
Setup->startFlags |= MGF_KEYS_CRYPT_IN2;
if (Setup->pFuncs[0] == DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1)
Setup->startFlags |= MGF_KEYS_BASE16_IN1;
if (Setup->pFuncs[0] == DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1_offset32)
Setup->startFlags |= MGF_KEYS_BASE16_IN1_Offset32;
curdat.dynamic_40_byte_input = ((Setup->startFlags&MGF_INPUT_20_BYTE)==MGF_INPUT_20_BYTE) ? 1 : 0;
curdat.dynamic_48_byte_input = ((Setup->startFlags&MGF_INPUT_24_BYTE)==MGF_INPUT_24_BYTE) ? 1 : 0;
curdat.dynamic_64_byte_input = ((Setup->startFlags&MGF_INPUT_32_BYTE)==MGF_INPUT_32_BYTE) ? 1 : 0;
curdat.dynamic_56_byte_input = ((Setup->startFlags&MGF_INPUT_28_BYTE)==MGF_INPUT_28_BYTE) ? 1 : 0;
curdat.dynamic_80_byte_input = ((Setup->startFlags&MGF_INPUT_40_BYTE)==MGF_INPUT_40_BYTE) ? 1 : 0;
curdat.dynamic_96_byte_input = ((Setup->startFlags&MGF_INPUT_48_BYTE)==MGF_INPUT_48_BYTE) ? 1 : 0;
curdat.dynamic_128_byte_input= ((Setup->startFlags&MGF_INPUT_64_BYTE)==MGF_INPUT_64_BYTE) ? 1 : 0;
curdat.FldMask = 0;
curdat.b2Salts = ((Setup->flags&MGF_SALTED2)==MGF_SALTED2) ? 1 : 0;
curdat.dynamic_base16_upcase = ((Setup->flags&MGF_BASE_16_OUTPUT_UPCASE)==MGF_BASE_16_OUTPUT_UPCASE) ? 1 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD0)==MGF_FLD0) ? MGF_FLD0 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD1)==MGF_FLD1) ? MGF_FLD1 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD2)==MGF_FLD2) ? MGF_FLD2 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD3)==MGF_FLD3) ? MGF_FLD3 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD4)==MGF_FLD4) ? MGF_FLD4 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD5)==MGF_FLD5) ? MGF_FLD5 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD6)==MGF_FLD6) ? MGF_FLD6 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD7)==MGF_FLD7) ? MGF_FLD7 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD8)==MGF_FLD8) ? MGF_FLD8 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD9)==MGF_FLD9) ? MGF_FLD9 : 0;
curdat.dynamic_base64_inout = 0;
curdat.dynamic_salt_as_hex = 0;
curdat.dynamic_salt_as_hex_format_type = 0;
curdat.force_md5_ctx = 0;
curdat.nUserName = 0;
curdat.nPassCase = 1;
curdat.md5_startup_in_x86 = curdat.dynamic_use_sse = 0; // if 0, then never use SSE2
curdat.init = 0;
curdat.pSetup = Setup;
pFmt->methods.binary = get_binary;
pFmt->methods.cmp_all=cmp_all;
pFmt->methods.cmp_one=cmp_one;
pFmt->methods.source=fmt_default_source;
pFmt->methods.salt = get_salt;
pFmt->methods.done = done;
pFmt->methods.set_salt = set_salt;
pFmt->methods.salt_hash = salt_hash;
//pFmt->params.format_name = str_alloc_copy(Setup->szFORMAT_NAME);
pFmt->params.format_name = "";
pFmt->params.benchmark_length = 0; // NOTE 0 'assumes' salted. If unsalted, we set back to -1
pFmt->params.salt_size = 0;
curdat.using_flat_buffers_sse2_ok = 0; // used to distinguish MGF_NOTSSE2Safe from MGF_FLAT_BUFFERS
if ((Setup->flags & MGF_FLAT_BUFFERS) == MGF_FLAT_BUFFERS)
curdat.using_flat_buffers_sse2_ok = 1;
#ifdef SIMD_COEF_32
curdat.dynamic_use_sse = 1; // if 1, then we are in SSE2 mode (but can switch out)
if ((Setup->flags & MGF_NOTSSE2Safe) == MGF_NOTSSE2Safe) {
curdat.dynamic_use_sse = 0; // Do not use SSE code at all.
} else if ((Setup->flags & MGF_FLAT_BUFFERS) == MGF_FLAT_BUFFERS) {
curdat.dynamic_use_sse = 0; // uses flat buffers but will use SSE code (large formats use the flat buffers, and the SSE2 code 'mixes' them).
curdat.using_flat_buffers_sse2_ok = 1;
} else if ((Setup->flags & MGF_StartInX86Mode) == MGF_StartInX86Mode) {
curdat.dynamic_use_sse = 2; // if 2, then we are in SSE2 mode, but currently using X86 (and can switch back to SSE2).
curdat.md5_startup_in_x86 = 1;
}
if (curdat.dynamic_use_sse || curdat.using_flat_buffers_sse2_ok) {
pFmt->params.max_keys_per_crypt = MAX_KEYS_PER_CRYPT;
pFmt->params.algorithm_name = ALGORITHM_NAME;
} else {
pFmt->params.max_keys_per_crypt = MAX_KEYS_PER_CRYPT_X86;
pFmt->params.algorithm_name = ALGORITHM_NAME_X86;
}
#else
pFmt->params.max_keys_per_crypt = MAX_KEYS_PER_CRYPT_X86;
pFmt->params.algorithm_name = ALGORITHM_NAME_X86;
#endif
pFmt->params.min_keys_per_crypt = pFmt->params.max_keys_per_crypt;
if (pFmt->params.min_keys_per_crypt > 64)
pFmt->params.min_keys_per_crypt = 64;
dynamic_use_sse = curdat.dynamic_use_sse;
// Ok, set the new 'constants' data
memset(curdat.Consts, 0, sizeof(curdat.Consts));
memset(curdat.ConstsLen, 0, sizeof(curdat.ConstsLen));
for (curdat.nConsts = 0; curdat.nConsts < 8; ++curdat.nConsts)
{
if (Setup->pConstants[curdat.nConsts].Const == NULL)
break;
//curdat.Consts[curdat.nConsts] = (unsigned char*)str_alloc_copy(Setup->pConstants[curdat.nConsts].Const);
//curdat.ConstsLen[curdat.nConsts] = strlen(Setup->pConstants[curdat.nConsts].Const);
// we really do not 'have' to null terminate, but do just to be on the 'safe' side.
curdat.Consts[curdat.nConsts] = mem_alloc_tiny(Setup->pConstants[curdat.nConsts].len+1, MEM_ALIGN_NONE);
memcpy(curdat.Consts[curdat.nConsts], Setup->pConstants[curdat.nConsts].Const, Setup->pConstants[curdat.nConsts].len);
curdat.Consts[curdat.nConsts][Setup->pConstants[curdat.nConsts].len] = 0;
curdat.ConstsLen[curdat.nConsts] = Setup->pConstants[curdat.nConsts].len;
}
if ( (Setup->flags & MGF_INPBASE64) == MGF_INPBASE64)
{
curdat.dynamic_base64_inout = 1;
pFmt->methods.binary = binary_b64;
}
if ( (Setup->flags & MGF_INPBASE64m) == MGF_INPBASE64m)
{
curdat.dynamic_base64_inout = 3;
pFmt->methods.binary = binary_b64m;
}
if ( (Setup->flags & MGF_INPBASE64b) == MGF_INPBASE64b)
{
curdat.dynamic_base64_inout = 5;
pFmt->methods.binary = binary_b64b;
}
if ( (Setup->flags & MGF_INPBASE64_4x6) == MGF_INPBASE64_4x6)
{
curdat.dynamic_base64_inout = 2;
pFmt->methods.binary = binary_b64_4x6;
pFmt->methods.cmp_all = cmp_all_64_4x6;
pFmt->methods.cmp_one = cmp_one_64_4x6;
#if !ARCH_LITTLE_ENDIAN
pFmt->methods.binary_hash[0] = binary_hash_0_64x4;
pFmt->methods.binary_hash[1] = binary_hash_1_64x4;
pFmt->methods.binary_hash[2] = binary_hash_2_64x4;
pFmt->methods.binary_hash[3] = binary_hash_3_64x4;
pFmt->methods.binary_hash[4] = binary_hash_4_64x4;
pFmt->methods.binary_hash[5] = binary_hash_5_64x4;
pFmt->methods.get_hash[0] = get_hash_0_64x4;
pFmt->methods.get_hash[1] = get_hash_1_64x4;
pFmt->methods.get_hash[2] = get_hash_2_64x4;
pFmt->methods.get_hash[3] = get_hash_3_64x4;
pFmt->methods.get_hash[4] = get_hash_4_64x4;
pFmt->methods.get_hash[5] = get_hash_5_64x4;
#endif
// Not enough bits in a single WORD to do the 7th one.
pFmt->methods.binary_hash[6] = NULL;
pFmt->methods.get_hash[6] = NULL;
}
// printf ("%.13s",Setup->szFORMAT_NAME);
if ( (Setup->flags & (MGF_INPBASE64|MGF_INPBASE64_4x6|MGF_INPBASE64a|MGF_INPBASE64m|MGF_INPBASE64b)) == 0) {
pFmt->params.flags |= FMT_SPLIT_UNIFIES_CASE;
// printf (" Setting FMT_SPLIT_UNIFIES_CASE");
if (pFmt->methods.split == split) {
pFmt->methods.split = split_UC;
// printf (" split set to split_UC()\n");
}
}
// else printf (" split set to split()\n");
if (Setup->flags & MGF_UTF8)
pFmt->params.flags |= FMT_UTF8;
if (Setup->flags & MGF_INPBASE64a) {
curdat.dynamic_base64_inout = 1;
pFmt->methods.binary = binary_b64a;
}
if ( (Setup->flags & MGF_USERNAME) == MGF_USERNAME)
curdat.nUserName = 1;
if ( (Setup->flags & MGF_USERNAME_UPCASE) == MGF_USERNAME_UPCASE)
curdat.nUserName = 2;
if ( (Setup->flags & MGF_USERNAME_LOCASE) == MGF_USERNAME_LOCASE)
curdat.nUserName = 3;
// Ok, what 'flag' in the format struct, do we clear???
if ( (Setup->flags & MGF_PASSWORD_UPCASE) == MGF_PASSWORD_UPCASE) {
curdat.nPassCase = 2;
pFmt->params.flags &= (~FMT_CASE);
}
if ( (Setup->flags & MGF_PASSWORD_LOCASE) == MGF_PASSWORD_LOCASE) {
curdat.nPassCase = 3;
pFmt->params.flags &= (~FMT_CASE);
}
if ( (Setup->flags & MGF_SALT_AS_HEX) == MGF_SALT_AS_HEX) {
curdat.dynamic_salt_as_hex = 1;
curdat.dynamic_salt_as_hex_format_type = Setup->flags >> 56;
}
if ( (Setup->flags & MGF_SALT_AS_HEX_TO_SALT2) == MGF_SALT_AS_HEX_TO_SALT2) {
curdat.dynamic_salt_as_hex = 2;
if (curdat.b2Salts)
return !fprintf(stderr, "Error invalid format %s: MGF_SALT_AS_HEX_TO_SALT2 and MGF_SALTED2 are not valid to use in same format\n", Setup->szFORMAT_NAME);
curdat.b2Salts = 2;
}
if ( (Setup->flags & MGF_SALT_UNICODE_B4_CRYPT) == MGF_SALT_UNICODE_B4_CRYPT && curdat.dynamic_salt_as_hex)
curdat.dynamic_salt_as_hex |= 0x100;
if ( (Setup->flags & MGF_SALTED) == 0)
{
curdat.dynamic_FIXED_SALT_SIZE = 0;
pFmt->params.benchmark_length = -1;
pFmt->params.salt_size = 0;
}
else
{
pFmt->params.salt_size = sizeof(void *);
if (Setup->SaltLen > 0)
curdat.dynamic_FIXED_SALT_SIZE = Setup->SaltLen;
else
{
// says we have a salt, but NOT a fixed sized one that we 'know' about.
// if the SaltLen is -1, then there are NO constraints. If the SaltLen
// is -12 (or any other neg number other than -1), then there is no
// fixed salt length, but the 'max' salt size is -SaltLen. So, -12
// means any salt from 1 to 12 is 'valid'.
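// Example (illustrative): SaltLen == -16 leaves dynamic_FIXED_SALT_SIZE at -16
// on SIMD builds (variable-length salts of at most 16 bytes); non-SIMD builds
// widen that limit by 55 bytes just below.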
if (Setup->SaltLen > -2)
curdat.dynamic_FIXED_SALT_SIZE = -1;
else {
curdat.dynamic_FIXED_SALT_SIZE = Setup->SaltLen;
#if !defined (SIMD_COEF_32)
// for non-SSE builds we limit ourselves to 110 bytes, not 55, so we can allow 55 more bytes of salt (the stored value is negative, hence the subtraction)
curdat.dynamic_FIXED_SALT_SIZE -= 55;
#endif
}
}
}
if (Setup->MaxInputLen)
pFmt->params.plaintext_length = Setup->MaxInputLen;
else {
if ( ((Setup->flags&MGF_FLAT_BUFFERS)==MGF_FLAT_BUFFERS) || ((Setup->flags&MGF_NOTSSE2Safe)==MGF_NOTSSE2Safe)) {
pFmt->params.plaintext_length = 110 - abs(Setup->SaltLen);
if (pFmt->params.plaintext_length < 32)
pFmt->params.plaintext_length = 32;
} else {
pFmt->params.plaintext_length = 55 - abs(Setup->SaltLen);
if (pFmt->params.plaintext_length < 1) {
pFmt->params.plaintext_length = 1;
fprintf(stderr, "\nError, for format %s, MMX build, is not valid due to TOO long of a SaltLength\n", Setup->szFORMAT_NAME);
}
}
}
#ifndef SIMD_COEF_32
if (Setup->MaxInputLenX86) {
pFmt->params.plaintext_length = Setup->MaxInputLenX86;
} else {
if (Setup->SaltLenX86)
pFmt->params.plaintext_length = 110 - abs(Setup->SaltLenX86);
else
pFmt->params.plaintext_length = 110 - abs(Setup->SaltLen);
if (pFmt->params.plaintext_length < 32)
pFmt->params.plaintext_length = 32;
}
#endif
curdat.store_keys_in_input = !!(Setup->startFlags&MGF_KEYS_INPUT );
curdat.input2_set_len32 = !!(Setup->startFlags&MGF_SET_INP2LEN32);
if (Setup->startFlags&MGF_SOURCE) {
if (Setup->startFlags&MGF_INPUT_20_BYTE) pFmt->methods.source = source_20_hex;
else if (Setup->startFlags&MGF_INPUT_28_BYTE) pFmt->methods.source = source_28_hex;
else if (Setup->startFlags&MGF_INPUT_32_BYTE) pFmt->methods.source = source_32_hex;
else if (Setup->startFlags&MGF_INPUT_40_BYTE) pFmt->methods.source = source_40_hex;
else if (Setup->startFlags&MGF_INPUT_48_BYTE) pFmt->methods.source = source_48_hex;
else if (Setup->startFlags&MGF_INPUT_64_BYTE) pFmt->methods.source = source_64_hex;
else pFmt->methods.source = source;
}
if (!curdat.store_keys_in_input && Setup->startFlags&MGF_KEYS_INPUT_BE_SAFE)
curdat.store_keys_in_input = 3;
curdat.store_keys_in_input_unicode_convert = !!(Setup->startFlags&MGF_KEYS_UNICODE_B4_CRYPT);
if (curdat.store_keys_in_input_unicode_convert && curdat.store_keys_in_input)
return !fprintf(stderr, "Error invalid format %s: Using MGF_KEYS_INPUT and MGF_KEYS_UNICODE_B4_CRYPT in same format is NOT valid\n", Setup->szFORMAT_NAME);
curdat.store_keys_normal_but_precompute_hash_to_output2 = !!(Setup->startFlags&MGF_KEYS_CRYPT_IN2);
curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1 = !!(Setup->startFlags&MGF_KEYS_BASE16_IN1);
if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1)
curdat.store_keys_normal_but_precompute_hash_to_output2 = 1;
#define IF_CDOFF32(F,L) if (!curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX) \
curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX = \
(!!((Setup->startFlags&MGF_KEYS_BASE16_IN1_Offset_TYPE)==MGF_KEYS_BASE16_IN1_Offset_ ## F))*L
curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX = 0;
IF_CDOFF32(MD5,32); IF_CDOFF32(MD4,32); IF_CDOFF32(SHA1,40); IF_CDOFF32(SHA224,56);
IF_CDOFF32(SHA256,64); IF_CDOFF32(SHA384,96); IF_CDOFF32(SHA512,128); IF_CDOFF32(GOST,64);
IF_CDOFF32(WHIRLPOOL,128); IF_CDOFF32(Tiger,48); IF_CDOFF32(RIPEMD128,32); IF_CDOFF32(RIPEMD160,40);
IF_CDOFF32(RIPEMD256,64); IF_CDOFF32(RIPEMD320,80); IF_CDOFF32(MD2,32); IF_CDOFF32(PANAMA,64);
IF_CDOFF32(HAVAL128_3,32); IF_CDOFF32(HAVAL160_3,40); IF_CDOFF32(HAVAL192_3,48); IF_CDOFF32(HAVAL224_3,56); IF_CDOFF32(HAVAL256_3,64);
IF_CDOFF32(HAVAL128_4,32); IF_CDOFF32(HAVAL160_4,40); IF_CDOFF32(HAVAL192_4,48); IF_CDOFF32(HAVAL224_4,56); IF_CDOFF32(HAVAL256_4,64);
IF_CDOFF32(HAVAL128_5,32); IF_CDOFF32(HAVAL160_5,40); IF_CDOFF32(HAVAL192_5,48); IF_CDOFF32(HAVAL224_5,56); IF_CDOFF32(HAVAL256_5,64);
IF_CDOFF32(SKEIN224,56); IF_CDOFF32(SKEIN256,64); IF_CDOFF32(SKEIN384,96); IF_CDOFF32(SKEIN512,128);
IF_CDOFF32(SHA3_224,56); IF_CDOFF32(SHA3_256,64); IF_CDOFF32(SHA3_384,96); IF_CDOFF32(SHA3_512,128);
IF_CDOFF32(KECCAK_256,64); IF_CDOFF32(KECCAK_512,128);
// LARGE_HASH_EDIT_POINT
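// Illustrative expansion: IF_CDOFF32(SHA1,40) stores 40 into the offsetX member
// when startFlags selects MGF_KEYS_BASE16_IN1_Offset_SHA1, and only if no earlier
// IF_CDOFF32 invocation already set a non-zero offset (first match wins).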
if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX)
{
curdat.store_keys_normal_but_precompute_hash_to_output2 = 1;
}
curdat.store_keys_normal_but_precompute_hash_to_output2_base16_type = Setup->startFlags>>56;
if ((Setup->startFlags) == 0)
{
// Ok, if we do not have some 'special' loader function, we MUST first clean some
// input. If that is not done, there is NO WAY this is a valid format. This is
// NOT an intelligent check, but more like the dummy lights on newer automobiles.
// You know it will not work, but do not know 'why', nor should you care.
if (Setup->pFuncs[0] != DynamicFunc__clean_input &&
Setup->pFuncs[0] != DynamicFunc__clean_input2 &&
Setup->pFuncs[0] != DynamicFunc__clean_input_kwik &&
Setup->pFuncs[0] != DynamicFunc__clean_input2_kwik &&
Setup->pFuncs[0] != DynamicFunc__clean_input_full)
return !fprintf(stderr, "Error invalid format %s: The first command MUST be a clean of input 1 or input 2 OR a special key 2 input loader function\n", Setup->szFORMAT_NAME);
}
if ( (Setup->flags&MGF_SALTED2)==MGF_SALTED2 && (Setup->flags&MGF_SALT_AS_HEX) == MGF_SALT_AS_HEX)
{
// if the user wants salt_as_hex, then here can NOT be 2 salts.
return !fprintf(stderr, "Error invalid format %s: If using MGF_SALT_AS_HEX flag, then you can NOT have a 2nd salt.\n", Setup->szFORMAT_NAME);
}
if (Setup->pFuncs && Setup->pFuncs[0])
{
unsigned int z;
for (z = 0; Setup->pFuncs[z]; ++z)
;
z += 50;
curdat.dynamic_FUNCTIONS = mem_alloc_tiny(z*sizeof(DYNAMIC_primitive_funcp), MEM_ALIGN_WORD);
j = 0;
#if !ARCH_LITTLE_ENDIAN
// for bigendian, we do NOT store into keys, since we byte swap them.
if (curdat.store_keys_in_input==1) {
// this is only a minor speed hit, so simply fix by doing this. There is an
// extra memcpy, that is it.
curdat.store_keys_in_input = 0;
curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__clean_input;
curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__append_keys;
}
// NOTE NOTE NOTE, FIXME. These are 'hacks' which slow stuff way down. We should look at
// building preloads that CAN do this. Store key input to input 1, but then do not use
// input 1. Put a copy to input 2, then append, etc. In that way, we cut the number of
// MD5's down by at least 1.
//
// But for now, just get it working. Get it working faster later.
// NOTE, these are commented out now. I am not sure why they were there.
// I think the thought was for SIMD, BUT SIMD is not used on Sparc.
// I am leaving this code for now, BUT I think it should NOT be here.
// I was getting failures on the 16 byte sph formats, for any
// hash(hash($p).$s) such as md2(md2($p).$s). However, the modification
// above for curdat.store_keys_in_input==1 is absolutely needed, or we get
// get_key() failures all over the place.
// note: with Setup->pFuncs[0]==DynamicFunc__set_input_len_32, we will only handle types 6 and 7.
// For now we have this 'turned' off. It is fixed for types 6, 7 and 14. It is left on for the
// john.ini stuff. Thus, if someone builds the intel version of type 6, it will work (but slower).
// if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1==1 && Setup->pFuncs[0]==DynamicFunc__set_input_len_32) {
// curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1 = 0;
// curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__clean_input;
// curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__append_keys;
// curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__crypt_md5;
// curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__clean_input;
// Setup->pFuncs[0] = DynamicFunc__append_from_last_output_as_base16;
// }
#endif
for (i=0; Setup->pFuncs[i]; ++i)
{
if (j > z-10)
{
unsigned int k;
DYNAMIC_primitive_funcp *pOldFuncs = curdat.dynamic_FUNCTIONS;
z += 100;
curdat.dynamic_FUNCTIONS = mem_alloc_tiny(z*sizeof(DYNAMIC_primitive_funcp), MEM_ALIGN_WORD);
// copy the function pointers loaded so far into the grown array
// (the old self-assignment loop silently dropped them)
for (k = 0; k < j; ++k)
curdat.dynamic_FUNCTIONS[k] = pOldFuncs[k];
}
if (curdat.store_keys_in_input)
{
if (Setup->pFuncs[i] == DynamicFunc__append_keys)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_keys called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__append_keys2)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_keys2 called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__clean_input)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but clean_input called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__append_salt)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_salt called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__append_from_last_output2_to_input1_as_base16)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_from_last_output2_to_input1_as_base16 called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__overwrite_from_last_output2_to_input1_as_base16_no_size_fix)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but overwrite_from_last_output2_to_input1_as_base16_no_size_fix called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__append_from_last_output_as_base16)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_from_last_output_as_base16 called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__overwrite_from_last_output_as_base16_no_size_fix)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but overwrite_from_last_output_as_base16_no_size_fix called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__append_2nd_salt)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_2nd_salt called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__set_input_len_32)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but DynamicFunc__set_input_len_32 called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__set_input_len_64)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but DynamicFunc__set_input_len_64 called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__overwrite_salt_to_input1_no_size_fix)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but DynamicFunc__overwrite_salt_to_input1_no_size_fix called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__append_input_from_input2)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but DynamicFunc__append_input_from_input2 called and that is invalid\n", Setup->szFORMAT_NAME);
}
// Ok if copy constants are set, make SURE we have that many constants.
if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST1 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST1) && curdat.nConsts == 0)
return !fprintf(stderr, "Error invalid format %s: Append Constant function called, but NO constants in the format\n", Setup->szFORMAT_NAME);
if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST2 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST2) && curdat.nConsts < 2)
return !fprintf(stderr, "Error invalid format %s: Append Constant #2 function called, but NO constants, or less than 2 constants in the format\n", Setup->szFORMAT_NAME);
if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST3 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST3) && curdat.nConsts < 3)
return !fprintf(stderr, "Error invalid format %s: Append Constant #3 function called, but NO constants, or less than 3 constants in the format\n", Setup->szFORMAT_NAME);
if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST4 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST4) && curdat.nConsts < 4)
return !fprintf(stderr, "Error invalid format %s: Append Constant #4 function called, but NO constants, or less than 4 constants in the format\n", Setup->szFORMAT_NAME);
if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST5 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST5) && curdat.nConsts < 5)
return !fprintf(stderr, "Error invalid format %s: Append Constant #5 function called, but NO constants, or less than 5 constants in the format\n", Setup->szFORMAT_NAME);
if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST6 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST6) && curdat.nConsts < 6)
return !fprintf(stderr, "Error invalid format %s: Append Constant #6 function called, but NO constants, or less than 6 constants in the format\n", Setup->szFORMAT_NAME);
if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST7 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST7) && curdat.nConsts < 7)
return !fprintf(stderr, "Error invalid format %s: Append Constant #7 function called, but NO constants, or less than 7 constants in the format\n", Setup->szFORMAT_NAME);
if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST8 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST8) && curdat.nConsts < 8)
return !fprintf(stderr, "Error invalid format %s: Append Constant #8 function called, but NO constants, or less than 8 constants in the format\n", Setup->szFORMAT_NAME);
if ( (Setup->pFuncs[i] == DynamicFunc__append_2nd_salt || Setup->pFuncs[i] == DynamicFunc__append_2nd_salt2) && curdat.b2Salts == 0)
return !fprintf(stderr, "Error invalid format %s: A call to one of the 'salt-2' functions, but this format does not have the MGF_SALTED2 flag set\n", Setup->szFORMAT_NAME);
// Ok, if we have made it here, the function is 'currently' still valid. Load this pointer into our array of pointers.
pFuncs = ConvertFuncs(Setup->pFuncs[i], &cnt2);
#define IS_FUNC_NAME(H,N) if(is##H##Func(pFuncs[x])){ if (!strcmp(pFmt->params.algorithm_name, ALGORITHM_NAME)) pFmt->params.algorithm_name = ALGORITHM_NAME_##N; \
else if (!strcmp(pFmt->params.algorithm_name, ALGORITHM_NAME_X86)) pFmt->params.algorithm_name = ALGORITHM_NAME_X86_##N; }
for (x = 0; x < cnt2; ++x) {
curdat.dynamic_FUNCTIONS[j++] = pFuncs[x];
if (pFuncs[x] == DynamicFunc__setmode_unicode)
pFmt->params.flags |= FMT_UNICODE;
IS_FUNC_NAME(SHA1,S)
if (isSHA2_256Func(pFuncs[x])) {
#ifdef SIMD_COEF_32
if (curdat.using_flat_buffers_sse2_ok)
pFmt->params.algorithm_name = ALGORITHM_NAME_S2_256;
else
#endif
pFmt->params.algorithm_name = ALGORITHM_NAME_X86_S2_256;
}
if (isSHA2_512Func(pFuncs[x])) {
#ifdef SIMD_COEF_64
if (curdat.using_flat_buffers_sse2_ok)
pFmt->params.algorithm_name = ALGORITHM_NAME_S2_512;
else
#endif
pFmt->params.algorithm_name = ALGORITHM_NAME_X86_S2_512;
}
IS_FUNC_NAME(MD4,4)
IS_FUNC_NAME(WHIRL,WP2)
IS_FUNC_NAME(GOST,GST2)
IS_FUNC_NAME(Tiger,TGR)
IS_FUNC_NAME(RIPEMD,RIPEMD)
IS_FUNC_NAME(HAVAL,HAVAL)
IS_FUNC_NAME(MD2,MD2)
IS_FUNC_NAME(PANAMA,PANAMA)
IS_FUNC_NAME(SKEIN,SKEIN)
// Note, until we add SIMD Keccak, one algorithm is all we 'need'
IS_FUNC_NAME(KECCAK,KECCAK)
// IS_FUNC_NAME(KECCAK,SHA3_256)
// IS_FUNC_NAME(KECCAK,SHA3_384)
// IS_FUNC_NAME(KECCAK,SHA3_512)
// IS_FUNC_NAME(KECCAK,KECCAK_256)
// IS_FUNC_NAME(KECCAK,KECCAK_512)
// LARGE_HASH_EDIT_POINT (MUST match the just added a new IsXXXFunc() type function)
}
if (isLargeHashFinalFunc(curdat.dynamic_FUNCTIONS[j-1]))
{
if (Setup->pFuncs[i+1])
return !fprintf(stderr, "Error invalid format %s: DynamicFunc__LARGE_HASH_crypt_inputX_to_output1_FINAL, can ONLY be used as the last function in a script\n", Setup->szFORMAT_NAME);
}
}
curdat.dynamic_FUNCTIONS[j] = NULL;
}
if (!Setup->pPreloads || Setup->pPreloads[0].ciphertext == NULL)
{
return !fprintf(stderr, "Error invalid format %s: no validation hashes for this format\n", Setup->szFORMAT_NAME);
}
cnt = 0;
#ifdef _OPENMP
dyna_setupOMP(Setup, pFmt);
#endif
{
struct fmt_tests *pfx = mem_alloc_tiny(ARRAY_COUNT(dynamic_tests) * sizeof (struct fmt_tests), MEM_ALIGN_WORD);
memset(pfx, 0, ARRAY_COUNT(dynamic_tests) * sizeof (struct fmt_tests));
for (i = 0; cnt < ARRAY_COUNT(dynamic_tests) -1; ++i)
{
if (Setup->pPreloads[i].ciphertext == NULL) {
i = 0;
}
if (Setup->pPreloads[i].ciphertext[0] == 'A' && Setup->pPreloads[i].ciphertext[1] == '=') {
if (options.target_enc != ASCII && options.target_enc != ISO_8859_1)
continue;
pfx[cnt].ciphertext = str_alloc_copy(&Setup->pPreloads[i].ciphertext[2]);
}
else if (Setup->pPreloads[i].ciphertext[0] == 'U' && Setup->pPreloads[i].ciphertext[1] == '=') {
if (options.target_enc != UTF_8)
continue;
pfx[cnt].ciphertext = str_alloc_copy(&Setup->pPreloads[i].ciphertext[2]);
}
else
pfx[cnt].ciphertext = str_alloc_copy(Setup->pPreloads[i].ciphertext);
pfx[cnt].plaintext = str_alloc_copy(Setup->pPreloads[i].plaintext);
pfx[cnt].fields[0] = Setup->pPreloads[i].fields[0] ? str_alloc_copy(Setup->pPreloads[i].fields[0]) : "";
pfx[cnt].fields[1] = pfx[cnt].ciphertext;
for (j = 2; j < 10; ++j)
pfx[cnt].fields[j] = Setup->pPreloads[i].fields[j] ? str_alloc_copy(Setup->pPreloads[i].fields[j]) : "";
++cnt;
}
pfx[cnt].ciphertext = NULL;
pfx[cnt].plaintext = NULL;
pFmt->params.tests = pfx;
}
if (curdat.dynamic_base16_upcase)
dynamic_itoa16 = itoa16u;
else
dynamic_itoa16 = itoa16;
{
char s[512], *cp;
cp = Setup->szFORMAT_NAME;
cp = strchr(Setup->szFORMAT_NAME, ' ');
++cp;
sprintf(s, "%s %s", cp, pFmt->params.algorithm_name);
pFmt->params.algorithm_name = str_alloc_copy(s);
}
if ((Setup->flags & MGF_SALTED) && !Setup->SaltLen)
return !fprintf(stderr, "Error invalid format %s\n\tIt is required to add SaltLen= to the script, for this format\n", Setup->szFORMAT_NAME);
return 1;
}
static int LoadOneFormat(int idx, struct fmt_main *pFmt)
{
extern struct options_main options;
char label[16] = { 0 }, label_id[16] = { 0 }, *cp = NULL;
memcpy(pFmt, &fmt_Dynamic, sizeof(struct fmt_main));
// TODO:
// NOTE, this was commented out because the late-binding @dynamic=expr@
// hashes were wiping out possibly pre-set-up input buffers. Note that
// things worked fine after this, all self tests do pass, and I am 99%
// sure that all of this 'required' cleaning happens in init(), but I am
// putting this comment in here so that if, at a later time, problems are
// tracked down to this, we will know why.
// dynamic_RESET(pFmt);
// Ok we need to list this as a dynamic format (even for the 'thin' formats)
pFmt->params.flags |= FMT_DYNAMIC;
if (idx < 1000) {
if (dynamic_RESERVED_PRELOAD_SETUP(idx, pFmt) != 1)
return 0;
}
else {
if (dynamic_LOAD_PARSER_FUNCTIONS(idx, pFmt) != 1)
return 0;
}
/* we 'have' to take the sig from the test array. If we do not have */
/* our preload array 'solid', then the idx will not be the proper */
/* number. So we simply grab the label from the test ciphertext string */
strncpy(label, pFmt->params.tests[0].ciphertext, 15);
cp = strchr(&label[1], '$');
if (NULL != cp) cp[1] = 0;
strcpy(label_id, &label[1]);
cp = strchr(label_id, '$');
if (NULL != cp) *cp = 0;
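/* e.g. (illustrative signature) for a test ciphertext beginning "$dynamic_2$...",
   the code above yields label == "$dynamic_2$" and label_id == "dynamic_2" */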
// if (!options.format || strncmp(options.format, "dynamic_", 8))
// pFmt->params.label = str_alloc_copy("dynamic");
// else
pFmt->params.label = str_alloc_copy(label_id);
strcpy(curdat.dynamic_WHICH_TYPE_SIG, label);
curdat.dynamic_HASH_OFFSET = strlen(label);
if (curdat.dynamic_base64_inout == 1 || curdat.dynamic_base64_inout == 3) {
// we have to compute 'proper' offset
const char *cp = pFmt->params.tests[0].ciphertext;
int len = base64_valid_length(&cp[curdat.dynamic_HASH_OFFSET], curdat.dynamic_base64_inout == 1 ? e_b64_crypt : e_b64_mime, flg_Base64_MIME_TRAIL_EQ_CNT);
curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + len + 1;
}
else if (curdat.dynamic_base64_inout == 2)
curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 16 + 1;
else if (curdat.dynamic_40_byte_input)
curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 40 + 1;
else if (curdat.dynamic_48_byte_input)
curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 48 + 1;
else if (curdat.dynamic_64_byte_input)
curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 64 + 1;
else if (curdat.dynamic_56_byte_input)
curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 56 + 1;
else if (curdat.dynamic_80_byte_input)
curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 80 + 1;
else if (curdat.dynamic_96_byte_input)
curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 96 + 1;
else if (curdat.dynamic_128_byte_input)
curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 128 + 1;
else
curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 32 + 1;
pFmt->private.data = mem_alloc_tiny(sizeof(private_subformat_data), MEM_ALIGN_WORD);
memcpy(pFmt->private.data, &curdat, sizeof(private_subformat_data));
if (strncmp(curdat.dynamic_WHICH_TYPE_SIG, pFmt->params.tests[0].ciphertext, strlen(curdat.dynamic_WHICH_TYPE_SIG)))
{
fprintf(stderr, "ERROR, when loading dynamic formats, the wrong curdat item was linked to this type:\nTYPE_SIG=%s\nTest_Dat=%s\n",
curdat.dynamic_WHICH_TYPE_SIG, pFmt->params.tests[0].ciphertext);
return 0;
}
return 1;
}
struct fmt_main *dynamic_Register_local_format(int *type) {
int num=nLocalFmts++;
private_subformat_data keep;
if (!pLocalFmts)
pLocalFmts = mem_calloc_tiny(1000*sizeof(struct fmt_main), 16);
/* since these are loaded LATE in the process, init() has been called
* and we HAVE to preserve the already loaded setup. This will happen
* if we run a crack, but do not specify a specific dyna format
*/
memcpy(&keep, &curdat, sizeof(private_subformat_data));
LoadOneFormat(num+6000, &(pLocalFmts[num]));
memcpy(&curdat, &keep, sizeof(private_subformat_data));
dynamic_use_sse = curdat.dynamic_use_sse;
force_md5_ctx = curdat.force_md5_ctx;
*type = num+6000;
return &(pLocalFmts[num]);
}
int dynamic_Register_formats(struct fmt_main **ptr)
{
int count, i, idx, single=-1, wildcard = 0, pop[5000];
extern struct options_main options;
if (options.format && strstr(options.format, "*"))
wildcard = 1;
Dynamic_Load_itoa16_w2();
if (!wildcard && options.format &&
!strncmp(options.format, "dynamic_", 8))
sscanf(options.format, "dynamic_%d", &single);
if (options.format && options.subformat && !strcmp(options.format, "dynamic") && !strncmp(options.subformat, "dynamic_", 8))
sscanf(options.subformat, "dynamic_%d", &single);
if (options.dynamic_bare_hashes_always_valid == 'Y')
dynamic_allow_rawhash_fixup = 1;
else if (options.dynamic_bare_hashes_always_valid != 'N' && cfg_get_bool(SECTION_OPTIONS, NULL, "DynamicAlwaysUseBareHashes", 1))
dynamic_allow_rawhash_fixup = 1;
if (single != -1) {
// user wanted only a 'specific' format. Simply load that one.
dynamic_allow_rawhash_fixup = 1;
if (dynamic_IS_VALID(single, 1) == 0)
return 0;
pFmts = mem_alloc_tiny(sizeof(pFmts[0]), MEM_ALIGN_WORD);
if (!LoadOneFormat(single, pFmts))
return 0;
*ptr = pFmts;
return (nFmts = 1);
}
for (count = i = 0; i < 5000; ++i) {
if ((pop[i] = (dynamic_IS_VALID(i, 0) == 1)))
++count;
}
// Ok, now we know how many formats we have. Load them
pFmts = mem_alloc_tiny(sizeof(pFmts[0])*count, MEM_ALIGN_WORD);
for (idx = i = 0; i < 5000; ++i) {
if (pop[i]) {
if (LoadOneFormat(i, &pFmts[idx]) == 0)
--count;
else
++idx;
}
}
*ptr = pFmts;
return (nFmts = count);
}
/*
* finds the 'proper' sub format from the allocated formats, IFF that format 'exists'
*/
static struct fmt_main *dynamic_Get_fmt_main(int which)
{
char label[40];
int i;
sprintf(label, "$dynamic_%d$", which);
for (i = 0; i < nFmts; ++i) {
private_subformat_data *pPriv = pFmts[i].private.data;
if (!strcmp(pPriv->dynamic_WHICH_TYPE_SIG, label))
return &pFmts[i];
}
for (i = 0; i < nLocalFmts; ++i) {
private_subformat_data *pPriv = pLocalFmts[i].private.data;
if (!strcmp(pPriv->dynamic_WHICH_TYPE_SIG, label))
return &pLocalFmts[i];
}
return NULL;
}
/*
* This function will 'forget' which md5-gen subtype we are working with. It will allow
* a different type to be used. Very useful for things like -test (benchmarking).
*/
static void dynamic_RESET(struct fmt_main *fmt)
{
memset(&curdat, 0, sizeof(curdat));
m_count = 0;
keys_dirty = 0;
cursalt=cursalt2=username=0;
saltlen=saltlen2=usernamelen=0;
// make 'sure' we start out with blank inputs.
m_count = 0;
#ifdef SIMD_COEF_32
if (input_buf) {
#else
if (input_buf_X86) {
#endif
__nonMP_DynamicFunc__clean_input_full();
__nonMP_DynamicFunc__clean_input2_full();
}
}
/*
* This will LINK our functions into some other fmt_main structure. That way
* that structure can use our code. The other *_fmt.c file will need to
* 'override' the valid, the binary and the salt functions, and make changes
* to the hash, BEFORE calling into the dynamic valid/binary/salt functions.
* Other than those functions (and calling into this linkage function at init time),
* that is about all that needs to be in that 'other' *_fmt.c file, as long as the
* format is part of the md5-generic 'class' of formats.
*/
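/*
 * Minimal usage sketch (hypothetical names, not taken from any real *_fmt.c):
 *
 *     static void my_thin_init(struct fmt_main *self) {
 *         dynamic_THIN_FORMAT_LINK(self, my_tests[0].ciphertext,
 *                                  "my-thin-format", 1);
 *     }
 *
 * after which the thin format keeps its own valid()/binary()/salt() wrappers
 * that rewrite its native hash strings into the "$dynamic_N$..." form before
 * delegating to the linked dynamic methods.
 */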
struct fmt_main *dynamic_THIN_FORMAT_LINK(struct fmt_main *pFmt, char *ciphertext, char *orig_sig, int bInitAlso)
{
int i, valid, nFmtNum;
struct fmt_main *pFmtLocal;
static char subformat[17], *cp;
dynamic_allow_rawhash_fixup = 0;
strncpy(subformat, ciphertext, 16);
subformat[16] = 0;
cp = strchr(&subformat[9], '$');
if (cp)
cp[1] = 0;
nFmtNum = -1;
sscanf(subformat, "$dynamic_%d", &nFmtNum);
if (nFmtNum == -1)
error_msg("Error, Invalid signature line trying to link to dynamic format.\nOriginal format=%s\nSignature line=%s\n", orig_sig, ciphertext);
pFmtLocal = dynamic_Get_fmt_main(nFmtNum);
if (pFmtLocal == NULL)
error_msg("Error, Invalid signature line trying to link to dynamic format.\nOriginal format=%s\nSignature line=%s\n", orig_sig, ciphertext);
valid = pFmtLocal->methods.valid(ciphertext, pFmtLocal);
if (!valid)
error_msg("Error, trying to link to %s using ciphertext=%s FAILED\n", subformat, ciphertext);
pFmt->params.algorithm_name = pFmtLocal->params.algorithm_name;
if (pFmt->params.plaintext_length == 0 ||
pFmt->params.plaintext_length > pFmtLocal->params.plaintext_length) {
pFmt->params.plaintext_length = pFmtLocal->params.plaintext_length;
pFmt->params.plaintext_min_length = pFmtLocal->params.plaintext_min_length;
}
pFmt->params.max_keys_per_crypt = pFmtLocal->params.max_keys_per_crypt;
pFmt->params.min_keys_per_crypt = pFmtLocal->params.max_keys_per_crypt;
if (pFmt->params.min_keys_per_crypt > 64)
pFmt->params.min_keys_per_crypt = 64;
pFmt->params.flags = pFmtLocal->params.flags;
if (pFmtLocal->params.salt_size)
pFmt->params.salt_size = sizeof(void*);
else
pFmt->params.salt_size = 0;
pFmt->methods.cmp_all = pFmtLocal->methods.cmp_all;
pFmt->methods.cmp_one = pFmtLocal->methods.cmp_one;
pFmt->methods.cmp_exact = pFmtLocal->methods.cmp_exact;
for (i = 0; i < FMT_TUNABLE_COSTS; ++i) {
pFmt->methods.tunable_cost_value[i] = pFmtLocal->methods.tunable_cost_value[i];
pFmt->params.tunable_cost_name[i] = pFmtLocal->params.tunable_cost_name[i];
}
pFmt->methods.source = pFmtLocal->methods.source;
pFmt->methods.set_salt = pFmtLocal->methods.set_salt;
pFmt->methods.salt = pFmtLocal->methods.salt;
pFmt->methods.done = pFmtLocal->methods.done;
pFmt->methods.salt_hash = pFmtLocal->methods.salt_hash;
pFmt->methods.split = pFmtLocal->methods.split;
pFmt->methods.set_key = pFmtLocal->methods.set_key;
pFmt->methods.get_key = pFmtLocal->methods.get_key;
pFmt->methods.clear_keys = pFmtLocal->methods.clear_keys;
pFmt->methods.crypt_all = pFmtLocal->methods.crypt_all;
pFmt->methods.prepare = pFmtLocal->methods.prepare;
pFmt->methods.salt_compare = pFmtLocal->methods.salt_compare;
for (i = 0; i < PASSWORD_HASH_SIZES; ++i)
{
pFmt->methods.binary_hash[i] = pFmtLocal->methods.binary_hash[i];
pFmt->methods.get_hash[i] = pFmtLocal->methods.get_hash[i];
}
if (bInitAlso)
{
//fprintf(stderr, "dynamic_THIN_FORMAT_LINK() calling init(%s)\n", subformat);
init(pFmtLocal);
}
pFmt->private.data = mem_alloc_tiny(sizeof(private_subformat_data), MEM_ALIGN_WORD);
memcpy(pFmt->private.data, pFmtLocal->private.data, sizeof(private_subformat_data));
return pFmtLocal;
}
// We ONLY deal with hex hashes at this time. If we later have to deal with
// base-64, this will become harder. Before this function existed, we had bugs where
// many things were loaded as 'being' valid, even if not.
static int looks_like_raw_hash(char *ciphertext, private_subformat_data *pPriv)
{
int i, cipherTextLen = CIPHERTEXT_LENGTH;
if (pPriv->dynamic_40_byte_input) {
cipherTextLen = 40;
} else if (pPriv->dynamic_48_byte_input) {
cipherTextLen = 48;
} else if (pPriv->dynamic_64_byte_input) {
cipherTextLen = 64;
} else if (pPriv->dynamic_56_byte_input) {
cipherTextLen = 56;
} else if (pPriv->dynamic_80_byte_input) {
cipherTextLen = 80;
} else if (pPriv->dynamic_96_byte_input) {
cipherTextLen = 96;
} else if (pPriv->dynamic_128_byte_input) {
cipherTextLen = 128;
}
for (i = 0; i < cipherTextLen; i++) {
if (atoi16[ARCH_INDEX(ciphertext[i])] == 0x7f)
return 0;
}
if ((pPriv->pSetup->flags&MGF_SALTED) == 0) {
if (!ciphertext[cipherTextLen])
return 1;
return 0;
}
return ciphertext[cipherTextLen] == '$';
}
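/* FixupIfNeeded(): when raw-hash fixup is allowed and the string looks like a
 * bare hash for this sub-format, the sub-format signature is prepended.
 * Illustrative example only: with dynamic_WHICH_TYPE_SIG == "$dynamic_0$", a bare
 * "5f4dcc3b5aa765d61d8327deb882cf99" is rewritten to
 * "$dynamic_0$5f4dcc3b5aa765d61d8327deb882cf99". */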
static char *FixupIfNeeded(char *ciphertext, private_subformat_data *pPriv)
{
if (!ciphertext || *ciphertext == 0 || *ciphertext == '*')
return ciphertext;
if (dynamic_allow_rawhash_fixup && strncmp(ciphertext, "$dynamic_", 9) && looks_like_raw_hash(ciphertext, pPriv))
{
static char __ciphertext[512+24];
if (pPriv->pSetup->flags & MGF_SALTED) {
if (!strchr(ciphertext, '$'))
return ciphertext;
}
if ( (pPriv->pSetup->flags & MGF_SALTED2) == MGF_SALTED2) {
if (!strstr(ciphertext, "$$2"))
return ciphertext;
}
if ( (pPriv->pSetup->flags & MGF_USERNAME) == MGF_USERNAME) {
if (!strstr(ciphertext, "$$U"))
return ciphertext;
}
if (pPriv->FldMask) {
int i;
for (i = 0; i < 10; ++i) {
if ((pPriv->FldMask & (MGF_FLDx_BIT<<i)) == (MGF_FLDx_BIT<<i)) {
char Fld[5];
sprintf(Fld, "$$F%d", i);
if (!strstr(&ciphertext[pPriv->dynamic_SALT_OFFSET-1], Fld))
return ciphertext;
}
}
}
strcpy(__ciphertext, pPriv->dynamic_WHICH_TYPE_SIG);
strnzcpy(&__ciphertext[strlen(__ciphertext)], ciphertext, 512);
return __ciphertext;
}
return ciphertext;
}
int text_in_dynamic_format_already(struct fmt_main *pFmt, char *ciphertext)
{
private_subformat_data *pPriv;
if (!pFmt) return 0;
/* NOTE, it 'is' possible to get called here without the private stuff being set up
properly (in valid, etc). So we simply grab the static private stuff each time */
pPriv = pFmt->private.data;
if (!ciphertext || !pPriv) return 0;
return !strncmp(ciphertext, pPriv->dynamic_WHICH_TYPE_SIG, strlen(pPriv->dynamic_WHICH_TYPE_SIG));
}
// if caseType == 1, return cp
// if caseType == 2, return upcase(cp)
// if caseType == 3, return locase(cp)
// if caseType == 4, return upcaseFirstChar(locase(cp))
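// e.g. (illustrative) HandleCase("McDonald", 3) returns "mcdonald" and
// HandleCase("mcdonald", 4) returns "Mcdonald"; if the conversion leaves the
// string unchanged, the original cp pointer is returned instead.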
static char *HandleCase(char *cp, int caseType)
{
static UTF8 dest[256];
switch(caseType) {
case 1:
return cp;
case 2:
enc_uc(dest, sizeof(dest), (unsigned char*)cp, strlen(cp));
if (!strcmp((char*)dest, cp))
return cp;
break;
case 3:
case 4:
enc_lc(dest, sizeof(dest), (unsigned char*)cp, strlen(cp));
if (caseType == 4)
dest[0] = low2up_ansi(dest[0]);
if (!strcmp((char*)dest, cp))
return cp;
break;
default:
return cp;
}
return (char*)dest;
}
int dynamic_real_salt_length(struct fmt_main *pFmt)
{
if (pFmt->params.flags & FMT_DYNAMIC) {
private_subformat_data *pPriv = pFmt->private.data;
if (pPriv == NULL || pPriv->pSetup == NULL)
return -1; // not a dynamic format, or called before we have loaded them!!
return abs(pPriv->pSetup->SaltLen);
}
// NOT a dynamic format
return -1;
}
#else
#warning Notice: Dynamic format disabled from build.
#endif /* DYNAMIC_DISABLED */
|
duplex.c | /*
* compute the duplex structure of two RNA strands,
* allowing only inter-strand base pairs.
* see cofold() for computing hybrid structures without
* restriction.
*
* Ivo Hofacker
* Vienna RNA package
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ctype.h>
#include <string.h>
#include "ViennaRNA/utils/basic.h"
#include "ViennaRNA/params/default.h"
#include "ViennaRNA/fold_vars.h"
#include "ViennaRNA/fold.h"
#include "ViennaRNA/pair_mat.h"
#include "ViennaRNA/params/basic.h"
#include "ViennaRNA/alifold.h"
#include "ViennaRNA/subopt.h"
#include "ViennaRNA/loops/all.h"
#include "ViennaRNA/duplex.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#define STACK_BULGE1 1 /* stacking energies for bulges of size 1 */
#define NEW_NINIO 1 /* new asymmetry penalty */
#define MAXSECTORS 500 /* dimension for a backtrack array */
#define LOCALITY 0. /* locality parameter for base-pairs */
#define UNIT 100
#define MINPSCORE -2 * UNIT
#define NONE -10000 /* score for forbidden pairs */
/*
#################################
# GLOBAL VARIABLES #
#################################
*/
/*
#################################
# PRIVATE VARIABLES #
#################################
*/
PRIVATE vrna_param_t *P = NULL;
PRIVATE int **c = NULL; /* energy array, given that i-j pair */
PRIVATE short *S1 = NULL, *SS1 = NULL, *S2 = NULL, *SS2 = NULL;
PRIVATE int n1, n2; /* sequence lengths */
#ifdef _OPENMP
/* NOTE: all variables are assumed to be uninitialized if they are declared as threadprivate
*/
#pragma omp threadprivate(P, c, S1, SS1, S2, SS2, n1, n2)
#endif
/*
#################################
# PRIVATE FUNCTION DECLARATIONS #
#################################
*/
PRIVATE duplexT
duplexfold_cu(const char *s1,
const char *s2,
int clean_up);
PRIVATE duplexT
aliduplexfold_cu(const char *s1[],
const char *s2[],
int clean_up);
PRIVATE char *
backtrack(int i,
int j);
PRIVATE char *
alibacktrack(int i,
int j,
const short **S1,
const short **S2);
PRIVATE int
compare(const void *sub1,
const void *sub2);
PRIVATE int
covscore(const int *types,
int n_seq);
/*
#################################
# BEGIN OF FUNCTION DEFINITIONS #
#################################
*/
PUBLIC duplexT
duplexfold(const char *s1,
const char *s2)
{
return duplexfold_cu(s1, s2, 1);
}
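/*
 * Usage sketch (illustrative sequences; assumes the library is linked and the
 * energy parameters/pair matrix are initialized as done in duplexfold_cu()):
 *
 *     duplexT hit = duplexfold("GGGGGAAACCCCC", "GGGGGUUUCCCCC");
 *     printf("%s  %d,%d  dG=%6.2f\n", hit.structure, hit.i, hit.j, hit.energy);
 *     free(hit.structure);
 */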
PRIVATE duplexT
duplexfold_cu(const char *s1,
const char *s2,
int clean_up)
{
int i, j, Emin = INF, i_min = 0, j_min = 0;
char *struc;
duplexT mfe;
vrna_md_t md;
n1 = (int)strlen(s1);
n2 = (int)strlen(s2);
set_model_details(&md);
if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) {
if (P)
free(P);
P = vrna_params(&md);
make_pair_matrix();
}
c = (int **)vrna_alloc(sizeof(int *) * (n1 + 1));
for (i = 1; i <= n1; i++)
c[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1));
S1 = encode_sequence(s1, 0);
S2 = encode_sequence(s2, 0);
SS1 = encode_sequence(s1, 1);
SS2 = encode_sequence(s2, 1);
for (i = 1; i <= n1; i++) {
for (j = n2; j > 0; j--) {
int type, type2, E, k, l;
type = pair[S1[i]][S2[j]];
c[i][j] = type ? P->DuplexInit : INF;
if (!type)
continue;
c[i][j] += vrna_E_ext_stem(type, (i > 1) ? SS1[i - 1] : -1, (j < n2) ? SS2[j + 1] : -1, P);
for (k = i - 1; k > 0 && k > i - MAXLOOP - 2; k--) {
for (l = j + 1; l <= n2; l++) {
if (i - k + l - j - 2 > MAXLOOP)
break;
type2 = pair[S1[k]][S2[l]];
if (!type2)
continue;
E = E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type],
SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P);
c[i][j] = MIN2(c[i][j], c[k][l] + E);
}
}
E = c[i][j];
E += vrna_E_ext_stem(rtype[type], (j > 1) ? SS2[j - 1] : -1, (i < n1) ? SS1[i + 1] : -1, P);
if (E < Emin) {
Emin = E;
i_min = i;
j_min = j;
}
}
}
struc = backtrack(i_min, j_min);
if (i_min < n1)
i_min++;
if (j_min > 1)
j_min--;
mfe.i = i_min;
mfe.j = j_min;
mfe.energy = (float)Emin / 100.;
mfe.structure = struc;
if (clean_up) {
for (i = 1; i <= n1; i++)
free(c[i]);
free(c);
free(S1);
free(S2);
free(SS1);
free(SS2);
}
return mfe;
}
PUBLIC duplexT *
duplex_subopt(const char *s1,
const char *s2,
int delta,
int w)
{
int i, j, n1, n2, thresh, E, n_subopt = 0, n_max;
char *struc;
duplexT mfe;
duplexT *subopt;
n_max = 16;
subopt = (duplexT *)vrna_alloc(n_max * sizeof(duplexT));
mfe = duplexfold_cu(s1, s2, 0);
free(mfe.structure);
thresh = (int)mfe.energy * 100 + 0.1 + delta;
n1 = strlen(s1);
n2 = strlen(s2);
for (i = n1; i > 0; i--) {
for (j = 1; j <= n2; j++) {
int type, ii, jj, Ed;
type = pair[S2[j]][S1[i]];
if (!type)
continue;
E = Ed = c[i][j];
Ed += vrna_E_ext_stem(type, (j > 1) ? SS2[j - 1] : -1, (i < n1) ? SS1[i + 1] : -1, P);
if (Ed > thresh)
continue;
/* to keep output small, remove hits that are dominated by a
* better one close by (within w). For simplicity we do the test without
* adding dangles, which is slightly inaccurate.
*/
for (ii = MAX2(i - w, 1); (ii <= MIN2(i + w, n1)) && type; ii++) {
for (jj = MAX2(j - w, 1); jj <= MIN2(j + w, n2); jj++)
if (c[ii][jj] < E) {
type = 0;
break;
}
}
if (!type)
continue;
struc = backtrack(i, j);
vrna_message_info(stderr, "%d %d %d", i, j, E);
if (n_subopt + 1 >= n_max) {
n_max *= 2;
subopt = (duplexT *)vrna_realloc(subopt, n_max * sizeof(duplexT));
}
subopt[n_subopt].i = MIN2(i + 1, n1);
subopt[n_subopt].j = MAX2(j - 1, 1);
subopt[n_subopt].energy = Ed * 0.01;
subopt[n_subopt++].structure = struc;
}
}
/* free all static globals */
for (i = 1; i <= n1; i++)
free(c[i]);
free(c);
free(S1);
free(S2);
free(SS1);
free(SS2);
if (subopt_sorted)
qsort(subopt, n_subopt, sizeof(duplexT), compare);
subopt[n_subopt].i = 0;
subopt[n_subopt].j = 0;
subopt[n_subopt].structure = NULL;
return subopt;
}
PRIVATE char *
backtrack(int i,
int j)
{
/* backtrack structure going backwards from i, and forwards from j;
* return structure in bracket notation with & as separator */
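/* e.g. a short duplex without interior loops might come back as "((((&))))";
* '.' marks unpaired positions on either strand (purely illustrative) */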
int k, l, type, type2, E, traced, i0, j0;
char *st1, *st2, *struc;
st1 = (char *)vrna_alloc(sizeof(char) * (n1 + 1));
st2 = (char *)vrna_alloc(sizeof(char) * (n2 + 1));
i0 = MIN2(i + 1, n1);
j0 = MAX2(j - 1, 1);
while (i > 0 && j <= n2) {
E = c[i][j];
traced = 0;
st1[i - 1] = '(';
st2[j - 1] = ')';
type = pair[S1[i]][S2[j]];
if (!type)
vrna_message_error("backtrack failed in fold duplex");
for (k = i - 1; k > 0 && k > i - MAXLOOP - 2; k--) {
for (l = j + 1; l <= n2; l++) {
int LE;
if (i - k + l - j - 2 > MAXLOOP)
break;
type2 = pair[S1[k]][S2[l]];
if (!type2)
continue;
LE = E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type],
SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P);
if (E == c[k][l] + LE) {
traced = 1;
i = k;
j = l;
break;
}
}
if (traced)
break;
}
if (!traced) {
E -= vrna_E_ext_stem(type, (i > 1) ? SS1[i - 1] : -1, (j < n2) ? SS2[j + 1] : -1, P);
if (E != P->DuplexInit)
vrna_message_error("backtrack failed in fold duplex");
else
break;
}
}
if (i > 1)
i--;
if (j < n2)
j++;
struc = (char *)vrna_alloc(i0 - i + 1 + j - j0 + 1 + 2);
for (k = MAX2(i, 1); k <= i0; k++)
if (!st1[k - 1])
st1[k - 1] = '.';
for (k = j0; k <= j; k++)
if (!st2[k - 1])
st2[k - 1] = '.';
strcpy(struc, st1 + MAX2(i - 1, 0));
strcat(struc, "&");
strcat(struc, st2 + j0 - 1);
/* printf("%s %3d,%-3d : %3d,%-3d\n", struc, i,i0,j0,j); */
free(st1);
free(st2);
return struc;
}
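/*
* Editorial note on the output format of backtrack() (illustrative, not from
* the original source): for a short duplex of four consecutive base pairs the
* returned string looks like "((((&))))"; everything left of '&' refers to
* positions in s1, everything right of it to positions in s2, and unpaired
* positions inside the reported range are printed as '.'.
*/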
/*------------------------------------------------------------------------*/
PRIVATE int
compare(const void *sub1,
const void *sub2)
{
int d;
if (((duplexT *)sub1)->energy > ((duplexT *)sub2)->energy)
return 1;
if (((duplexT *)sub1)->energy < ((duplexT *)sub2)->energy)
return -1;
d = ((duplexT *)sub1)->i - ((duplexT *)sub2)->i;
if (d != 0)
return d;
return ((duplexT *)sub1)->j - ((duplexT *)sub2)->j;
}
/*---------------------------------------------------------------------------*/
PUBLIC duplexT
aliduplexfold(const char *s1[],
const char *s2[])
{
return aliduplexfold_cu(s1, s2, 1);
}
PRIVATE duplexT
aliduplexfold_cu(const char *s1[],
const char *s2[],
int clean_up)
{
int i, j, s, n_seq, Emin = INF, i_min = 0, j_min = 0;
char *struc;
duplexT mfe;
short **S1, **S2;
int *type;
vrna_md_t md;
n1 = (int)strlen(s1[0]);
n2 = (int)strlen(s2[0]);
for (s = 0; s1[s] != NULL; s++);
n_seq = s;
for (s = 0; s2[s] != NULL; s++);
if (n_seq != s)
vrna_message_error("unequal number of sequences in aliduplexfold()\n");
set_model_details(&md);
if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) {
if (P)
free(P);
P = vrna_params(&md);
make_pair_matrix();
}
c = (int **)vrna_alloc(sizeof(int *) * (n1 + 1));
for (i = 1; i <= n1; i++)
c[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1));
S1 = (short **)vrna_alloc((n_seq + 1) * sizeof(short *));
S2 = (short **)vrna_alloc((n_seq + 1) * sizeof(short *));
for (s = 0; s < n_seq; s++) {
if (strlen(s1[s]) != n1)
vrna_message_error("unequal sequence lengths");
if (strlen(s2[s]) != n2)
vrna_message_error("unequal sequence lengths");
S1[s] = encode_sequence(s1[s], 0);
S2[s] = encode_sequence(s2[s], 0);
}
type = (int *)vrna_alloc(n_seq * sizeof(int));
for (i = 1; i <= n1; i++) {
for (j = n2; j > 0; j--) {
int k, l, E, psc;
for (s = 0; s < n_seq; s++)
type[s] = pair[S1[s][i]][S2[s][j]];
psc = covscore(type, n_seq);
for (s = 0; s < n_seq; s++)
if (type[s] == 0)
type[s] = 7;
c[i][j] = (psc >= MINPSCORE) ? (n_seq * P->DuplexInit) : INF;
if (psc < MINPSCORE)
continue;
for (s = 0; s < n_seq; s++)
c[i][j] += vrna_E_ext_stem(type[s],
(i > 1) ? S1[s][i - 1] : -1,
(j < n2) ? S2[s][j + 1] : -1,
P);
for (k = i - 1; k > 0 && k > i - MAXLOOP - 2; k--) {
for (l = j + 1; l <= n2; l++) {
int type2;
if (i - k + l - j - 2 > MAXLOOP)
break;
if (c[k][l] > INF / 2)
continue;
for (E = s = 0; s < n_seq; s++) {
type2 = pair[S1[s][k]][S2[s][l]];
if (type2 == 0)
type2 = 7;
E += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type[s]],
S1[s][k + 1], S2[s][l - 1], S1[s][i - 1], S2[s][j + 1], P);
}
c[i][j] = MIN2(c[i][j], c[k][l] + E);
}
}
c[i][j] -= psc;
E = c[i][j];
for (s = 0; s < n_seq; s++)
E +=
vrna_E_ext_stem(rtype[type[s]],
(j > 1) ? S2[s][j - 1] : -1,
(i < n1) ? S1[s][i + 1] : -1,
P);
if (E < Emin) {
Emin = E;
i_min = i;
j_min = j;
}
}
}
struc = alibacktrack(i_min, j_min, (const short **)S1, (const short **)S2);
if (i_min < n1)
i_min++;
if (j_min > 1)
j_min--;
mfe.i = i_min;
mfe.j = j_min;
mfe.energy = (float)(Emin / (100. * n_seq));
mfe.structure = struc;
if (clean_up) {
for (i = 1; i <= n1; i++)
free(c[i]);
free(c);
}
for (s = 0; s < n_seq; s++) {
free(S1[s]);
free(S2[s]);
}
free(S1);
free(S2);
free(type);
return mfe;
}
PUBLIC duplexT *
aliduplex_subopt(const char *s1[],
const char *s2[],
int delta,
int w)
{
int i, j, n1, n2, thresh, E, n_subopt = 0, n_max, s, n_seq, *type;
char *struc;
duplexT mfe;
duplexT *subopt;
short **S1, **S2;
n_max = 16;
subopt = (duplexT *)vrna_alloc(n_max * sizeof(duplexT));
mfe = aliduplexfold_cu(s1, s2, 0);
free(mfe.structure);
for (s = 0; s1[s] != NULL; s++);
n_seq = s;
thresh = (int)((mfe.energy * 100. + delta) * n_seq + 0.1);
n1 = strlen(s1[0]);
n2 = strlen(s2[0]);
S1 = (short **)vrna_alloc((n_seq + 1) * sizeof(short *));
S2 = (short **)vrna_alloc((n_seq + 1) * sizeof(short *));
for (s = 0; s < n_seq; s++) {
if (strlen(s1[s]) != n1)
vrna_message_error("unequal sequence lengths");
if (strlen(s2[s]) != n2)
vrna_message_error("unequal sequence lengths");
S1[s] = encode_sequence(s1[s], 0);
S2[s] = encode_sequence(s2[s], 0);
}
type = (int *)vrna_alloc(n_seq * sizeof(int));
for (i = n1; i > 0; i--) {
for (j = 1; j <= n2; j++) {
int ii, jj, skip, Ed, psc;
for (s = 0; s < n_seq; s++)
type[s] = pair[S2[s][j]][S1[s][i]];
psc = covscore(type, n_seq);
for (s = 0; s < n_seq; s++)
if (type[s] == 0)
type[s] = 7;
if (psc < MINPSCORE)
continue;
E = Ed = c[i][j];
for (s = 0; s < n_seq; s++)
Ed +=
vrna_E_ext_stem(type[s], (j > 1) ? S2[s][j - 1] : -1, (i < n1) ? S1[s][i + 1] : -1, P);
if (Ed > thresh)
continue;
/* To keep the output small, skip hits that are dominated by a
* better one nearby (within w). For simplicity we don't take dangles
* into account here, thus the heuristic is somewhat inaccurate.
*/
for (skip = 0, ii = MAX2(i - w, 1); (ii <= MIN2(i + w, n1)) && !skip; ii++) {
for (jj = MAX2(j - w, 1); jj <= MIN2(j + w, n2); jj++)
if (c[ii][jj] < E) {
skip = 1;
break;
}
}
if (skip)
continue;
struc = alibacktrack(i, j, (const short **)S1, (const short **)S2);
vrna_message_info(stderr, "%d %d %d", i, j, E);
if (n_subopt + 1 >= n_max) {
n_max *= 2;
subopt = (duplexT *)vrna_realloc(subopt, n_max * sizeof(duplexT));
}
subopt[n_subopt].i = MIN2(i + 1, n1);
subopt[n_subopt].j = MAX2(j - 1, 1);
subopt[n_subopt].energy = Ed * 0.01 / n_seq;
subopt[n_subopt++].structure = struc;
}
}
for (i = 1; i <= n1; i++)
free(c[i]);
free(c);
for (s = 0; s < n_seq; s++) {
free(S1[s]);
free(S2[s]);
}
free(S1);
free(S2);
free(type);
if (subopt_sorted)
qsort(subopt, n_subopt, sizeof(duplexT), compare);
subopt[n_subopt].i = 0;
subopt[n_subopt].j = 0;
subopt[n_subopt].structure = NULL;
return subopt;
}
PRIVATE char *
alibacktrack(int i,
int j,
const short **S1,
const short **S2)
{
/* backtrack structure going backwards from i, and forwards from j
* return structure in bracket notation with & as separator */
int k, l, *type, type2, E, traced, i0, j0, s, n_seq;
char *st1, *st2, *struc;
n1 = (int)S1[0][0];
n2 = (int)S2[0][0];
for (s = 0; S1[s] != NULL; s++);
n_seq = s;
for (s = 0; S2[s] != NULL; s++);
if (n_seq != s)
vrna_message_error("unequal number of sequences in alibacktrack()\n");
st1 = (char *)vrna_alloc(sizeof(char) * (n1 + 1));
st2 = (char *)vrna_alloc(sizeof(char) * (n2 + 1));
type = (int *)vrna_alloc(n_seq * sizeof(int));
i0 = MIN2(i + 1, n1);
j0 = MAX2(j - 1, 1);
while (i > 0 && j <= n2) {
int psc;
E = c[i][j];
traced = 0;
st1[i - 1] = '(';
st2[j - 1] = ')';
for (s = 0; s < n_seq; s++)
type[s] = pair[S1[s][i]][S2[s][j]];
psc = covscore(type, n_seq);
for (s = 0; s < n_seq; s++)
if (type[s] == 0)
type[s] = 7;
E += psc;
for (k = i - 1; k > 0 && k > i - MAXLOOP - 2; k--) {
for (l = j + 1; l <= n2; l++) {
int LE;
if (i - k + l - j - 2 > MAXLOOP)
break;
if (c[k][l] > INF / 2)
continue;
for (s = LE = 0; s < n_seq; s++) {
type2 = pair[S1[s][k]][S2[s][l]];
if (type2 == 0)
type2 = 7;
LE += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type[s]],
S1[s][k + 1], S2[s][l - 1], S1[s][i - 1], S2[s][j + 1], P);
}
if (E == c[k][l] + LE) {
traced = 1;
i = k;
j = l;
break;
}
}
if (traced)
break;
}
if (!traced) {
for (s = 0; s < n_seq; s++)
E -= vrna_E_ext_stem(type[s], (i > 1) ? S1[s][i - 1] : -1, (j < n2) ? S2[s][j + 1] : -1, P);
if (E != n_seq * P->DuplexInit)
vrna_message_error("backtrack failed in aliduplex");
else
break;
}
}
if (i > 1)
i--;
if (j < n2)
j++;
struc = (char *)vrna_alloc(i0 - i + 1 + j - j0 + 1 + 2);
for (k = MAX2(i, 1); k <= i0; k++)
if (!st1[k - 1])
st1[k - 1] = '.';
for (k = j0; k <= j; k++)
if (!st2[k - 1])
st2[k - 1] = '.';
strcpy(struc, st1 + MAX2(i - 1, 0));
strcat(struc, "&");
strcat(struc, st2 + j0 - 1);
/* printf("%s %3d,%-3d : %3d,%-3d\n", struc, i,i0,j0,j); */
free(st1);
free(st2);
free(type);
return struc;
}
PRIVATE int
covscore(const int *types,
int n_seq)
{
/*
* calculate the co-variance bonus for a pair, depending on
* compensatory/consistent mutations and incompatible sequences;
* should be 0 for conserved pairs, >0 for good pairs
*/
int k, l, s, score, pscore;
int dm[7][7] = { { 0, 0, 0, 0, 0, 0, 0 }, /* hamming distance between pairs */
{ 0, 0, 2, 2, 1, 2, 2 } /* CG */,
{ 0, 2, 0, 1, 2, 2, 2 } /* GC */,
{ 0, 2, 1, 0, 2, 1, 2 } /* GU */,
{ 0, 1, 2, 2, 0, 2, 1 } /* UG */,
{ 0, 2, 2, 1, 2, 0, 2 } /* AU */,
{ 0, 2, 2, 2, 1, 2, 0 } /* UA */ };
int pfreq[8] = {
0, 0, 0, 0, 0, 0, 0, 0
};
for (s = 0; s < n_seq; s++)
pfreq[types[s]]++;
if (pfreq[0] * 2 + pfreq[7] >= n_seq)
return NONE;
for (k = 1, score = 0; k <= 6; k++) /* ignore pairtype 7 (gap-gap) */
for (l = k + 1; l <= 6; l++)
/*
* scores for replacements between pairtypes
* consistent or compensatory mutations score 1 or 2
*/
score += pfreq[k] * pfreq[l] * dm[k][l];
/* counter examples score -1, gap-gap scores -0.25 */
pscore = cv_fact * ((UNIT * score) / n_seq -
nc_fact * UNIT * (pfreq[0] + pfreq[7] * 0.25));
return pscore;
}
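/*
* Worked example for covscore() (illustrative, not from the original source).
* Suppose n_seq = 4 and the observed pair types are CG, CG, GC and UA, i.e.
* pfreq[1] = 2, pfreq[2] = 1, pfreq[6] = 1 and pfreq[0] = pfreq[7] = 0. Then
*   score = pfreq[1]*pfreq[2]*dm[1][2]
*         + pfreq[1]*pfreq[6]*dm[1][6]
*         + pfreq[2]*pfreq[6]*dm[2][6]
*         = 2*1*2 + 2*1*2 + 1*1*2 = 10
* and, with no inconsistent (type 0) or gap-gap (type 7) pairs,
*   pscore = cv_fact * (UNIT * 10) / 4.
*/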
|
serial_tree_learner.h | #ifndef LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#define LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/array_args.h>
#include <LightGBM/tree_learner.h>
#include <LightGBM/dataset.h>
#include <LightGBM/tree.h>
#include "feature_histogram.hpp"
#include "split_info.hpp"
#include "data_partition.hpp"
#include "leaf_splits.hpp"
#include <cstdio>
#include <vector>
#include <random>
#include <cmath>
#include <memory>
#ifdef USE_GPU
// Use a 4KB-aligned allocator for ordered gradients and ordered hessians when GPU is enabled.
// This is necessary to pin the two arrays in memory and make transferring faster.
#include <boost/align/aligned_allocator.hpp>
#endif
namespace LightGBM {
/*!
* \brief Used for learning a tree on a single machine
*/
class SerialTreeLearner: public TreeLearner {
public:
explicit SerialTreeLearner(const TreeConfig* tree_config);
~SerialTreeLearner();
void Init(const Dataset* train_data, bool is_constant_hessian) override;
void ResetTrainingData(const Dataset* train_data) override;
void ResetConfig(const TreeConfig* tree_config) override;
Tree* Train(const score_t* gradients, const score_t *hessians, bool is_constant_hessian) override;
Tree* FitByExistingTree(const Tree* old_tree, const score_t* gradients, const score_t* hessians) const override;
void SetBaggingData(const data_size_t* used_indices, data_size_t num_data) override {
data_partition_->SetUsedDataIndices(used_indices, num_data);
}
void AddPredictionToScore(const Tree* tree, double* out_score) const override {
if (tree->num_leaves() <= 1) { return; }
CHECK(tree->num_leaves() <= data_partition_->num_leaves());
#pragma omp parallel for schedule(static)
for (int i = 0; i < tree->num_leaves(); ++i) {
double output = static_cast<double>(tree->LeafOutput(i));
data_size_t cnt_leaf_data = 0;
auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
for (data_size_t j = 0; j < cnt_leaf_data; ++j) {
out_score[tmp_idx[j]] += output;
}
}
}
protected:
/*!
* \brief Some initial work before training
*/
virtual void BeforeTrain();
/*!
* \brief Some initial work before FindBestSplit
*/
virtual bool BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf);
virtual void ConstructHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);
/*!
* \brief Find best thresholds for all features, using multi-threading.
* The result will be stored in smaller_leaf_splits_ and larger_leaf_splits_.
* This function will be called in FindBestSplit.
*/
virtual void FindBestThresholds();
/*!
* \brief Find best features for leaves from smaller_leaf_splits_ and larger_leaf_splits_.
* This function will be called after FindBestThresholds.
*/
virtual void FindBestSplitsForLeaves();
/*!
* \brief Partition tree and data according to the best split.
* \param tree Current tree, which will be split by this function.
* \param best_leaf The index of the leaf that will be split.
* \param left_leaf The index of the left leaf after the split.
* \param right_leaf The index of the right leaf after the split.
*/
virtual void Split(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf);
/*!
* \brief Get the number of data points in a leaf
* \param leaf_idx The index of the leaf
* \return The number of data points in the leaf_idx leaf
*/
inline virtual data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const;
/*! \brief number of data */
data_size_t num_data_;
/*! \brief number of features */
int num_features_;
/*! \brief training data */
const Dataset* train_data_;
/*! \brief gradients of current iteration */
const score_t* gradients_;
/*! \brief hessians of current iteration */
const score_t* hessians_;
/*! \brief training data partition on leaves */
std::unique_ptr<DataPartition> data_partition_;
/*! \brief used to randomly generate the set of used features */
Random random_;
/*! \brief used for sub-feature training, is_feature_used_[i] = false means feature i is not used */
std::vector<int8_t> is_feature_used_;
/*! \brief pointer to histograms array of parent of current leaves */
FeatureHistogram* parent_leaf_histogram_array_;
/*! \brief pointer to histograms array of smaller leaf */
FeatureHistogram* smaller_leaf_histogram_array_;
/*! \brief pointer to histograms array of larger leaf */
FeatureHistogram* larger_leaf_histogram_array_;
/*! \brief store best split points for all leaves */
std::vector<SplitInfo> best_split_per_leaf_;
/*! \brief stores best thresholds for all features for the smaller leaf */
std::unique_ptr<LeafSplits> smaller_leaf_splits_;
/*! \brief stores best thresholds for all features for the larger leaf */
std::unique_ptr<LeafSplits> larger_leaf_splits_;
#ifdef USE_GPU
/*! \brief gradients of current iteration, ordered for cache optimization, aligned to a 4K page */
std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_gradients_;
/*! \brief hessians of current iteration, ordered for cache optimization, aligned to a 4K page */
std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_hessians_;
#else
/*! \brief gradients of current iteration, ordered for cache optimization */
std::vector<score_t> ordered_gradients_;
/*! \brief hessians of current iteration, ordered for cache optimization */
std::vector<score_t> ordered_hessians_;
#endif
/*! \brief Store ordered bin */
std::vector<std::unique_ptr<OrderedBin>> ordered_bins_;
/*! \brief True if has ordered bin */
bool has_ordered_bin_ = false;
/*! \brief is_data_in_leaf_[i] != 0 means i-th data is marked */
std::vector<char> is_data_in_leaf_;
/*! \brief used to cache historical histograms to speed up training */
HistogramPool histogram_pool_;
/*! \brief config of tree learner*/
const TreeConfig* tree_config_;
int num_threads_;
std::vector<int> ordered_bin_indices_;
bool is_constant_hessian_;
};
inline data_size_t SerialTreeLearner::GetGlobalDataCountInLeaf(int leafIdx) const {
if (leafIdx >= 0) {
return data_partition_->leaf_count(leafIdx);
} else {
return 0;
}
}
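// Illustrative usage sketch (not part of the original header; the buffers,
// the config object and the ownership conventions of the caller are
// assumptions):
//
//   SerialTreeLearner learner(&tree_config);
//   learner.Init(train_data, /* is_constant_hessian = */ false);
//   std::unique_ptr<Tree> tree(
//     learner.Train(gradients, hessians, /* is_constant_hessian = */ false));
//   learner.AddPredictionToScore(tree.get(), out_scores);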
} // namespace LightGBM
#endif // LightGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
|
oracle12c_fmt_plug.c | /*
* This software is Copyright (c) 2015, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
* https://www.trustwave.com/Resources/SpiderLabs-Blog/Changes-in-Oracle-Database-12c-password-hashes/
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_oracle12c;
#elif FMT_REGISTERS_H
john_register_one(&fmt_oracle12c);
#else
#include <openssl/sha.h>
#include <string.h>
#include "arch.h"
//#undef SIMD_COEF_64
//#undef SIMD_PARA_SHA512
//#undef _OPENMP
#include "misc.h"
#include "memory.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "sha2.h"
#include "pbkdf2_hmac_sha512.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "Oracle12C"
#define FORMAT_NAME ""
#ifdef SIMD_COEF_64
#define ALGORITHM_NAME "PBKDF2-SHA512 " SHA512_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA512 32/" ARCH_BITS_STR
#endif
#define PLAINTEXT_LENGTH 125 // XXX
#define CIPHERTEXT_LENGTH 160
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(uint32_t)
#define BINARY_SIZE 64
#define BINARY_ALIGN sizeof(uint32_t)
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64 * SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64 * SIMD_PARA_SHA512)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define FORMAT_TAG "$oracle12c$"
#define FORMAT_TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
static struct fmt_tests tests[] = {
{"$oracle12c$e3243b98974159cc24fd2c9a8b30ba62e0e83b6ca2fc7c55177c3a7f82602e3bdd17ceb9b9091cf9dad672b8be961a9eac4d344bdba878edc5dcb5899f689ebd8dd1be3f67bff9813a464382381ab36b", "epsilon"},
{"$oracle12c$eda9535a516d5c7c75ef250f8b1b5fadc023ebfdad9b8d46f023b283cabc06f822e6db556a131d8f87fb427e6a7d592ca69b0e4eef22648aa7ba00afee786a8745057545117145650771143408825746", "18445407"},
{NULL}
};
static struct custom_salt {
int saltlen;
unsigned char salt[16 + 22 + 1];
} *cur_salt;
#ifdef SIMD_COEF_64
static char (*saved_key)[SHA_BUF_SIZ*sizeof(uint64_t)];
#else
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
#endif
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
static int omp_t = 1;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p = ciphertext;
if (strncasecmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LENGTH))
return 0;
if (strlen(ciphertext) > (FORMAT_TAG_LENGTH + CIPHERTEXT_LENGTH))
return 0;
p = strrchr(ciphertext, '$');
if (!p)
return 0;
p = p + 1;
if (strlen(p) != (BINARY_SIZE * 2 + 32))
return 0;
if (!ishexlc(p))
goto error;
return 1;
error:
return 0;
}
static void *get_salt(char *ciphertext)
{
static struct custom_salt cs;
char *p;
int i;
memset(&cs, 0, sizeof(cs));
p = ciphertext + FORMAT_TAG_LENGTH + 2 * BINARY_SIZE;
// AUTH_VFR_DATA varies per hash but is always 16 bytes in length
for (i = 0; i < 16; i++)
cs.salt[i] = (atoi16[ARCH_INDEX(p[2*i])] << 4) | atoi16[ARCH_INDEX(p[2*i+1])];
strncpy((char*)cs.salt + 16, "AUTH_PBKDF2_SPEEDY_KEY", 22); // add constant string to the salt
cs.saltlen = 16 + 22;
return (void *)&cs;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
int i;
char *p;
p = ciphertext + FORMAT_TAG_LENGTH;
for (i = 0; i < BINARY_SIZE && *p; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"
static int crypt_all(int *pcount, struct db_salt *salt)
{
int index;
const int count = *pcount;
#ifdef _OPENMP
#pragma omp parallel for
#endif
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
#endif
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
{
SHA512_CTX ctx;
int i = 0;
#if SIMD_COEF_64
int lens[SSE_GROUP_SZ_SHA512];
unsigned char *pin[SSE_GROUP_SZ_SHA512];
union {
uint32_t *pout[SSE_GROUP_SZ_SHA512];
unsigned char *poutc;
} x;
for (i = 0; i < SSE_GROUP_SZ_SHA512; ++i) {
lens[i] = strlen(saved_key[index+i]);
pin[i] = (unsigned char*)saved_key[index+i];
x.pout[i] = (uint32_t*)(crypt_out[index+i]);
}
pbkdf2_sha512_sse((const unsigned char **)pin, lens, cur_salt->salt,
cur_salt->saltlen, 4096, &(x.poutc), BINARY_SIZE, 0);
#else
pbkdf2_sha512((const unsigned char*)saved_key[index],
strlen(saved_key[index]), cur_salt->salt,
cur_salt->saltlen, 4096,
(unsigned char*)crypt_out[index], BINARY_SIZE, 0);
#endif
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
for (i = 0; i < MAX_KEYS_PER_CRYPT; i++)
#endif
{
SHA512_Init(&ctx);
SHA512_Update(&ctx, (unsigned char*)crypt_out[index + i], BINARY_SIZE);
SHA512_Update(&ctx, cur_salt->salt, 16); // AUTH_VFR_DATA first 16 bytes
SHA512_Final((unsigned char*)crypt_out[index + i], &ctx);
}
}
return count;
}
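/*
* Summary of the scheme implemented in crypt_all() above (editorial note,
* grounded in the code itself):
*   T = PBKDF2-HMAC-SHA512(password,
*                          AUTH_VFR_DATA || "AUTH_PBKDF2_SPEEDY_KEY",
*                          4096 iterations, 64 bytes)
*   stored hash = SHA512(T || AUTH_VFR_DATA)
* where AUTH_VFR_DATA is the 16-byte salt parsed from the ciphertext by
* get_salt().
*/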
static int cmp_all(void *binary, int count)
{
int index = 0;
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
for (; index < count; index++)
#endif
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void set_key(char *key, int index)
{
strnzcpy(saved_key[index], key, sizeof(*saved_key));
}
static char *get_key(int index)
{
return saved_key[index];
}
struct fmt_main fmt_oracle12c = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
{ FORMAT_TAG },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
master_taskloop_simd_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -triple x86_64-unknown-unknown -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -triple x86_64-unknown-unknown -verify %s -Wuninitialized
void xxx(int argc) {
int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp master taskloop simd
for (int i = 0; i < 10; ++i)
argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp master taskloop simd'}}
#pragma omp master taskloop simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp master taskloop simd'}}
#pragma omp master taskloop simd foo
void test_no_clause() {
int i;
#pragma omp master taskloop simd
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp master taskloop simd' must be a for loop}}
#pragma omp master taskloop simd
++i;
}
void test_branch_protected_scope() {
int i = 0;
L1:
++i;
int x[24];
#pragma omp parallel
#pragma omp master taskloop simd
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
void test_invalid_clause() {
int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp master taskloop simd' are ignored}}
#pragma omp master taskloop simd foo bar
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{directive '#pragma omp master taskloop simd' cannot contain more than one 'nogroup' clause}}
#pragma omp master taskloop simd nogroup nogroup
for (i = 0; i < 16; ++i)
;
}
void test_non_identifiers() {
int i, x;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp master taskloop simd' are ignored}}
#pragma omp master taskloop simd;
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp master taskloop simd' are ignored}}
#pragma omp parallel
#pragma omp master taskloop simd linear(x);
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp master taskloop simd' are ignored}}
#pragma omp master taskloop simd private(x);
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp master taskloop simd' are ignored}}
#pragma omp master taskloop simd, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo();
void test_collapse() {
int i;
#pragma omp parallel
// expected-error@+1 {{expected '('}}
#pragma omp master taskloop simd collapse
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp master taskloop simd collapse(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop simd collapse()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp master taskloop simd collapse(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp master taskloop simd collapse(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+2 {{extra tokens at the end of '#pragma omp master taskloop simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp master taskloop simd collapse 4)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop simd collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp master taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop simd collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp master taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop simd collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp master taskloop simd', but found only 1}}
#pragma omp parallel
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop simd collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp master taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop simd collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp master taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop simd collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp master taskloop simd', but found only 1}}
#pragma omp parallel
#pragma omp master taskloop simd collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop simd collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp master taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp master taskloop simd collapse(2.5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp master taskloop simd collapse(foo())
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp master taskloop simd collapse(-5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp master taskloop simd collapse(0)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp master taskloop simd collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
}
void test_private() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp master taskloop simd private(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop simd private(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop simd private(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop simd private()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop simd private(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp master taskloop simd private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp master taskloop simd private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp master taskloop simd private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp master taskloop simd private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
void test_lastprivate() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop simd lastprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop simd lastprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop simd lastprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop simd lastprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop simd lastprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp master taskloop simd lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp master taskloop simd lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp master taskloop simd lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp master taskloop simd lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_firstprivate() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop simd firstprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop simd firstprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop simd firstprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop simd firstprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop simd firstprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp master taskloop simd firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp master taskloop simd lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp master taskloop simd lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp master taskloop simd lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp master taskloop simd simdlen(64) safelen(8)
for (i = 0; i < 16; ++i)
;
}
void test_loop_messages() {
float a[100], b[100], c[100];
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp master taskloop simd
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp master taskloop simd
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
// expected-warning@+2 {{OpenMP loop iteration variable cannot have more than 64 bits size and will be narrowed}}
#pragma omp master taskloop simd
for (__int128 ii = 0; ii < 10; ii++) {
c[ii] = a[ii] + b[ii];
}
}
|
GrB_Type_wait.c | //------------------------------------------------------------------------------
// GrB_Type_wait: wait for a user-defined GrB_Type to complete
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// In SuiteSparse:GraphBLAS, a user-defined GrB_Type has no pending operations
// to wait for. All this method does is verify that the type is properly
// initialized, and then it does an OpenMP flush.
#include "GB.h"
GrB_Info GrB_Type_wait // no work, just check if the GrB_Type is valid
(
#if (GxB_IMPLEMENTATION_MAJOR <= 5)
GrB_Type *type
#else
GrB_Type type,
GrB_WaitMode waitmode
#endif
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
#if (GxB_IMPLEMENTATION_MAJOR <= 5)
GB_WHERE1 ("GrB_Type_wait (&type)") ;
GB_RETURN_IF_NULL (type) ;
GB_RETURN_IF_NULL_OR_FAULTY (*type) ;
#else
GB_WHERE1 ("GrB_Type_wait (type, waitmode)") ;
GB_RETURN_IF_NULL_OR_FAULTY (type) ;
#endif
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
#pragma omp flush
return (GrB_SUCCESS) ;
}
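//------------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original file; MyType and
// my_struct are placeholder names):
//
//      GrB_Type MyType ;
//      GrB_Type_new (&MyType, sizeof (my_struct)) ;
//      #if (GxB_IMPLEMENTATION_MAJOR <= 5)
//      GrB_Type_wait (&MyType) ;
//      #else
//      GrB_Type_wait (MyType, GrB_MATERIALIZE) ;
//      #endif
//------------------------------------------------------------------------------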
|
decorate.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD EEEEE CCCC OOO RRRR AAA TTTTT EEEEE %
% D D E C O O R R A A T E %
% D D EEE C O O RRRR AAAAA T EEE %
% D D E C O O R R A A T E %
% DDDD EEEEE CCCC OOO R R A A T EEEEE %
% %
% %
% MagickCore Image Decoration Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/decorate.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/resource_.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
/*
Define declarations.
*/
#define AccentuateModulate ScaleCharToQuantum(80)
#define HighlightModulate ScaleCharToQuantum(125)
#define ShadowModulate ScaleCharToQuantum(135)
#define DepthModulate ScaleCharToQuantum(185)
#define TroughModulate ScaleCharToQuantum(110)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B o r d e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BorderImage() surrounds the image with a border of the color defined by
% the bordercolor member of the image structure. The width and height
% of the border are defined by the corresponding members of the border_info
% structure.
%
% The format of the BorderImage method is:
%
% Image *BorderImage(const Image *image,const RectangleInfo *border_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o border_info: Define the width and height of the border.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BorderImage(const Image *image,
const RectangleInfo *border_info,ExceptionInfo *exception)
{
Image
*border_image,
*clone_image;
FrameInfo
frame_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(border_info != (RectangleInfo *) NULL);
frame_info.width=image->columns+(border_info->width << 1);
frame_info.height=image->rows+(border_info->height << 1);
frame_info.x=(ssize_t) border_info->width;
frame_info.y=(ssize_t) border_info->height;
frame_info.inner_bevel=0;
frame_info.outer_bevel=0;
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
clone_image->matte_color=image->border_color;
border_image=FrameImage(clone_image,&frame_info,exception);
clone_image=DestroyImage(clone_image);
if (border_image != (Image *) NULL)
border_image->matte_color=image->matte_color;
return(border_image);
}
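/*
Illustrative usage sketch for BorderImage() (not from the original source;
the input image is assumed to have been read elsewhere, and only the width
and height members of border_info are used):
RectangleInfo border_info;
Image *bordered;
border_info.width=10;
border_info.height=10;
bordered=BorderImage(image,&border_info,&image->exception);
*/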
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F r a m e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FrameImage() adds a simulated three-dimensional border around the image.
% The color of the border is defined by the matte_color member of image.
% Members width and height of frame_info specify the border width of the
% vertical and horizontal sides of the frame. Members inner and outer
% indicate the width of the inner and outer shadows of the frame.
%
% The format of the FrameImage method is:
%
% Image *FrameImage(const Image *image,const FrameInfo *frame_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o frame_info: Define the width and height of the frame and its bevels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FrameImage(const Image *image,const FrameInfo *frame_info,
ExceptionInfo *exception)
{
#define FrameImageTag "Frame/Image"
CacheView
*image_view,
*frame_view;
Image
*frame_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
accentuate,
border,
highlight,
matte,
shadow,
trough;
register ssize_t
x;
size_t
bevel_width,
height,
width;
ssize_t
y;
/*
Check frame geometry.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(frame_info != (FrameInfo *) NULL);
if ((frame_info->outer_bevel < 0) || (frame_info->inner_bevel < 0))
ThrowImageException(OptionError,"FrameIsLessThanImageSize");
bevel_width=(size_t) (frame_info->outer_bevel+frame_info->inner_bevel);
x=(ssize_t) frame_info->width-frame_info->x-bevel_width;
y=(ssize_t) frame_info->height-frame_info->y-bevel_width;
if ((x < (ssize_t) image->columns) || (y < (ssize_t) image->rows))
ThrowImageException(OptionError,"FrameIsLessThanImageSize");
/*
Initialize framed image attributes.
*/
frame_image=CloneImage(image,frame_info->width,frame_info->height,MagickTrue,
exception);
if (frame_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(frame_image,DirectClass) == MagickFalse)
{
InheritException(exception,&frame_image->exception);
frame_image=DestroyImage(frame_image);
return((Image *) NULL);
}
if ((IsPixelGray(&frame_image->border_color) == MagickFalse) &&
(IsGrayColorspace(frame_image->colorspace) != MagickFalse))
(void) SetImageColorspace(frame_image,sRGBColorspace);
if ((frame_image->border_color.opacity != OpaqueOpacity) &&
(frame_image->matte == MagickFalse))
(void) SetImageAlphaChannel(frame_image,OpaqueAlphaChannel);
frame_image->page=image->page;
if ((image->page.width != 0) && (image->page.height != 0))
{
frame_image->page.width+=frame_image->columns-image->columns;
frame_image->page.height+=frame_image->rows-image->rows;
}
/*
Initialize 3D effects color.
*/
GetMagickPixelPacket(frame_image,&matte);
matte.colorspace=sRGBColorspace;
SetMagickPixelPacket(frame_image,&image->matte_color,(IndexPacket *) NULL,
&matte);
GetMagickPixelPacket(frame_image,&border);
border.colorspace=sRGBColorspace;
SetMagickPixelPacket(frame_image,&image->border_color,(IndexPacket *) NULL,
&border);
GetMagickPixelPacket(frame_image,&accentuate);
accentuate.red=(MagickRealType) (QuantumScale*((QuantumRange-
AccentuateModulate)*matte.red+(QuantumRange*AccentuateModulate)));
accentuate.green=(MagickRealType) (QuantumScale*((QuantumRange-
AccentuateModulate)*matte.green+(QuantumRange*AccentuateModulate)));
accentuate.blue=(MagickRealType) (QuantumScale*((QuantumRange-
AccentuateModulate)*matte.blue+(QuantumRange*AccentuateModulate)));
accentuate.opacity=matte.opacity;
GetMagickPixelPacket(frame_image,&highlight);
highlight.red=(MagickRealType) (QuantumScale*((QuantumRange-
HighlightModulate)*matte.red+(QuantumRange*HighlightModulate)));
highlight.green=(MagickRealType) (QuantumScale*((QuantumRange-
HighlightModulate)*matte.green+(QuantumRange*HighlightModulate)));
highlight.blue=(MagickRealType) (QuantumScale*((QuantumRange-
HighlightModulate)*matte.blue+(QuantumRange*HighlightModulate)));
highlight.opacity=matte.opacity;
GetMagickPixelPacket(frame_image,&shadow);
shadow.red=QuantumScale*matte.red*ShadowModulate;
shadow.green=QuantumScale*matte.green*ShadowModulate;
shadow.blue=QuantumScale*matte.blue*ShadowModulate;
shadow.opacity=matte.opacity;
GetMagickPixelPacket(frame_image,&trough);
trough.red=QuantumScale*matte.red*TroughModulate;
trough.green=QuantumScale*matte.green*TroughModulate;
trough.blue=QuantumScale*matte.blue*TroughModulate;
trough.opacity=matte.opacity;
if (image->colorspace == CMYKColorspace)
{
ConvertRGBToCMYK(&matte);
ConvertRGBToCMYK(&border);
ConvertRGBToCMYK(&accentuate);
ConvertRGBToCMYK(&highlight);
ConvertRGBToCMYK(&shadow);
ConvertRGBToCMYK(&trough);
}
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
frame_view=AcquireAuthenticCacheView(frame_image,exception);
height=(size_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
frame_info->inner_bevel);
if (height != 0)
{
register IndexPacket
*magick_restrict frame_indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
/*
Draw top of ornamental border.
*/
q=QueueCacheViewAuthenticPixels(frame_view,0,0,frame_image->columns,
height,exception);
frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view);
if (q != (PixelPacket *) NULL)
{
/*
Draw top of ornamental border.
*/
for (y=0; y < (ssize_t) frame_info->outer_bevel; y++)
{
for (x=0; x < (ssize_t) (frame_image->columns-y); x++)
{
if (x < y)
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
else
SetPixelPacket(frame_image,&accentuate,q,frame_indexes);
q++;
frame_indexes++;
}
for ( ; x < (ssize_t) frame_image->columns; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
}
for (y=0; y < (ssize_t) (frame_info->y-bevel_width); y++)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
width=frame_image->columns-2*frame_info->outer_bevel;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
}
for (y=0; y < (ssize_t) frame_info->inner_bevel; y++)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
width=image->columns+((size_t) frame_info->inner_bevel << 1)-
y;
for (x=0; x < (ssize_t) width; x++)
{
if (x < y)
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
else
SetPixelPacket(frame_image,&trough,q,frame_indexes);
q++;
frame_indexes++;
}
for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
width=frame_info->width-frame_info->x-image->columns-bevel_width;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
}
(void) SyncCacheViewAuthenticPixels(frame_view,exception);
}
}
/*
Draw sides of ornamental border.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,frame_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict frame_indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
/*
Initialize scanline with matte color.
*/
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(frame_view,0,frame_info->y+y,
frame_image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view);
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
/*
Set frame interior pixels.
*/
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelPacket(frame_image,&border,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
width=frame_info->width-frame_info->x-image->columns-bevel_width;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
if (SyncCacheViewAuthenticPixels(frame_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,FrameImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
height=(size_t) (frame_info->inner_bevel+frame_info->height-
frame_info->y-image->rows-bevel_width+frame_info->outer_bevel);
if (height != 0)
{
register IndexPacket
*magick_restrict frame_indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
/*
Draw bottom of ornamental border.
*/
q=QueueCacheViewAuthenticPixels(frame_view,0,(ssize_t) (frame_image->rows-
height),frame_image->columns,height,exception);
if (q != (PixelPacket *) NULL)
{
/*
Draw bottom of ornamental border.
*/
frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view);
for (y=frame_info->inner_bevel-1; y >= 0; y--)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < y; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
{
if (x >= (ssize_t) (image->columns+2*frame_info->inner_bevel-y))
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
else
SetPixelPacket(frame_image,&accentuate,q,frame_indexes);
q++;
frame_indexes++;
}
width=frame_info->width-frame_info->x-image->columns-bevel_width;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
}
height=frame_info->height-frame_info->y-image->rows-bevel_width;
for (y=0; y < (ssize_t) height; y++)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
width=frame_image->columns-2*frame_info->outer_bevel;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
}
for (y=frame_info->outer_bevel-1; y >= 0; y--)
{
for (x=0; x < y; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
for ( ; x < (ssize_t) frame_image->columns; x++)
{
if (x >= (ssize_t) (frame_image->columns-y))
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
else
SetPixelPacket(frame_image,&trough,q,frame_indexes);
q++;
frame_indexes++;
}
}
(void) SyncCacheViewAuthenticPixels(frame_view,exception);
}
}
frame_view=DestroyCacheView(frame_view);
image_view=DestroyCacheView(image_view);
x=(ssize_t) (frame_info->outer_bevel+(frame_info->x-bevel_width)+
frame_info->inner_bevel);
y=(ssize_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
frame_info->inner_bevel);
if (status != MagickFalse)
status=CompositeImage(frame_image,image->compose,image,x,y);
if (status == MagickFalse)
frame_image=DestroyImage(frame_image);
return(frame_image);
}
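/*
Illustrative usage sketch for FrameImage() (not from the original source;
the geometry values are arbitrary but satisfy the size checks above):
FrameInfo frame_info;
Image *framed;
frame_info.width=image->columns+50;
frame_info.height=image->rows+50;
frame_info.x=25;
frame_info.y=25;
frame_info.outer_bevel=6;
frame_info.inner_bevel=6;
framed=FrameImage(image,&frame_info,&image->exception);
*/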
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RaiseImage() creates a simulated three-dimensional button-like effect
% by lightening and darkening the edges of the image. Members width and
% height of raise_info define the width of the vertical and horizontal
% edge of the effect.
%
% The format of the RaiseImage method is:
%
% MagickBooleanType RaiseImage(const Image *image,
% const RectangleInfo *raise_info,const MagickBooleanType raise)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o raise_info: Define the width and height of the raise area.
%
% o raise: A value other than zero creates a 3-D raise effect,
% otherwise it has a lowered effect.
%
*/
MagickExport MagickBooleanType RaiseImage(Image *image,
const RectangleInfo *raise_info,const MagickBooleanType raise)
{
#define AccentuateFactor ScaleCharToQuantum(135)
#define HighlightFactor ScaleCharToQuantum(190)
#define ShadowFactor ScaleCharToQuantum(190)
#define RaiseImageTag "Raise/Image"
#define TroughFactor ScaleCharToQuantum(135)
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
Quantum
foreground,
background;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(raise_info != (RectangleInfo *) NULL);
exception=(&image->exception);
if ((image->columns <= (raise_info->width << 1)) ||
(image->rows <= (raise_info->height << 1)))
ThrowBinaryException(OptionError,"ImageSizeMustExceedBevelWidth",
image->filename);
foreground=QuantumRange;
background=(Quantum) 0;
if (raise == MagickFalse)
{
foreground=(Quantum) 0;
background=QuantumRange;
}
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
/*
Raise image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,raise_info->height,1)
#endif
for (y=0; y < (ssize_t) raise_info->height; y++)
{
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < y; x++)
{
SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelRed(q)*HighlightFactor+(MagickRealType) foreground*
(QuantumRange-HighlightFactor))));
SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelGreen(q)*HighlightFactor+(MagickRealType) foreground*
(QuantumRange-HighlightFactor))));
SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelBlue(q)*HighlightFactor+(MagickRealType) foreground*
(QuantumRange-HighlightFactor))));
q++;
}
for ( ; x < (ssize_t) (image->columns-y); x++)
{
SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelRed(q)*AccentuateFactor+(MagickRealType) foreground*
(QuantumRange-AccentuateFactor))));
SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelGreen(q)*AccentuateFactor+(MagickRealType) foreground*
(QuantumRange-AccentuateFactor))));
SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelBlue(q)*AccentuateFactor+(MagickRealType) foreground*
(QuantumRange-AccentuateFactor))));
q++;
}
for ( ; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelRed(q)*ShadowFactor+(MagickRealType) background*
(QuantumRange-ShadowFactor))));
SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelGreen(q)*ShadowFactor+(MagickRealType) background*
(QuantumRange-ShadowFactor))));
SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelBlue(q)*ShadowFactor+(MagickRealType) background*
(QuantumRange-ShadowFactor))));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,RaiseImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows-2*raise_info->height,1)
#endif
for (y=(ssize_t) raise_info->height; y < (ssize_t) (image->rows-raise_info->height); y++)
{
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) raise_info->width; x++)
{
SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelRed(q)*HighlightFactor+(MagickRealType) foreground*
(QuantumRange-HighlightFactor))));
SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelGreen(q)*HighlightFactor+(MagickRealType) foreground*
(QuantumRange-HighlightFactor))));
SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelBlue(q)*HighlightFactor+(MagickRealType) foreground*
(QuantumRange-HighlightFactor))));
q++;
}
for ( ; x < (ssize_t) (image->columns-raise_info->width); x++)
q++;
for ( ; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelRed(q)*ShadowFactor+(MagickRealType) background*
(QuantumRange-ShadowFactor))));
SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelGreen(q)*ShadowFactor+(MagickRealType) background*
(QuantumRange-ShadowFactor))));
SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelBlue(q)*ShadowFactor+(MagickRealType) background*
(QuantumRange-ShadowFactor))));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,RaiseImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows-raise_info->height,1)
#endif
for (y=(ssize_t) (image->rows-raise_info->height); y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) (image->rows-y); x++)
{
SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelRed(q)*HighlightFactor+(MagickRealType) foreground*
(QuantumRange-HighlightFactor))));
SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelGreen(q)*HighlightFactor+(MagickRealType) foreground*
(QuantumRange-HighlightFactor))));
SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelBlue(q)*HighlightFactor+(MagickRealType) foreground*
(QuantumRange-HighlightFactor))));
q++;
}
for ( ; x < (ssize_t) (image->columns-(image->rows-y)); x++)
{
SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelRed(q)*TroughFactor+(MagickRealType) background*
(QuantumRange-TroughFactor))));
SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelGreen(q)*TroughFactor+(MagickRealType) background*
(QuantumRange-TroughFactor))));
SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelBlue(q)*TroughFactor+(MagickRealType) background*
(QuantumRange-TroughFactor))));
q++;
}
for ( ; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelRed(q)*ShadowFactor+(MagickRealType) background*
(QuantumRange-ShadowFactor))));
SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelGreen(q)*ShadowFactor+(MagickRealType) background*
(QuantumRange-ShadowFactor))));
SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelBlue(q)*ShadowFactor+(MagickRealType) background*
(QuantumRange-ShadowFactor))));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,RaiseImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
|
conv_kernel_arm.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: haoluo@openailab.com
*/
#include <stdint.h>
#include <stdlib.h>
#include <math.h>
#include "../conv_hcl_kernel.h"
#include "wino_conv_kernel_arm.h"
#ifdef __aarch64__
#include "wino_conv_kernel_1_arm.h"
#endif
#ifdef __aarch64__
#define PER_OUT_CHAN 16
void sgemm_4x16_a72(float* biases, float* input, float* kernel, long kernel_size, float* output, long output_xy,
int activation, int layout);
void sgemm_4x4_a72(float* biases, float* input, float* kernel, long kernel_size, float* output, long output_xy,
int activation, int layout);
#else
#define PER_OUT_CHAN 12
void sgemm_4x12_a17(float* biases, float* input, float* kernel, int kernel_size, float* output, int output_xy,
int activation, int layout);
void sgemm_4x4_a17(float* biases, float* input, float* kernel, int kernel_size, float* output, int output_xy,
int activation, int layout);
#endif
void im2col_fp32_1x1(float* input, int input_xy, float* col, int col_cnt, int input_chan);
void im2col_fp32_3x3(float* input, int w, int h, int channel, float* cur_col, int stride);
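/*
* interleave_kernel() repacks the weights so the sgemm micro-kernels can read
* them sequentially: for each group of PER_OUT_CHAN output channels (then
* groups of 4, then a zero-padded tail), the weights of all channels in the
* group are stored contiguously for every kernel element j.
*/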
static void interleave_kernel(float* kernel, float* kernel_interleaved, int kernel_chan, int kernel_size)
{
int i, j, k;
float* cur_kernel[PER_OUT_CHAN];
float* cur_kernel_interleaved = kernel_interleaved;
// interleave PER_OUT_CHAN kernels
for (i = 0; i + PER_OUT_CHAN - 1 < kernel_chan; i += PER_OUT_CHAN)
{
for (k = 0; k < PER_OUT_CHAN; k++)
cur_kernel[k] = kernel + kernel_size * (i + k);
for (j = 0; j < kernel_size; j++)
{
for (k = 0; k < PER_OUT_CHAN; k++)
*(cur_kernel_interleaved++) = cur_kernel[k][j];
}
}
for (; i < (kernel_chan & -4); i += 4)
{
for (k = 0; k < 4; k++)
cur_kernel[k] = kernel + kernel_size * (i + k);
for (j = 0; j < kernel_size; j++)
{
for (k = 0; k < 4; k++)
*(cur_kernel_interleaved++) = cur_kernel[k][j];
}
}
// remaining kernels (fewer than 4), zero-padded up to a multiple of 4
for (k = 0; k < 3; k++)
cur_kernel[k] = kernel + kernel_size * (i + k);
if ((kernel_chan & 0x3) == 3)
{
for (j = 0; j < kernel_size; j++)
{
for (k = 0; k < 3; k++)
*(cur_kernel_interleaved++) = cur_kernel[k][j];
*(cur_kernel_interleaved++) = 0.f;
}
}
else if ((kernel_chan & 0x3) == 2)
{
for (j = 0; j < kernel_size; j++)
{
for (k = 0; k < 2; k++)
*(cur_kernel_interleaved++) = cur_kernel[k][j];
*(cur_kernel_interleaved++) = 0.f;
*(cur_kernel_interleaved++) = 0.f;
}
}
else if ((kernel_chan & 0x3) == 1)
{
for (j = 0; j < kernel_size; j++)
{
*(cur_kernel_interleaved++) = cur_kernel[0][j];
*(cur_kernel_interleaved++) = 0.f;
*(cur_kernel_interleaved++) = 0.f;
*(cur_kernel_interleaved++) = 0.f;
}
}
}
/* kernel interleave */
static void interleave(struct ir_tensor* filter, struct conv_priv_info* priv_info, struct conv_param* param)
{
int group = param->group;
int kernel_size = filter->dims[1] * filter->dims[2] * filter->dims[3];
int out_chan = filter->dims[0] / group;
int out_chan_align4 = (out_chan + 3) / 4 * 4;
int kernel_size_algin = kernel_size * out_chan_align4;
int kernel_size_group = kernel_size * out_chan;
float* kernel = filter->data;
float* interleave_buf = priv_info->interleave_buffer;
for (int g = 0; g < group; g++)
{
float* cur_kernel = kernel + g * kernel_size_group;
float* cur_interleave = interleave_buf + g * kernel_size_algin;
interleave_kernel(cur_kernel, cur_interleave, out_chan, kernel_size);
}
}
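/*
* im2col() expands the input feature map into the column buffer consumed by
* the 4-wide GEMM micro-kernels: kernel_size values are gathered for each
* group of 4 output positions, with fast paths for 1x1/stride-1 and (on
* aarch64) 3x3 kernels, zero fill for taps that fall outside the image, and
* zero padding of the final group when out_w*out_h is not a multiple of 4.
*/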
static void im2col(float* input, float* col, int in_c, int in_w, int in_h, int k_w, int k_h, int s_w, int s_h, int d_w,
int d_h, int pad_w0, int pad_w1, int pad_h0, int pad_h1, int out_w, int out_h, int num_thread)
{
float* cur_col;
int col_i, col_j, kch, ky, kx, i;
int kernel_size = k_w * k_h * in_c;
int in_xy = in_w * in_h;
int out_xy = out_w * out_h;
int col_end3 = out_xy & 3;
if (k_w == 1 && k_h == 1 && s_w == 1 && s_h == 1)
{
for (col_i = 0; col_i + 3 < out_xy; col_i += 4)
{
cur_col = col + col_i * kernel_size;
float* cur_input = input + col_i;
im2col_fp32_1x1(cur_input, in_xy, cur_col, 4, in_c);
}
// remaining (<4) output positions, zero-padded
if (col_end3)
{
cur_col = col + col_i * kernel_size;
for (col_j = 0; col_j < kernel_size; col_j++)
{
for (int i = 0; i < 4; i++)
{
if (i < col_end3)
*cur_col++ = *(input + col_j * in_xy + col_i + i);
else
*cur_col++ = 0;
}
}
}
}
#ifdef __aarch64__
else if (d_w == 1 && d_h == 1 && k_w == 3 && k_h == 3 && s_w == s_h)
{
int is_pad0 = (pad_w0 == 0) && (pad_h0 == 0) && (pad_w1 == 0) && (pad_h1 == 0);
for (col_i = 0; col_i < (out_xy & -4); col_i += 4)
{
cur_col = col + col_i * kernel_size;
int imy0 = col_i / out_w;
int imy3 = (col_i + 3) / out_w;
int imx0 = col_i - imy0 * out_w;
int imx3 = (col_i + 3) - imy3 * out_w;
if ((imy0 == imy3) && (is_pad0 || (imy0 != 0 && imx0 != 0 && imy0 != (out_h - 1) && imx3 != (out_w - 1))))
{
float* l0 = input + (imy0 * s_h - pad_h0) * in_w + (imx0 * s_w - pad_w0);
{
im2col_fp32_3x3(l0, in_w, in_h, in_c, cur_col, s_w);
cur_col += 4 * kernel_size;
}
}
else
{
int cnt_y[4] = {imy0, (col_i + 1) / out_w, (col_i + 2) / out_w, imy3};
int cnt_x[4] = {imx0, col_i - cnt_y[1] * out_w + 1, col_i - cnt_y[2] * out_w + 2, imx3};
int imx_start[4] = {cnt_x[0] * s_w - pad_w0, cnt_x[1] * s_w - pad_w0, cnt_x[2] * s_w - pad_w0,
cnt_x[3] * s_w - pad_w0};
int imy_start[4] = {cnt_y[0] * s_h - pad_h0, cnt_y[1] * s_h - pad_h0, cnt_y[2] * s_h - pad_h0,
cnt_y[3] * s_h - pad_h0};
for (kch = 0; kch < in_c; kch++)
for (ky = 0; ky < 3; ky++)
for (kx = 0; kx < 3; kx++)
{
int imx[4] = {imx_start[0] + kx, imx_start[1] + kx, imx_start[2] + kx, imx_start[3] + kx};
int imy[4] = {imy_start[0] + ky, imy_start[1] + ky, imy_start[2] + ky, imy_start[3] + ky};
for (i = 0; i < 4; i++)
{
if (imx[i] >= 0 && imx[i] < in_w && imy[i] >= 0 && imy[i] < in_h)
*cur_col++ = *(input + in_xy * kch + in_w * imy[i] + imx[i]);
else
*cur_col++ = 0.f;
}
}
}
}
// remaining (<4) output positions, zero-padded
if (col_end3)
{
cur_col = col + col_i * kernel_size;
int cnt_y[4] = {col_i / out_w, (col_i + 1) / out_w, (col_i + 2) / out_w, (col_i + 3) / out_w};
int cnt_x[4] = {col_i - cnt_y[0] * out_w, col_i - cnt_y[1] * out_w + 1, col_i - cnt_y[2] * out_w + 2,
col_i - cnt_y[3] * out_w + 3};
int imx_start[4] = {cnt_x[0] * s_w - pad_w0, cnt_x[1] * s_w - pad_w0, cnt_x[2] * s_w - pad_w0,
cnt_x[3] * s_w - pad_w0};
int imy_start[4] = {cnt_y[0] * s_h - pad_h0, cnt_y[1] * s_h - pad_h0, cnt_y[2] * s_h - pad_h0,
cnt_y[3] * s_h - pad_h0};
for (kch = 0; kch < in_c; kch++)
{
for (ky = 0; ky < 3; ky++)
{
for (kx = 0; kx < 3; kx++)
{
int imx[4] = {imx_start[0] + kx, imx_start[1] + kx, imx_start[2] + kx, imx_start[3] + kx};
int imy[4] = {imy_start[0] + ky, imy_start[1] + ky, imy_start[2] + ky, imy_start[3] + ky};
for (i = 0; i < 4; i++)
{
if (i < col_end3 && imx[i] >= 0 && imx[i] < in_w && imy[i] >= 0 && imy[i] < in_h)
*cur_col++ = *(input + in_xy * kch + in_w * imy[i] + imx[i]);
else
*cur_col++ = 0.f;
}
}
}
}
}
}
#endif
else
{
for (col_i = 0; col_i + 3 < out_xy; col_i += 4)
{
cur_col = col + col_i * kernel_size;
int cnt_y[4] = {col_i / out_w, (col_i + 1) / out_w, (col_i + 2) / out_w, (col_i + 3) / out_w};
int cnt_x[4] = {col_i - cnt_y[0] * out_w, col_i - cnt_y[1] * out_w + 1, col_i - cnt_y[2] * out_w + 2,
col_i - cnt_y[3] * out_w + 3};
int imx_start[4] = {cnt_x[0] * s_w - pad_w0, cnt_x[1] * s_w - pad_w0, cnt_x[2] * s_w - pad_w0,
cnt_x[3] * s_w - pad_w0};
int imy_start[4] = {cnt_y[0] * s_h - pad_h0, cnt_y[1] * s_h - pad_h0, cnt_y[2] * s_h - pad_h0,
cnt_y[3] * s_h - pad_h0};
for (kch = 0; kch < in_c; kch++)
for (ky = 0; ky < (k_h * d_h); ky += d_h)
for (kx = 0; kx < (k_w * d_w); kx += d_w)
{
int imx[4] = {imx_start[0] + kx, imx_start[1] + kx, imx_start[2] + kx, imx_start[3] + kx};
int imy[4] = {imy_start[0] + ky, imy_start[1] + ky, imy_start[2] + ky, imy_start[3] + ky};
for (i = 0; i < 4; i++)
{
if (imx[i] >= 0 && imx[i] < in_w && imy[i] >= 0 && imy[i] < in_h)
*cur_col++ = *(input + in_xy * kch + in_w * imy[i] + imx[i]);
else
*cur_col++ = 0.f;
}
}
}
if (col_end3)
{
cur_col = col + col_i * kernel_size;
int cnt_y[4] = {col_i / out_w, (col_i + 1) / out_w, (col_i + 2) / out_w, (col_i + 3) / out_w};
int cnt_x[4] = {col_i - cnt_y[0] * out_w, col_i - cnt_y[1] * out_w + 1, col_i - cnt_y[2] * out_w + 2,
col_i - cnt_y[3] * out_w + 3};
int imx_start[4] = {cnt_x[0] * s_w - pad_w0, cnt_x[1] * s_w - pad_w0, cnt_x[2] * s_w - pad_w0,
cnt_x[3] * s_w - pad_w0};
int imy_start[4] = {cnt_y[0] * s_h - pad_h0, cnt_y[1] * s_h - pad_h0, cnt_y[2] * s_h - pad_h0,
cnt_y[3] * s_h - pad_h0};
for (kch = 0; kch < in_c; kch++)
for (ky = 0; ky < (k_h * d_h); ky += d_h)
for (kx = 0; kx < (k_w * d_w); kx += d_w)
{
int imx[4] = {imx_start[0] + kx, imx_start[1] + kx, imx_start[2] + kx, imx_start[3] + kx};
int imy[4] = {imy_start[0] + ky, imy_start[1] + ky, imy_start[2] + ky, imy_start[3] + ky};
for (i = 0; i < 4; i++)
{
if (i < col_end3 && imx[i] >= 0 && imx[i] < in_w && imy[i] >= 0 && imy[i] < in_h)
*cur_col++ = *(input + in_xy * kch + in_w * imy[i] + imx[i]);
else
*cur_col++ = 0.f;
}
}
}
}
}
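/*
* sgemm_set() multiplies the interleaved kernel against the column buffer for
* the output channels covered by full PER_OUT_CHAN blocks, 4 output positions
* per micro-kernel call; when output_xy is not a multiple of 4 the trailing
* columns are computed into a small temporary 'result' tile and scattered
* into the output tensor.
*/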
static void sgemm_set(float* col, float* kernel, float* biases, float* output, int kernel_size, int ch_start,
int ch_end, int output_xy, int activation, int num_thread, int cpu_affinity)
{
int nn_outch = ch_end / PER_OUT_CHAN;
int col_end3 = output_xy & 0x3;
if (col_end3)
{
#pragma omp parallel for num_threads(num_thread)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * PER_OUT_CHAN;
float* biasptr = biases ? ( float* )(biases + p) : NULL;
float* kernel_tmp = ( float* )(kernel + p * kernel_size);
float* output_tmp = ( float* )(output + p * output_xy);
int col_line = 0;
for (col_line = 0; col_line + 3 < output_xy; col_line += 4)
#ifdef __aarch64__
{
float* col_tmp = ( float* )(col + col_line * kernel_size);
sgemm_4x16_a72(biasptr, col_tmp, kernel_tmp, kernel_size, output_tmp + col_line, output_xy, activation, 0);
}
{
float result[64];
float* col_tmp = ( float* )(col + col_line * kernel_size);
sgemm_4x16_a72(biasptr, col_tmp, kernel_tmp, kernel_size, result, 4, activation, 0);
for (int i = 0; i < 16; i++)
{
for (int j = 0; j < (col_end3); j++)
*(output + (p + i) * output_xy + col_line + j) = result[(i << 2) + j];
}
}
#else
{
float* col_tmp = ( float* )(col + col_line * kernel_size);
sgemm_4x12_a17(biasptr, col_tmp, kernel_tmp, kernel_size, output_tmp + col_line, output_xy, activation, 0);
}
{
float result[64];
float* col_tmp = ( float* )(col + col_line * kernel_size);
sgemm_4x12_a17(biasptr, col_tmp, kernel_tmp, kernel_size, result, 4, activation, 0);
for (int i = 0; i < 12; i++)
{
for (int j = 0; j < (col_end3); j++)
*(output + (p + i) * output_xy + col_line + j) = result[(i << 2) + j];
}
}
#endif
}
}
else
{
#pragma omp parallel for num_threads(num_thread)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * PER_OUT_CHAN;
float* biasptr = biases ? ( float* )(biases + p) : NULL;
float* kernel_tmp = ( float* )(kernel + p * kernel_size);
float* output_tmp = ( float* )(output + p * output_xy);
for (int col_line = 0; col_line + 3 < output_xy; col_line += 4)
{
float* col_tmp = ( float* )(col + col_line * kernel_size);
#ifdef __aarch64__
sgemm_4x16_a72(biasptr, col_tmp, kernel_tmp, kernel_size, output_tmp + col_line, output_xy, activation, 0);
#else
sgemm_4x12_a17(biasptr, col_tmp, kernel_tmp, kernel_size, output_tmp + col_line, output_xy, activation, 0);
#endif
}
}
}
}
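/*
* sgemm4x4() handles the output channels left over after the PER_OUT_CHAN
* blocking (ch_start..ch_end), working in 4x4 tiles and using the temporary
* 'result' tile for partial channel and column tails.
*/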
static void sgemm4x4(float* col, float* kernel, float* biases, float* output, int kernel_size, int ch_start, int ch_end,
int output_xy, int activation, int num_thread, int cpu_affinity)
{
float result[16];
float* cur_biases = NULL;
int col_line, kernel_num;
float *cur_col, *cur_kernel, *cur_output;
int i, j;
int col_end3 = output_xy & 0x3;
int kernel_end3 = ch_end & 0x3;
for (kernel_num = ch_start; kernel_num + 3 < (ch_end & -4); kernel_num += 4)
{
if (biases)
cur_biases = ( float* )(biases + kernel_num);
cur_kernel = ( float* )(kernel + kernel_num * kernel_size);
cur_output = ( float* )(output + kernel_num * output_xy);
for (col_line = 0; col_line < (output_xy & -4); col_line += 4)
{
cur_col = ( float* )(col + col_line * kernel_size);
#ifdef __aarch64__
sgemm_4x4_a72(cur_biases, cur_col, cur_kernel, kernel_size, cur_output + col_line, output_xy, activation, 0);
#else
sgemm_4x4_a17(cur_biases, cur_col, cur_kernel, kernel_size, cur_output + col_line, output_xy, activation, 0);
#endif
}
if (col_end3)
{
cur_col = ( float* )(col + col_line * kernel_size);
#ifdef __aarch64__
sgemm_4x4_a72(cur_biases, cur_col, cur_kernel, kernel_size, result, 4, activation, 0);
#else
sgemm_4x4_a17(cur_biases, cur_col, cur_kernel, kernel_size, result, 4, activation, 0);
#endif
for (i = 0; i < 4; i++)
{
for (j = 0; j < (col_end3); j++)
*(output + (kernel_num + i) * output_xy + col_line + j) = result[(i << 2) + j];
}
}
}
if (kernel_end3)
{
if (biases)
cur_biases = ( float* )(biases + kernel_num);
cur_kernel = ( float* )(kernel + kernel_num * kernel_size);
for (col_line = 0; col_line < (output_xy & -4); col_line += 4)
{
cur_col = ( float* )(col + col_line * kernel_size);
#ifdef __aarch64__
sgemm_4x4_a72(cur_biases, cur_col, cur_kernel, kernel_size, result, 4, activation, 0);
#else
sgemm_4x4_a17(cur_biases, cur_col, cur_kernel, kernel_size, result, 4, activation, 0);
#endif
for (i = 0; i < kernel_end3; i++)
for (j = 0; j < 4; j++)
*(output + (kernel_num + i) * output_xy + col_line + j) = result[(i << 2) + j];
}
if (col_end3)
{
cur_col = ( float* )(col + col_line * kernel_size);
#ifdef __aarch64__
sgemm_4x4_a72(cur_biases, cur_col, cur_kernel, kernel_size, result, 4, activation, 0);
#else
sgemm_4x4_a17(cur_biases, cur_col, cur_kernel, kernel_size, result, 4, activation, 0);
#endif
for (i = 0; i < (kernel_end3); i++)
{
for (j = 0; j < (col_end3); j++)
*(output + (kernel_num + i) * output_xy + col_line + j) = result[(i << 2) + j];
}
}
}
}
/* check whether the convolution should use the winograd implementation */
static int winograd_support(struct conv_param* param, int in_h, int in_w)
{
int kernel_h = param->kernel_h;
int kernel_w = param->kernel_w;
int stride_h = param->stride_h;
int stride_w = param->stride_w;
int dilation_h = param->dilation_h;
int dilation_w = param->dilation_w;
int output_chan = param->output_channel;
int group = param->group;
if (in_h < 7 && in_w < 7)
return 0;
if (in_h < 10 && in_w < 10 && output_chan < 16)
return 0;
if (group != 1 || kernel_h != 3 || kernel_w != 3)
return 0;
if (dilation_h != 1 || dilation_w != 1 || stride_h != 1 || stride_w != 1)
return 0;
return 1;
}
/*
* get the memory size for im2col of input tensor
*/
int conv_hcl_get_shared_mem_size(struct ir_tensor* input, struct ir_tensor* output, struct conv_param* param)
{
int in_h = input->dims[2];
int in_w = input->dims[3];
int out_h = output->dims[2];
int out_w = output->dims[3];
int group = param->group;
int input_chan = param->input_channel / group;
int kernel_size = input_chan * param->kernel_h * param->kernel_w;
int out_cstep = out_h * out_w; // channel cstep, output_h * output_w
int elem_size = input->elem_size; // uint8/int8 is 1 byte, fp32 is 4 bytes
out_cstep = (out_cstep + 3) / 4 * 4;
int mem_size = elem_size * kernel_size * out_cstep + 128;
return mem_size;
}
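/*
* Hypothetical sizing example (numbers chosen for illustration only): a 3x3
* convolution with 64 input channels per group and a 56x56 output gives
* kernel_size = 3*3*64 = 576 and out_cstep = 3136 (already a multiple of 4),
* so for fp32 the im2col buffer is 4*576*3136 + 128 = 7,225,472 bytes,
* roughly 6.9 MB.
*/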
int conv_hcl_get_shared_pack4_mem_size(struct ir_tensor* filter, struct ir_tensor* output, struct conv_param* param)
{
return 0;
}
/*
* get the memory size for the interleaved kernel tensor used by im2col + sgemm
*/
static int get_private_mem_size(struct ir_tensor* filter, struct conv_param* param)
{
int group = param->group;
int out_chan = filter->dims[0] / group;
int out_chan_align4 = (out_chan + 3) / 4 * 4;
int kernel_size = filter->dims[1] * filter->dims[2] * filter->dims[3];
int mem_size = kernel_size * filter->elem_size * out_chan_align4 * group + 128; // caution
return mem_size;
}
int conv_hcl_set_shared_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
priv_info->external_im2col_mem = 1;
priv_info->im2col_buffer = mem;
priv_info->im2col_buffer_size = mem_size;
return 0;
}
int conv_hcl_set_shared_pack4_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
priv_info->external_im2col_pack4_mem = 0;
priv_info->im2col_buffer_pack4 = NULL;
priv_info->im2col_buffer_pack4_size = 0;
return 0;
}
int conv_hcl_prerun(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* output_tensor,
struct conv_priv_info* priv_info, struct conv_param* param)
{
int in_c = input_tensor->dims[1];
int in_h = input_tensor->dims[2];
int in_w = input_tensor->dims[3];
/* check winograd implement, only for conv3x3s1 */
priv_info->winograd = winograd_support(param, in_h, in_w);
if (priv_info->winograd)
{
#ifdef __aarch64__
if(in_c >= 256)
return wino_conv_hcl_prerun_1(input_tensor, filter_tensor, output_tensor, priv_info, param);
else
#endif
return wino_conv_hcl_prerun(input_tensor, filter_tensor, output_tensor, priv_info, param);
}
/* alloc mem of im2col */
if (!priv_info->external_im2col_mem)
{
int mem_size = conv_hcl_get_shared_mem_size(input_tensor, output_tensor, param);
void* mem = sys_malloc(mem_size);
priv_info->im2col_buffer = mem;
priv_info->im2col_buffer_size = mem_size;
}
/* alloc mem of kernel interleave */
if (!priv_info->external_interleave_mem)
{
int mem_size = get_private_mem_size(filter_tensor, param);
void* mem = sys_malloc(mem_size);
priv_info->interleave_buffer = mem;
priv_info->interleave_buffer_size = mem_size;
}
/* kernel interleave */
interleave(filter_tensor, priv_info, param);
return 0;
}
int conv_hcl_postrun(struct conv_priv_info* priv_info)
{
if (priv_info->winograd)
{
wino_conv_hcl_postrun(priv_info);
}
if (!priv_info->external_interleave_mem && priv_info->interleave_buffer != NULL)
{
sys_free(priv_info->interleave_buffer);
priv_info->interleave_buffer = NULL;
}
if (!priv_info->external_im2col_mem && priv_info->im2col_buffer != NULL)
{
sys_free(priv_info->im2col_buffer);
priv_info->im2col_buffer = NULL;
}
return 0;
}
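/*
* conv_hcl_run() executes one forward pass: for every image in the batch and
* every group it runs im2col into the shared buffer, then sgemm_set for the
* output channels covered by full PER_OUT_CHAN blocks and sgemm4x4 for the
* remainder; 3x3 stride-1 convolutions that pass winograd_support() are
* dispatched to the winograd path instead.
*/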
int conv_hcl_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor,
struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param,
int num_thread, int cpu_affinity)
{
/* param */
int group = param->group;
int kernel_h = param->kernel_h;
int kernel_w = param->kernel_w;
int stride_h = param->stride_h;
int stride_w = param->stride_w;
int dilation_h = param->dilation_h;
int dilation_w = param->dilation_w;
int pad_h0 = param->pad_h0;
int pad_h1 = param->pad_h1;
int pad_w0 = param->pad_w0;
int pad_w1 = param->pad_w1;
int act_type = param->activation;
int batch = input_tensor->dims[0];
int in_c = input_tensor->dims[1] / group;
int in_h = input_tensor->dims[2];
int in_w = input_tensor->dims[3];
int input_size = in_c * in_h * in_w;
int kernel_size = in_c * kernel_h * kernel_w;
int input_image_size = input_tensor->dims[1] * input_tensor->dims[2] * input_tensor->dims[3];
if (priv_info->winograd)
{
#ifdef __aarch64__
if(in_c >= 256)
return wino_conv_hcl_run_1(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, num_thread, cpu_affinity);
else
#endif
return wino_conv_hcl_run(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, num_thread, cpu_affinity);
}
int out_c = output_tensor->dims[1] / group;
int out_h = output_tensor->dims[2];
int out_w = output_tensor->dims[3];
int out_hw = out_h * out_w;
int output_size = out_c * out_h * out_w;
int out_c_align = ((out_c + 3) & -4);
int output_image_size = output_tensor->dims[1] * output_tensor->dims[2] * output_tensor->dims[3];
/* buffer addr */
float* input_buf = ( float* )input_tensor->data;
float* output_buf = ( float* )output_tensor->data;
float* biases_buf = NULL;
if (bias_tensor != NULL)
biases_buf = ( float* )bias_tensor->data;
float* col_buf = ( float* )priv_info->im2col_buffer;
float* interleave_buf = ( float* )priv_info->interleave_buffer;
int sgemm_set_chan = out_c / PER_OUT_CHAN * PER_OUT_CHAN;
int sgemm_set_remain = out_c % PER_OUT_CHAN;
for (int n = 0; n < batch; n++) // batch size
{
for (int g = 0; g < group; g++)
{
/* im2col */
float* cur_input = input_buf + n * input_image_size + g * input_size;
im2col(cur_input, col_buf, in_c, in_w, in_h, kernel_w, kernel_h, stride_w, stride_h, dilation_w, dilation_h,
pad_w0, pad_w1, pad_h0, pad_h1, out_w, out_h, num_thread);
/* gemm */
float* cur_kernel = interleave_buf + g * kernel_size * out_c_align;
float* cur_output = output_buf + n * output_image_size + g * output_size;
float* cur_bias = biases_buf ? (biases_buf + g * out_c) : NULL;
sgemm_set(col_buf, cur_kernel, cur_bias, cur_output, kernel_size, 0, sgemm_set_chan, out_hw, act_type,
num_thread, cpu_affinity);
if (sgemm_set_remain)
sgemm4x4(col_buf, cur_kernel, cur_bias, cur_output, kernel_size, sgemm_set_chan, out_c, out_hw,
act_type, num_thread, cpu_affinity);
}
}
return 0;
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaConcept.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <deque>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
// TODO SYCL Integration header approach relies on an assumption that kernel
// lambda objects created by the host compiler and any of the device compilers
// will be identical with respect to field types, order and offsets. Some verification
// mechanism should be developed to enforce that.
// TODO FIXME SYCL Support for SYCL in FE should be refactored:
// - kernel identification and generation should be made a separate pass over
// AST. RecursiveASTVisitor + VisitFunctionTemplateDecl +
// FunctionTemplateDecl::getSpecializations() mechanism could be used for that.
// - All SYCL stuff on Sema level should be encapsulated into a single Sema
// field
// - Move SYCL stuff into a separate header
// Represents contents of a SYCL integration header file produced by a SYCL
// device compiler and used by SYCL host compiler (via forced inclusion into
// compiled SYCL source):
// - SYCL kernel names
// - SYCL kernel parameters and offsets of corresponding actual arguments
class SYCLIntegrationHeader {
public:
// Kind of kernel's parameters as captured by the compiler in the
// kernel lambda or function object
enum kernel_param_kind_t {
kind_first,
kind_accessor = kind_first,
kind_std_layout,
kind_sampler,
kind_pointer,
kind_last = kind_pointer
};
public:
SYCLIntegrationHeader(DiagnosticsEngine &Diag, bool UnnamedLambdaSupport);
/// Emits contents of the header into given stream.
void emit(raw_ostream &Out);
/// Emits contents of the header into a file with given name.
/// Returns true/false on success/failure.
bool emit(const StringRef &MainSrc);
/// Signals that subsequent parameter descriptor additions will go to
/// the kernel with given name. Starts new kernel invocation descriptor.
void startKernel(StringRef KernelName, QualType KernelNameType,
StringRef KernelStableName);
/// Adds a kernel parameter descriptor to current kernel invocation
/// descriptor.
void addParamDesc(kernel_param_kind_t Kind, int Info, unsigned Offset);
/// Signals that addition of parameter descriptors to current kernel
/// invocation descriptor has finished.
void endKernel();
private:
// Kernel actual parameter descriptor.
struct KernelParamDesc {
// Represents a parameter kind.
kernel_param_kind_t Kind = kind_last;
// If Kind is kind_scalar or kind_struct, then
// denotes parameter size in bytes (includes padding for structs)
// If Kind is kind_accessor
// denotes access target; possible access targets are defined in
// access/access.hpp
int Info = 0;
// Offset of the captured parameter value in the lambda or function object.
unsigned Offset = 0;
KernelParamDesc() = default;
};
// Kernel invocation descriptor
struct KernelDesc {
/// Kernel name.
std::string Name;
/// Kernel name type.
QualType NameType;
/// Kernel name with stable lambda name mangling
std::string StableName;
/// Descriptor of kernel actual parameters.
SmallVector<KernelParamDesc, 8> Params;
KernelDesc() = default;
};
/// Returns the latest invocation descriptor started by
/// SYCLIntegrationHeader::startKernel
KernelDesc *getCurKernelDesc() {
return KernelDescs.size() > 0 ? &KernelDescs[KernelDescs.size() - 1]
: nullptr;
}
/// Emits a forward declaration for given declaration.
void emitFwdDecl(raw_ostream &O, const Decl *D);
/// Emits forward declarations of classes and template classes on which
/// declaration of given type depends. See example in the comments for the
/// implementation.
/// \param O
/// stream to emit to
/// \param T
/// type to emit forward declarations for
/// \param Emitted
/// a set of declarations for which forward declarations have already been emitted
void emitForwardClassDecls(raw_ostream &O, QualType T,
llvm::SmallPtrSetImpl<const void*> &Emitted);
private:
/// Keeps invocation descriptors for each kernel invocation started by
/// SYCLIntegrationHeader::startKernel
SmallVector<KernelDesc, 4> KernelDescs;
/// Used for emitting diagnostics.
DiagnosticsEngine &Diag;
/// Whether header is generated with unnamed lambda support
bool UnnamedLambdaSupport;
};
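// One plausible emission sequence for the class above: a device compiler calls
// startKernel() once per kernel, addParamDesc() once per captured field,
// endKernel() to close the descriptor, and finally emit() to write the
// integration header consumed by the host compiler.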
/// Keeps track of the expected type during expression parsing. The type is
/// tied to a particular token; all functions that update or consume the type
/// take the start location of the token they are looking at as a parameter.
/// This allows us to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder() = default;
explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function: it stores a
/// function_ref, so clients must make sure that all calls to get() with the
/// same location happen while the function_ref is alive.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
QualType get(SourceLocation Tok) const {
if (Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
return QualType();
}
private:
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
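// A hypothetical use of the builder above, assuming a PreferredTypeBuilder
// member named PreferredType is available to the parser:
//   PreferredType.enterReturn(S, Tok.getLocation()); // on 'return'
//   QualType Expected = PreferredType.get(ExprStartLoc);
// get() yields the recorded type only when ExprStartLoc matches the token the
// type was registered for; otherwise it returns a null QualType.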
/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// A key method to reduce duplicate debug info from Sema.
virtual void anchor();
///Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See comment in below overload for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
/// The maximum alignment, same as in llvm::Value. We duplicate them here
/// because that allows us not to duplicate the constants in clang code,
/// which we would otherwise have to do, since we can't directly use the llvm constants.
/// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp
///
/// This is the greatest alignment value supported by load, store, and alloca
/// instructions, and global values.
static const unsigned MaxAlignmentExponent = 29;
static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent;
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
/// Stack containing information needed when in C++2a an 'auto' is encountered
/// in a function declaration parameter type specifier in order to invent a
/// corresponding template parameter in the enclosing abbreviated function
/// template. This information is also present in LambdaScopeInfo, stored in
/// the FunctionScopes stack.
SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
/// All the external declarations encountered and used in the TU.
SmallVector<VarDecl *, 4> ExternalDeclarations;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
/// Whether the AST is currently being rebuilt to correct immediate
/// invocations. Immediate invocation candidates and references to consteval
/// functions aren't tracked when this is set.
bool RebuildingImmediateInvocation = false;
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
bool isConstantEvaluated() {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
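// A minimal usage sketch, assuming FD is a FunctionDecl whose body is about
// to be synthesized (for example, a defaulted special member) and UseLoc is
// the location that triggered the synthesis:
//
//   Sema::SynthesizedFunctionScope Scope(S, FD);
//   Scope.addContextNote(UseLoc); // optional "while defining..." note
//   // ... build and attach the body ...
//   // the destructor pops the evaluation context and the function scope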
/// WeakUndeclaredIdentifiers - Identifiers contained in a
/// \#pragma weak before they are declared. This is rare; such an identifier
/// may alias another identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.

NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields. This is really a property of the unevaluated
/// context; see the UnevaluatedAbstract evaluation context below.
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
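// A minimal sketch of how these contexts are typically entered and left,
// using the push/pop members seen in SynthesizedFunctionScope above (Clang
// also provides an RAII helper for this elsewhere in this header):
//
//   S.PushExpressionEvaluationContext(
//       Sema::ExpressionEvaluationContext::Unevaluated);
//   // ... parse the operand of sizeof/decltype; no code is generated ...
//   S.PopExpressionEvaluationContext();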
using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// Whether we are in a decltype expression.
bool IsDecltype;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// Expressions appearing as the LHS of a volatile assignment in this
/// context. We produce a warning for these when popping the context if
/// they are not discarded-value expressions nor unevaluated operands.
SmallVector<Expr*, 2> VolatileAssignmentLHSs;
/// Set of candidates for starting an immediate invocation.
llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates;
/// Set of DeclRefExprs referencing a consteval function when used in a
/// context not already known to be immediately invoked.
llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
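// A minimal sketch of consuming a SpecialMemberOverloadResult; SMOR is a
// placeholder for a result obtained from the special-member cache below:
//
//   if (SMOR.getKind() == Sema::SpecialMemberOverloadResult::Success) {
//     CXXMethodDecl *MD = SMOR.getMethod();
//     // ... use MD ...
//   } else if (SMOR.getKind() == Sema::SpecialMemberOverloadResult::Ambiguous) {
//     // ... diagnose the ambiguity ...
//   }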
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
/// This is not a defaultable comparison operator.
None,
/// This is an operator== that should be implemented as a series of
/// subobject comparisons.
Equal,
/// This is an operator<=> that should be implemented as a series of
/// subobject comparisons.
ThreeWay,
/// This is an operator!= that should be implemented as a rewrite in terms
/// of a == comparison.
NotEqual,
/// This is an <, <=, >, or >= that should be implemented as a rewrite in
/// terms of a <=> comparison.
Relational,
};
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FPFeatures state on entry/exit of compound
/// statements.
class FPFeaturesStateRAII {
public:
FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
~FPFeaturesStateRAII() { S.FPFeatures = OldFPFeaturesState; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
};
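// A minimal usage sketch: save and restore the floating-point feature state
// around a compound statement whose pragmas may modify S.FPFeatures:
//
//   {
//     Sema::FPFeaturesStateRAII SaveFP(S);
//     // ... act on the compound statement ...
//   } // the previous FPOptions are restored here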
void addImplicitTypedef(StringRef Name, QualType T);
bool WarnedStackExhausted = false;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case anyway.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
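// A minimal usage sketch; the diagnostic ID below is a placeholder, not a
// real entry from the Sema diagnostic tables, and SomeQualType stands for
// any QualType the caller wants to report:
//
//   S.Diag(D->getLocation(), diag::err_some_placeholder_diagnostic)
//       << D->getDeclName() << SomeQualType;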
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
unsigned Index);
void emitAndClearUnusedLocalTypedefWarnings();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if we should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
SYCLIntelFPGAIVDepAttr *
BuildSYCLIntelFPGAIVDepAttr(const AttributeCommonInfo &CI, Expr *Expr1,
Expr *Expr2);
template <typename FPGALoopAttrT>
FPGALoopAttrT *BuildSYCLIntelFPGALoopAttr(const AttributeCommonInfo &A,
Expr *E);
LoopUnrollHintAttr *BuildLoopUnrollHintAttr(const AttributeCommonInfo &A,
Expr *E);
OpenCLUnrollHintAttr *
BuildOpenCLLoopUnrollHintAttr(const AttributeCommonInfo &A, Expr *E);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, so at the point a dereference is seen we cannot yet tell
/// whether the noderef pointer is really accessed. For example, in `&*p`
/// where `p` is a noderef pointer, we will first parse the `*p`, but need to
/// check later whether `address of` is applied to it. This requires keeping
/// a container of all pending expressions and checking whether the address
/// of them is eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser *Diagnoser);
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) {
return Entity->getOwningModule();
}
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return !D->isHidden() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T) {
return !RequireCompleteTypeImpl(Loc, T, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
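// A minimal usage sketch of the variadic convenience overloads above; the
// diagnostic ID is a placeholder and ExtraArg stands for any value that has
// a getPrintable() overload:
//
//   if (S.RequireCompleteType(Loc, T, diag::err_placeholder_incomplete_type,
//                             ExtraArg))
//     return true; // the type was incomplete and has been diagnosed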
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as a non-type, and an expression representing
/// that name has been formed.
NC_ContextIndependentExpr,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
/// The name was classified as a concept name.
NC_Concept,
};
class NameClassification {
NameClassificationKind Kind;
union {
ExprResult Expr;
NamedDecl *NonTypeDecl;
TemplateName Template;
ParsedType Type;
};
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification ContextIndependentExpr(ExprResult E) {
NameClassification Result(NC_ContextIndependentExpr);
Result.Expr = E;
return Result;
}
static NameClassification NonType(NamedDecl *D) {
NameClassification Result(NC_NonType);
Result.NonTypeDecl = D;
return Result;
}
static NameClassification UndeclaredNonType() {
return NameClassification(NC_UndeclaredNonType);
}
static NameClassification DependentNonType() {
return NameClassification(NC_DependentNonType);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification Concept(TemplateName Name) {
NameClassification Result(NC_Concept);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ExprResult getExpression() const {
assert(Kind == NC_ContextIndependentExpr);
return Expr;
}
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
NamedDecl *getNonTypeDecl() const {
assert(Kind == NC_NonType);
return NonTypeDecl;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_Concept ||
Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_Concept:
return TNK_Concept_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
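// A minimal sketch of how a caller in the parser might use the pair of
// helpers above when `expr < ...` fails to parse as a comparison; Result,
// LessLoc, and GreaterLoc are placeholders for the parsed expression and
// the locations of the surrounding angle brackets:
//
//   bool Dependent = false;
//   if (S.mightBeIntendedToBeTemplateName(Result, Dependent))
//     S.diagnoseExprIntendedAsTemplateName(getCurScope(), Result,
//                                          LessLoc, GreaterLoc);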
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D);
ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
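// Hedged illustration (an assumed example, not from the original header): a
// type marked 'trivial_abi' keeps a non-trivial destructor yet may be treated
// as trivial for ABI purposes, which is the case TAH_ConsiderTrivialABI
// takes into account:
//
//   struct [[clang::trivial_abi]] Handle {
//     ~Handle();   // non-trivial, but the attribute allows register passing
//   };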
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
class DefaultedFunctionKind {
CXXSpecialMember SpecialMember : 8;
DefaultedComparisonKind Comparison : 8;
public:
DefaultedFunctionKind()
: SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
}
DefaultedFunctionKind(CXXSpecialMember CSM)
: SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
DefaultedFunctionKind(DefaultedComparisonKind Comp)
: SpecialMember(CXXInvalid), Comparison(Comp) {}
bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
bool isComparison() const {
return Comparison != DefaultedComparisonKind::None;
}
explicit operator bool() const {
return isSpecialMember() || isComparison();
}
CXXSpecialMember asSpecialMember() const { return SpecialMember; }
DefaultedComparisonKind asComparison() const { return Comparison; }
/// Get the index of this function kind for use in diagnostics.
unsigned getDiagnosticIndex() const {
static_assert(CXXInvalid > CXXDestructor,
"invalid should have highest index");
static_assert((unsigned)DefaultedComparisonKind::None == 0,
"none should be equal to zero");
return SpecialMember + (unsigned)Comparison;
}
};
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
return getDefaultedFunctionKind(MD).asSpecialMember();
}
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
return getDefaultedFunctionKind(FD).asComparison();
}
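// Minimal sketch of the two categories distinguished by DefaultedFunctionKind
// (illustrative only):
//
//   struct S {
//     S() = default;                                // CXXSpecialMember: default constructor
//     bool operator==(const S &) const = default;   // DefaultedComparisonKind (C++20)
//   };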
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform an ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike in C++, the body is actually parsed, and a structural mismatch is
/// rejected with an error.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
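// Illustrative sketch of the priorities (an assumed example, not from the
// original header): an attribute written directly on the declaration
// (AP_Explicit) takes precedence over one injected by '#pragma clang attribute'
// (AP_PragmaClangAttribute), which in turn takes precedence over one inferred
// from another platform (AP_InferredFromOtherPlatform):
//
//   void f() __attribute__((availability(macos, introduced=10.13)));  // AP_Explicit
//
//   #pragma clang attribute push (__attribute__((availability(macos, introduced=10.12))), apply_to=function)
//   void g();   // receives the attribute with AP_PragmaClangAttribute priority
//   #pragma clang attribute pop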
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
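// Illustrative sketch of the three results (an added example, not from the
// original header):
//
//   void f(int);
//   void f(double);   // Ovl_Overload: different signature, legitimate overload
//   void f(int);      // Ovl_Match: signature exactly matches an existing declaration
//
//   int g;
//   void g();         // Ovl_NonFunction: the lookup results for 'g' contain a non-function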
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true,
bool ConsiderRequiresClauses = true);
enum class AllowedExplicit {
/// Allow no explicit functions to be used.
None,
/// Allow explicit conversion functions but not explicit constructors.
Conversions,
/// Allow both explicit conversion functions and explicit constructors.
All
};
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
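// Hedged examples of contexts that require a converted constant expression
// (illustrative only, not part of the original header):
//
//   switch (n) { case 3: break; }                      // CCEK_CaseValue
//   enum E : unsigned { X = 7 };                       // CCEK_Enumerator (fixed underlying type)
//   template <int N> struct A {};  A<2 + 2> a;         // CCEK_TemplateArg
//   if constexpr (sizeof(int) == 4) {}                 // CCEK_ConstexprIf
//   struct W { explicit(sizeof(long) > 4) W(int); };   // CCEK_ExplicitBool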
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
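// Illustrative Objective-C literal forms corresponding to the kinds above
// (sketch only, not part of the original header):
//
//   @[ @1, @2 ]          // LK_Array
//   @{ @"key" : @1 }     // LK_Dictionary
//   @42                  // LK_Numeric
//   @(x + y)             // LK_Boxed
//   @"text"              // LK_String
//   ^{ return 0; }       // LK_Block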
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit as a series of 'note's all template and non-template candidates
// identified by the expression Expr.
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfSingleOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConverion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
OverloadedOperatorKind Op,
const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true,
FunctionDecl *DefaultedFn = nullptr);
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
FunctionDecl *DefaultedFn);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up a name following ~ in a destructor name. This is an ordinary
/// lookup, but prefers tags to typedefs.
LookupDestructorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
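// A few illustrative declarations and the lookup kinds that would find them
// (assumed examples, not from the original header):
//
//   int value;                  // LookupOrdinaryName
//   struct Node { int data; };  // LookupTagName finds 'Node'; LookupMemberName finds 'data'
//   namespace util {}           // LookupNamespaceName (e.g. in 'using namespace util;')
//   label: ;                    // LookupLabel (e.g. for 'goto label;')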
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
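// Illustrative literal-operator declarations matching the lookup results above
// (sketch only; 'Tag' is a placeholder type):
//
//   Tag operator""_u(unsigned long long);          // LOLR_Cooked: receives the value 123 for 123_u
//   Tag operator""_u(const char *);                // LOLR_Raw: receives the spelling "123"
//   template <char... Chars> Tag operator""_u();   // LOLR_Template: receives '1','2','3' as a pack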
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
/// Emission status of a function with respect to CUDA/HIP/OpenMP host/device
/// attributes.
enum class FunctionEmissionStatus {
Emitted,
CUDADiscarded, // Discarded due to CUDA/HIP hostness
OMPDiscarded, // Discarded due to OpenMP hostness
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl);
// Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non error recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
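// Minimal usage sketch (assumes a Sema &SemaRef and an Expr *E already in
// scope; illustrative only): correct any delayed typos in E, accepting only
// rebuilt expressions of pointer type.
//
//   ExprResult Fixed = SemaRef.CorrectDelayedTyposInExpr(
//       E, /*InitDecl=*/nullptr, [](Expr *Candidate) -> ExprResult {
//         return Candidate->getType()->isPointerType() ? ExprResult(Candidate)
//                                                      : ExprError();
//       });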
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
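// Hedged illustration of a calling convention coming from a typedef rather
// than being written on the declarator itself (an assumed example):
//
//   typedef void (__cdecl FnTy)();
//   struct S {
//     FnTy method;    // CC is attached via the typedef; hasExplicitCallingConv() looks
//                     // only through parens, so this does not count as explicit here
//     void other();   // no CC written: adjustMemberFunctionCC() applies the ABI default
//   };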
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method's
/// implementation declaration exactly matches its corresponding declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar
/// which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and the property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks if two methods' types match and returns
/// true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
/// category match those implemented in its primary class and warns each time
/// an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
/// Returns default addr space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See the description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and
/// the CheckTheOther parameter is set, it then checks the other kind. If no
/// such method or only one method is found, the function returns false;
/// otherwise, it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
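// A minimal illustrative sketch (hypothetical caller code; S, MD, Sel, Loc
// and ResultTy are placeholders, not names from this header): a newly
// declared instance method is added to the pool, and a later message send to
// an 'id' receiver can recover a candidate declaration by selector alone.
//
//   S.AddInstanceMethodToGlobalPool(MD);
//   ...
//   if (ObjCMethodDecl *Candidate =
//           S.LookupInstanceMethodInGlobalPool(Sel, SourceRange(Loc)))
//     ResultTy = Candidate->getReturnType();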
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(
ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
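// A minimal illustrative sketch (hypothetical caller code; IncExpr and the
// location/statement/condition variables are placeholders): wrap a parsed
// expression as a full-expression before handing it to a statement callback
// such as ActOnForStmt.
//
//   Sema::FullExprArg Third = S.MakeFullExpr(IncExpr);
//   StmtResult For = S.ActOnForStmt(ForLoc, LParenLoc, InitStmt, SecondCond,
//                                   Third, RParenLoc, Body);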
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
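// A minimal illustrative sketch (hypothetical use; S, L, R and Elts are
// placeholders): enter a compound-statement scope for the duration of one
// block and let the destructor call ActOnFinishOfCompoundStmt automatically.
//
//   {
//     Sema::CompoundScopeRAII BodyScope(S, /*IsStmtExpr=*/false);
//     StmtResult CS = S.ActOnCompoundStmt(L, R, Elts, /*isStmtExpr=*/false);
//   }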
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
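// A minimal illustrative sketch (hypothetical use; the ParseFailed condition
// is a placeholder): pop the pushed function scope on every early-exit path,
// and disable the guard on the success path where the scope is still needed.
//
//   Sema::FunctionScopeRAII ScopeGuard(S);
//   if (ParseFailed)
//     return StmtError(); // destructor pops the function scope
//   ScopeGuard.disable(); // success path keeps the scope alive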
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
bool CheckRebuiltAttributedStmtAttributes(ArrayRef<const Attr *> Attrs);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
ParsingClassDepth++;
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
ParsingClassDepth--;
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReciever = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or
/// complaining if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
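// A minimal illustrative sketch (hypothetical caller code; Var and Loc are
// placeholders): probe whether a variable could be captured without actually
// recording the capture, by passing BuildAndDiagnose = false to the full
// overload above.
//
//   QualType CaptureType, DeclRefType;
//   bool CannotCapture = S.tryCaptureVariable(
//       Var, Loc, Sema::TryCapture_Implicit, /*EllipsisLoc=*/SourceLocation(),
//       /*BuildAndDiagnose=*/false, CaptureType, DeclRefType,
//       /*FunctionScopeIndexToStopAt=*/nullptr);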
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
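// A minimal illustrative sketch (hypothetical caller code; ArgExpr and
// CallLoc are placeholders, and warn_null_arg is just an example diagnostic
// ID): the warning is emitted only if the statement turns out to be
// reachable once the enclosing function body has been parsed.
//
//   S.DiagRuntimeBehavior(CallLoc, ArgExpr,
//                         S.PDiag(diag::warn_null_arg)
//                             << ArgExpr->getSourceRange());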
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
ExprResult BuildUniqueStableName(SourceLocation OpLoc,
TypeSourceInfo *Operand);
ExprResult BuildUniqueStableName(SourceLocation OpLoc, Expr *E);
ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation L,
SourceLocation R, ParsedType Ty);
ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation L,
SourceLocation R, Expr *Operand);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
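// A minimal illustrative sketch (hypothetical parser-side caller; Base,
// Member and the location/scope variables are placeholders): the caller
// packages its own arguments so that BuildMemberReferenceExpr can re-invoke
// ActOnMemberAccess with '->' if the '.' access fails.
//
//   Sema::ActOnMemberAccessExtraArgs ExtraArgs = {CurScope, Member, ObjCImpDecl};
//   ExprResult Res = S.BuildMemberReferenceExpr(
//       Base, Base->getType(), OpLoc, /*IsArrow=*/false, SS, TemplateKWLoc,
//       /*FirstQualifierInScope=*/nullptr, NameInfo, TemplateArgs, CurScope,
//       &ExtraArgs);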
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expression extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
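// A minimal illustrative sketch (hypothetical caller code; CurScope, the
// source locations, ParsedTy and IndexExpr are placeholders): for
// __builtin_offsetof(struct T, a[2]), the parser would pass a field
// designator followed by an array designator.
//
//   Sema::OffsetOfComponent Comps[2];
//   Comps[0].isBrackets = false;
//   Comps[0].U.IdentInfo = &S.Context.Idents.get("a");
//   Comps[1].isBrackets = true;
//   Comps[1].U.E = IndexExpr;
//   ExprResult Res = S.ActOnBuiltinOffsetOf(CurScope, BuiltinLoc, TypeLoc,
//                                           ParsedTy, Comps, RParenLoc);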
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
enum class ComparisonCategoryUsage {
/// The '<=>' operator was used in an expression and a builtin operator
/// was selected.
OperatorInExpression,
/// A defaulted 'operator<=>' needed the comparison category. This
/// typically only applies to 'std::strong_ordering', due to the implicit
/// fallback return value.
DefaultedOperator,
};
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc,
ComparisonCategoryUsage Usage);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E) { CalledStmt(E); }
/// Integrate an invoked statement into the collected data.
void CalledStmt(Stmt *S);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
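// A minimal illustrative sketch (hypothetical use; CalledMethods and Loc are
// placeholders): fold in every member function that an implicit special
// member would invoke, then read back the combined exception specification.
//
//   Sema::ImplicitExceptionSpecification Spec(S);
//   for (const CXXMethodDecl *Called : CalledMethods)
//     Spec.CalledDecl(Loc, Called);
//   FunctionProtoType::ExceptionSpecInfo ESI = Spec.getExceptionSpec();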
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Produce notes explaining why a defaulted function was defined as deleted.
void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed-in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr.
/// Otherwise it simply returns the passed-in expression.
ExprResult MaybeBindToTemporary(Expr *E);
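// Illustrative call sketch (not part of the original header); 'E' stands for
// an already-built expression and error handling is abbreviated:
//
//   ExprResult Bound = MaybeBindToTemporary(E);
//   if (Bound.isInvalid())
//     return ExprError();
//   E = Bound.get();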
/// Wrap the expression in a ConstantExpr if it is a potential immediate
/// invocation.
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class) along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
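// Illustrative usage sketch for CXXThisScopeRAII (not part of the original
// header); 'SemaRef', 'RD', and 'ThisQuals' are placeholders for a Sema
// reference, the class (or class template) declaration, and the qualifiers
// on '*this':
//
//   {
//     Sema::CXXThisScopeRAII ThisScope(SemaRef, RD, ThisQuals);
//     // 'this' expressions may be used here even outside a member body.
//   } // The previous CXXThisTypeOverride is restored on destruction.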
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true if the capture failed, false on success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
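// Illustrative call sketch (not part of the original header); 'Loc', 'Range',
// 'AllocTy', and 'PlacementArgs' are placeholders supplied by a caller
// handling a (non-array) new-expression, and the meaning of the return value
// is assumed to signal failure:
//
//   FunctionDecl *OperatorNew = nullptr, *OperatorDelete = nullptr;
//   bool PassAlignment = false;
//   if (FindAllocationFunctions(Loc, Range, AFS_Both, AFS_Both, AllocTy,
//                               /*IsArray=*/false, PassAlignment,
//                               PlacementArgs, OperatorNew, OperatorDelete))
//     return ExprError(); // no usable allocation/deallocation functions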
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with an ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
return ActOnFinishFullExpr(
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
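// Illustrative construction sketch (not part of the original header); 'II',
// 'IdLoc', 'CCLoc', 'S', and 'SS' are placeholders for the parsed identifier,
// its locations, the current scope, and the scope specifier being built:
//
//   NestedNameSpecInfo IdInfo(II, IdLoc, CCLoc);
//   if (ActOnCXXNestedNameSpecifier(S, IdInfo, /*EnteringContext=*/false, SS))
//     /* the nested-name-specifier could not be built; recover here */;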
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery; in this case, no error messages are emitted.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it were followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
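// Illustrative round-trip sketch (not part of the original header); 'SS' and
// 'AnnotationRange' are placeholders for a parsed nested-name-specifier and
// its source range:
//
//   void *Annotation = SaveNestedNameSpecifierAnnotation(SS);
//   // ... the annotation pointer is stashed on a token by the parser ...
//   CXXScopeSpec Restored;
//   RestoreNestedNameSpecifierAnnotation(Annotation, AnnotationRange,
//                                        Restored);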
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Expr *TrailingRequiresClause);
/// Number the lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, EllipsisLoc, None, Id,
InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the DeclContext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
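// Illustrative parser-side flow (not part of the original header); 'Actions'
// names the Sema instance the parser drives, and 'Intro', 'ParamInfo',
// 'StartLoc', and the body-parsing step are placeholders:
//
//   Actions.ActOnStartOfLambdaDefinition(Intro, ParamInfo, getCurScope());
//   StmtResult Body = /* parse the compound statement */;
//   if (Body.isInvalid()) {
//     Actions.ActOnLambdaError(StartLoc, getCurScope());
//     return ExprError();
//   }
//   return Actions.ActOnLambdaExpr(StartLoc, Body.get(), getCurScope());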
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
bool CheckConstraintExpression(Expr *CE, Token NextToken = Token(),
bool *PossibleNonPrimary = nullptr,
bool IsTrailingRequiresClause = false);
/// Check whether the given type-dependent expression will be the name of a
/// function or another callable function-like entity (e.g. a function
/// template or overload set) for any substitution.
bool IsDependentFunctionNameExpr(Expr *E);
private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *>
NormalizationCache;
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
SatisfactionCache;
public:
const NormalizedConstraint *
getNormalizedAssociatedConstraints(
NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);
/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives the result: true if D1 is
/// at least as constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
NamedDecl *D2, ArrayRef<const Expr *> AC2,
bool &Result);
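// Illustrative sketch (not part of the original header); 'D1'/'D2' and
// 'AC1'/'AC2' are placeholders for two declarations and their associated
// constraints being partially ordered:
//
//   bool AtLeastAsConstrained = false;
//   if (IsAtLeastAsConstrained(D1, AC1, D2, AC2, AtLeastAsConstrained))
//     return; // an error occurred while checking
//   if (AtLeastAsConstrained) {
//     // D1's constraints subsume D2's; prefer D1 during partial ordering.
//   }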
/// Emit a diagnostic if D1 was not at least as constrained as D2, but would
/// have been if a pair of the atomic constraints involved had been declared
/// in a concept rather than repeated in two separate places in code.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);
/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
bool CheckConstraintSatisfaction(
const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction);
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful, emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction);
/// Check whether the given function decl's trailing requires clause is
/// satisfied, if any. Returns false and updates Satisfaction with the
/// satisfaction verdict if successful, emits a diagnostic and returns true if
/// an error occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckFunctionConstraints(const FunctionDecl *FD,
ConstraintSatisfaction &Satisfaction,
SourceLocation UsageLoc = SourceLocation());
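// Illustrative sketch (not part of the original header); 'FD' is a
// placeholder for a candidate FunctionDecl, and the IsSatisfied member of
// ConstraintSatisfaction is assumed to carry the verdict:
//
//   ConstraintSatisfaction Satisfaction;
//   if (CheckFunctionConstraints(FD, Satisfaction))
//     return true;                      // error; satisfaction not determined
//   if (!Satisfaction.IsSatisfied)
//     DiagnoseUnsatisfiedConstraint(Satisfaction);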
/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
/// \param First whether this is the first time an unsatisfied constraint is
/// diagnosed for this error.
void
DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied because it was ill-formed.
void DiagnoseUnsatisfiedIllFormedConstraint(SourceLocation DiagnosticLocation,
StringRef Diagnostic);
void DiagnoseRedeclarationConstraintMismatch(SourceLocation Old,
SourceLocation New);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called at some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add the gsl::Pointer attribute to std::container::iterator.
/// \param ND The declaration that introduces the name
/// std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
DefaultedComparisonKind DCK);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of the base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found, QualType ObjectType,
SourceLocation Loc,
const PartialDiagnostic &Diag);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found,
QualType ObjectType) {
return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
SourceLocation(), PDiag());
}
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
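// Illustrative call sketch (not part of the original header); 'Loc' and 'T'
// are placeholders, 'diag::err_abstract_type_in_decl' stands for whichever
// diagnostic the caller wants bound, and the return semantics are assumed:
//
//   if (RequireNonAbstractType(Loc, T, diag::err_abstract_type_in_decl,
//                              AbstractReturnType))
//     return true; // T was abstract and has been diagnosed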
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization,
SourceLocation TemplateKWLoc = SourceLocation(),
AssumedTemplateKind *ATK = nullptr);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg, bool HasTypeConstraint);
bool ActOnTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(NestedNameSpecifierLoc NS,
DeclarationNameInfo NameInfo,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(AutoTypeLoc TL,
NonTypeTemplateParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid,
bool SuppressDiagnostic = false);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
/// Get a template argument mapping the given template parameter to itself,
/// e.g. for X in \c template<int X>, this would return an expression template
/// argument referencing X.
TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param,
SourceLocation Location);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &ConceptNameInfo,
NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
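/// Illustrative examples of the argument kinds above (a sketch, not part of
/// the original header; the function names are hypothetical):
/// \code
/// template<typename T, int N> void f(T (&)[N]);
/// int a[3];
/// f<int>(a); // 'int' is CTAK_Specified; N == 3 is CTAK_DeducedFromArrayBound
/// template<typename T> void g(T);
/// g(42);     // T == int is CTAK_Deduced
/// \endcode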
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true,
bool *ConstraintsNotSatisfied = nullptr);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
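/// A minimal example of the construct handled above (illustrative only, not
/// part of the original header):
/// \code
/// template<typename T> struct S {
///   typename T::type member; // 'typename T::type' is the typename-specifier
/// };
/// \endcode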
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
TypeSourceInfo **TSI,
bool DeducedTSTContext);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
bool DeducedTSTContext = true);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Concepts
//===--------------------------------------------------------------------===//
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
RequiresExprBodyDecl *
ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
Scope *BodyScope);
void ActOnFinishRequiresExpr();
concepts::Requirement *ActOnSimpleRequirement(Expr *E);
concepts::Requirement *ActOnTypeRequirement(
SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId);
concepts::Requirement *ActOnCompoundRequirement(Expr *E,
SourceLocation NoexceptLoc);
concepts::Requirement *
ActOnCompoundRequirement(
Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint, unsigned Depth);
concepts::Requirement *ActOnNestedRequirement(Expr *Constraint);
concepts::ExprRequirement *
BuildExprRequirement(
Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::ExprRequirement *
BuildExprRequirement(
concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag,
bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type);
concepts::TypeRequirement *
BuildTypeRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
concepts::NestedRequirement *BuildNestedRequirement(Expr *E);
concepts::NestedRequirement *
BuildNestedRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc,
RequiresExprBodyDecl *Body,
ArrayRef<ParmVarDecl *> LocalParameters,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation ClosingBraceLoc);
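/// For orientation, a requires-expression exercising the requirement kinds
/// handled above (illustrative only; the concept name is hypothetical and the
/// compound requirement assumes <concepts> for std::convertible_to):
/// \code
/// template<typename T>
/// concept Addable = requires(T a, T b) {
///   a + b;                                        // simple requirement
///   typename T::value_type;                       // type requirement
///   { a + b } noexcept -> std::convertible_to<T>; // compound requirement
///   requires sizeof(T) > 1;                       // nested requirement
/// };
/// \endcode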
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block,
/// A type constraint.
UPPC_TypeConstraint
};
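/// For example (illustrative only, not part of the original header), an
/// unexpanded pack in a data member type is diagnosed with
/// \c UPPC_DataMemberType:
/// \code
/// template<typename ...Ts> struct Tuple {
///   Ts member; // error: declaration type contains unexpanded parameter pack 'Ts'
/// };
/// \endcode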
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
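/// For orientation (illustrative only, not part of the original header;
/// 'callee' is a hypothetical function), both a pack-expansion type and a
/// pack-expansion expression appear in:
/// \code
/// template<typename ...Ts> void wrapper(Ts ...args) { // 'Ts ...args' expands a type pack
///   callee(args...);                                  // 'args...' expands an expression pack
/// }
/// \endcode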
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
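/// For example (illustrative only, not part of the original header), the size
/// computed here corresponds to the value of a \c sizeof... expression:
/// \code
/// template<typename ...Ts> constexpr unsigned arity() { return sizeof...(Ts); }
/// static_assert(arity<int, float, char>() == 3);
/// \endcode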
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// The deduced arguments did not satisfy the constraints associated
/// with the template.
TDK_ConstraintsNotSatisfied,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
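/// Illustrative deduction outcomes (a sketch, not part of the original
/// header):
/// \code
/// template<typename T> void f(T, T);
/// f(1, 2);    // TDK_Success: T == int
/// f(1, 2.0);  // TDK_Inconsistent: T deduced as both 'int' and 'double'
/// f(1, 2, 3); // TDK_TooManyArguments
/// \endcode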
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
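/// For example (illustrative only, not part of the original header):
/// \code
/// auto x = 42;       // deduction succeeds, x is 'int'
/// auto y = {1, 2.0}; // deduction fails: conflicting element types in the initializer list
/// \endcode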
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
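/// For orientation (illustrative only, not part of the original header),
/// class template argument deduction relies on such implicit guides:
/// \code
/// template<typename T> struct Box { T value; Box(T v) : value(v) {} };
/// Box b(42); // implicit guide 'Box(T) -> Box<T>' yields Box<int>
/// \endcode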
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(
FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
unsigned NumCallArguments2, bool Reversed = false);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
unsigned Depth, llvm::SmallBitVector &Used);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are instantiating a requirement of a requires expression.
RequirementInstantiation,
/// We are checking the satisfaction of a nested requirement of a requires
/// expression.
NestedRequirementConstraintsCheck,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are declaring an implicit 'operator==' for a defaulted
/// 'operator<=>'.
DeclaringImplicitEqualityComparison,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
// We are checking the constraints associated with a constrained entity or
// the constraint expression of a concept. This includes the checks that
// atomic constraints have the type 'bool' and that they can be constant
// evaluated.
ConstraintsCheck,
// We are substituting template arguments into a constraint expression.
ConstraintSubstitution,
// We are normalizing a constraint expression.
ConstraintNormalization,
// We are substituting into the parameter mapping of an atomic constraint
// during normalization.
ParameterMappingSubstitution,
/// We are rewriting a comparison operator in terms of an operator<=>.
RewritingOperatorAsSpaceship,
/// Added for template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that caused
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
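// Illustrative usage (a sketch, not from the original header; 'I' is a
// hypothetical pack element index): substitute the I-th element of each
// expanded parameter pack within this scope.
//   ArgumentPackSubstitutionIndexRAII SubstIndex(*this, I);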
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() returns true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintsCheck {};
/// \brief Note that we are checking the constraints associated with some
/// constrained entity (a concept declaration or a template with associated
/// constraints).
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintsCheck, NamedDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintSubstitution {};
/// \brief Note that we are checking a constraint expression associated
/// with a template declaration or as part of the satisfaction check of a
/// concept.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintSubstitution, NamedDecl *Template,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange);
struct ConstraintNormalization {};
/// \brief Note that we are normalizing a constraint expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintNormalization, NamedDecl *Template,
SourceRange InstantiationRange);
struct ParameterMappingSubstitution {};
/// \brief Note that we are substituting into the parameter mapping of an
/// atomic constraint during constraint normalization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParameterMappingSubstitution, NamedDecl *Template,
SourceRange InstantiationRange);
/// \brief Note that we are substituting template arguments into a part of
/// a requirement of a requires expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::Requirement *Req,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are checking the satisfaction of the constraint
/// expression inside of a nested requirement.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::NestedRequirement *Req, ConstraintsCheck,
SourceRange InstantiationRange = SourceRange());
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
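// Illustrative usage (a sketch, not from the original header;
// 'PointOfInstantiation' and 'Entity' are hypothetical locals):
//   InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
//   if (Inst.isInvalid())
//     return; // the maximum recursive instantiation depth was exceeded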
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
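// Illustrative usage (a sketch, not from the original header):
//   SFINAETrap Trap(*this);
//   ExprResult Result = /* substitute into the construct being checked */;
//   if (Trap.hasErrorOccurred())
//     /* report a substitution failure rather than a hard error */;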
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
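// Illustrative sketch (assumption): a caller that wants the instantiations
// triggered by its own work to be performed eagerly, without disturbing the
// global queues, can bracket that work like so:
//
//   GlobalEagerInstantiationScope GlobalInstantiations(S, /*Enabled=*/Recursive);
//   // ... define or instantiate something that may enqueue more work ...
//   GlobalInstantiations.perform(); // flush vtables + pending instantiations
//   // the destructor asserts the queues are empty and restores the saved ones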
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
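// Illustrative sketch (assumption): the local variant applies the same idea
// to PendingLocalImplicitInstantiations, e.g. while instantiating a function
// body that contains local classes:
//
//   LocalEagerInstantiationScope LocalInstantiations(S);
//   // ... instantiate the body; members of local classes get queued ...
//   LocalInstantiations.perform(); // instantiate them in this local scope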
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
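// Illustrative sketch (assumption; getInfoFor is a hypothetical helper): the
// builder only materialises an array if at least one parameter carries a
// non-default ExtParameterInfo:
//
//   ExtParameterInfoBuilder ParamInfos;
//   for (unsigned I = 0; I != Params.size(); ++I)
//     if (auto Info = getInfoFor(Params[I]))   // hypothetical lookup
//       ParamInfos.set(I, *Info);
//   EPI.ExtParameterInfos = ParamInfos.getPointerOrNull(Params.size());
//
// getPointerOrNull returns nullptr when every entry was the default, which
// keeps the common case cheap.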
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
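// Illustrative sketch (assumption): substituting the argument list of a call
// expression during instantiation might look like:
//
//   SmallVector<Expr *, 8> ConvertedArgs;
//   if (SubstExprs(OriginalArgs, /*IsCall=*/true, TemplateArgs, ConvertedArgs))
//     return ExprError(); // a substitution failure occurred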
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateArgumentListInfo &Outputs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
bool CheckInstantiatedFunctionTemplateConstraints(
SourceLocation PointOfInstantiation, FunctionDecl *Decl,
ArrayRef<TemplateArgument> TemplateArgs,
ConstraintSatisfaction &Satisfaction);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
VarDecl *getVarTemplateSpecialization(
VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs,
const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyPtrTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from the Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
void deduceOpenCLAddressSpace(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
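// Illustrative examples (assumption): for a message send that starts with an
// identifier, the three kinds roughly correspond to
//
//   [super description];                 // ObjCSuperMessage
//   [myObject description];              // ObjCInstanceMessage ('myObject' names a value)
//   [SomeClass alloc];                   // ObjCClassMessage ('SomeClass' names a type)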
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
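// Illustrative sketch (assumption): the Action/SlotLabel/Alignment arguments
// of ActOnPragmaPack mirror the surface forms of the pragma, roughly:
//
//   #pragma pack(8)              // set, no label, alignment expression '8'
//   #pragma pack(push, r1, 4)    // push with label "r1" and alignment '4'
//   #pragma pack(pop, r1)        // pop back to label "r1", no alignment
//   #pragma pack()               // reset to the default alignment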
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);
/// Called to set rounding mode for floating point operations.
void setRoundingMode(LangOptions::FPRoundingModeKind);
/// Called to set exception behavior for floating point operations.
void setExceptionMode(LangOptions::FPExceptionModeKind);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
template <typename AttrType>
bool checkRangedIntegralArgument(Expr *E, const AttrType *TmpAttr,
ExprResult &Result);
template <typename AttrType>
void AddOneConstantValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
template <typename AttrType>
void AddOneConstantPowerTwoValueAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
void AddIntelFPGABankBitsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr **Exprs, unsigned Size);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addSYCLIntelPipeIOAttr - Adds a pipe I/O attribute to a particular
/// declaration.
void addSYCLIntelPipeIOAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ID);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD is associated with any
/// extensions present in OpenCLDeclExtMap and, if so, return the
/// extension name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT is associated with any
/// extensions present in OpenCLTypeExtMap and, if so, return the
/// extension name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = std::string(Ext);
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
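// Illustrative sketch (assumption): \p Exts is a space-separated list, so a
// type that requires a particular extension could be registered as
//
//   setOpenCLExtensionForType(Context.DoubleTy, "cl_khr_fp64");
//
// after which later uses of that type are expected to be diagnosed unless the
// extension has been enabled.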
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckForDelayedContext = true);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckCaller = true);
/// Check if the expression is allowed to be used in expressions for the
/// OpenMP devices.
void checkOpenMPDeviceExpr(const Expr *E);
/// Finishes analysis of the deferred functions calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis();
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
/// Marks all the functions that might be required for the currently active
/// OpenMP context.
void markOpenMPDeclareVariantFuncsReferenced(SourceLocation Loc,
FunctionDecl *Func,
bool MightBeOdrUse);
public:
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Function tries to capture lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a D should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on a '#pragma omp requires' directive.
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
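// Illustrative sketch (assumption): for '#pragma omp declare reduction', the
// parser is expected to drive these hooks roughly in this order:
//
//   DeclGroupPtrTy DG = ActOnOpenMPDeclareReductionDirectiveStart(...);
//   ActOnOpenMPDeclareReductionCombinerStart(S, D);
//   // ... parse the combiner expression ...
//   ActOnOpenMPDeclareReductionCombinerEnd(D, Combiner);
//   VarDecl *OmpPriv = ActOnOpenMPDeclareReductionInitializerStart(S, D);
//   // ... parse the optional initializer clause ...
//   ActOnOpenMPDeclareReductionInitializerEnd(D, Init, OmpPriv);
//   return ActOnOpenMPDeclareReductionDirectiveEnd(S, DG, IsValid);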
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
Scope *S, QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
ArrayRef<OMPClause *> ClauseList);
/// Called on the start of target region i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of a target region, i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
NamedDeclSetType &SameDirectiveDecls);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return DeclareTargetNestingLevel > 0;
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
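// Illustrative sketch (assumption; ParseAssociatedStatement is hypothetical):
// a directive with an associated statement is typically handled by opening a
// captured region before the statement is parsed and closing it afterwards:
//
//   ActOnOpenMPRegionStart(OMPD_parallel, CurScope);
//   StmtResult Body = ParseAssociatedStatement();
//   StmtResult Region = ActOnOpenMPRegionEnd(Body, Clauses);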
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
/// \returns None if the function/variant function are not compatible with
/// the pragma, otherwise a pair of the original function and the variant
/// ref expression.
Optional<std::pair<FunctionDecl *, Expr *>>
checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The context traits associated with the function variant.
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acq_rel' clause.
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
SourceLocation DepLinMapLastLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(
ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
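// Illustrative sketch of how ImpCastExprToType and ScalarTypeToBooleanCastKind
// are typically combined; the surrounding caller and the expression variable
// 'E' are assumptions rather than part of this interface:
//
// ExprResult Res = ImpCastExprToType(
// E, Context.BoolTy, ScalarTypeToBooleanCastKind(E->getType()));
// if (Res.isInvalid())
// return ExprError();
// E = Res.get();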
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for the various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
/// An arithmetic operation.
ACK_Arithmetic,
/// A bitwise operation.
ACK_BitwiseOp,
/// A comparison.
ACK_Comparison,
/// A conditional (?:) operator.
ACK_Conditional,
/// A compound assignment expression.
ACK_CompAssign,
};
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, ArithConvKind ACK);
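// Illustrative sketch of the calling pattern described above; the enclosing
// binary-operator checker and the names 'LHS', 'RHS' and 'Loc' are
// assumptions:
//
// QualType ResTy = UsualArithmeticConversions(LHS, RHS, Loc, ACK_Arithmetic);
// if (LHS.isInvalid() || RHS.isInvalid())
// return QualType();
// if (ResTy.isNull() || !ResTy->isArithmeticType())
// return InvalidOperands(Loc, LHS, RHS);
// return ResTy;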
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatibleFunctionPointer - The assignment is between two function
/// pointer types that are not compatible, but we accept them as an
/// extension.
IncompatibleFunctionPointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
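// Illustrative sketch of the check-then-diagnose flow described above; the
// enclosing caller, the choice of AA_Assigning, and the names 'LHSType',
// 'RHS' and 'Loc' are assumptions:
//
// AssignConvertType ConvTy = CheckSingleAssignmentConstraints(LHSType, RHS);
// if (RHS.isInvalid())
// return ExprError();
// if (DiagnoseAssignmentResult(ConvTy, Loc, LHSType, RHS.get()->getType(),
// RHS.get(), AA_Assigning))
// return ExprError();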
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS,
SourceLocation QuestionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
// Fake up a scoped enumeration that still contextually converts to bool.
struct ReferenceConversionsScope {
/// The conversions that would be performed on an lvalue of type T2 when
/// binding a reference of type T1 to it, as determined when evaluating
/// whether T1 is reference-compatible with T2.
enum ReferenceConversions {
Qualification = 0x1,
NestedQualification = 0x2,
Function = 0x4,
DerivedToBase = 0x8,
ObjC = 0x10,
ObjCLifetime = 0x20,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
};
};
using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
ReferenceConversions *Conv = nullptr);
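// Illustrative sketch of consuming the conversion bitmask reported through
// \p Conv; the reference-binding caller and the names 'Loc', 'T1' and 'T2'
// are assumptions:
//
// ReferenceConversions Conv;
// ReferenceCompareResult Rel =
// CompareReferenceRelationship(Loc, T1, T2, &Conv);
// if (Rel == Ref_Compatible && (Conv & ReferenceConversions::DerivedToBase)) {
// // the binding additionally requires a derived-to-base adjustment
// }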
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType &paramType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
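// Illustrative sketch of how a statement-building caller typically consumes a
// ConditionResult; the scope 'S', the location 'IfLoc' and the expression
// 'CondExpr' are assumptions:
//
// ConditionResult Cond =
// ActOnCondition(S, IfLoc, CondExpr, ConditionKind::Boolean);
// if (Cond.isInvalid())
// return StmtError();
// if (llvm::Optional<bool> Known = Cond.getKnownValue()) {
// // *Known tells us statically which branch the condition selects
// }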
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
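// Illustrative sketch of pairing a custom diagnoser with
// VerifyIntegerConstantExpression; the NotICEDiagnoser type, 'MyDiagID' and
// the expression 'E' are hypothetical names chosen for the example:
//
// struct NotICEDiagnoser : VerifyICEDiagnoser {
// unsigned DiagID; // hypothetical: whatever diagnostic the caller prefers
// explicit NotICEDiagnoser(unsigned DiagID) : DiagID(DiagID) {}
// void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) override {
// S.Diag(Loc, DiagID) << SR;
// }
// } Diagnoser(MyDiagID);
// llvm::APSInt Value;
// ExprResult Converted = VerifyIntegerConstantExpression(E, &Value, Diagnoser);
// if (Converted.isInvalid())
// return ExprError();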
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
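// Illustrative sketch of the intended pairing from a pragma handler; the
// handler context ('Actions', 'PP', 'PragmaLoc', 'IsBegin') and the
// 'UnmatchedEndDiagID' diagnostic are assumptions:
//
// if (IsBegin)
// Actions.PushForceCUDAHostDevice();
// else if (!Actions.PopForceCUDAHostDevice())
// PP.Diag(PragmaLoc, UnmatchedEndDiagID); // unmatched 'end' of the pragma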
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// A partial call graph maintained during CUDA/OpenMP device code compilation
/// to support deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to DeviceKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
/* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
SourceLocation>>
DeviceCallGraph;
/// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
DeviceDiagBuilder(DeviceDiagBuilder &&D);
DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
~DeviceDiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (DeviceDiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a DeviceDiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
template <typename T>
friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
SourceLocation OrigLoc,
const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
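// Illustrative sketch of guarding a call with CheckCUDACall; the enclosing
// call-checking code and the names 'CallLoc' and 'Callee' are assumptions:
//
// if (getLangOpts().CUDA && !CheckCUDACall(CallLoc, Callee))
// return ExprError();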
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p VD satisfy CUDA restrictions. In
// case of error, emits an appropriate diagnostic and invalidates \p VD.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure the kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
/// Trigger code completion for a record of \p BaseType. \p InitExprs are
/// expressions in the initializer list seen so far and \p D is the current
/// Designation being parsed.
void CodeCompleteDesignator(const QualType BaseType,
llvm::ArrayRef<Expr *> InitExprs,
const Designation &D);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckIntelFPGARegBuiltinFunctionCall(unsigned BuiltinID, CallExpr *Call);
bool CheckIntelFPGAMemBuiltinFunctionCall(CallExpr *Call);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(const Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
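// Illustrative sketch (hypothetical identifiers and magic value): registering
// an MPI-style type tag so that calls annotated with the
// argument_with_type_tag/pointer_with_type_tag attributes can later be
// verified by CheckArgumentWithTypeTag.
//
//   S.RegisterTypeTagForDatatype(&Context.Idents.get("mpi"),
//                                /*MagicValue=*/1, Context.IntTy,
//                                /*LayoutCompatible=*/false,
//                                /*MustBeNull=*/false);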
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// To be used for checking whether the arguments being passed to a
/// function exceed the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
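// Worked example (illustrative, not part of the original header): with
// NumParams = 2 and NumArgs = 2, TooManyArguments(2, 2) returns false, but
// TooManyArguments(2, 2, /*PartialOverloading=*/true) treats the pending
// argument after the comma as an extra one (2 + 1 > 2) and returns true.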
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
int ParsingClassDepth = 0;
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks whether the expression is in the set of potentially
/// misaligned members and is being converted to some pointer type T with
/// lower or equal alignment requirements; if so, it removes it from the set.
/// This is used when we do not want to diagnose such misaligned access
/// (e.g. in conversions to void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
private:
// We store SYCL kernels here and handle them separately -- which is a hack.
// FIXME: It would be best to refactor this.
SmallVector<Decl*, 4> SyclDeviceDecls;
// SYCL integration header instance for the current compilation unit this
// Sema is associated with.
std::unique_ptr<SYCLIntegrationHeader> SyclIntHeader;
// Used to suppress diagnostics during kernel construction, since these were
// already emitted earlier. Diagnosing during kernel emission would also skip
// the useful notes that show where the kernel was called.
bool ConstructingOpenCLKernel = false;
public:
void addSyclDeviceDecl(Decl *d) { SyclDeviceDecls.push_back(d); }
SmallVectorImpl<Decl *> &syclDeviceDecls() { return SyclDeviceDecls; }
/// Lazily creates and returns SYCL integration header instance.
SYCLIntegrationHeader &getSyclIntegrationHeader() {
if (SyclIntHeader == nullptr)
SyclIntHeader = std::make_unique<SYCLIntegrationHeader>(
getDiagnostics(), getLangOpts().SYCLUnnamedLambda);
return *SyclIntHeader.get();
}
enum SYCLRestrictKind {
KernelGlobalVariable,
KernelRTTI,
KernelNonConstStaticDataVariable,
KernelCallVirtualFunction,
KernelUseExceptions,
KernelCallRecursiveFunction,
KernelCallFunctionPointer,
KernelAllocateStorage,
KernelUseAssembly,
KernelCallDllimportFunction,
KernelCallVariadicFunction
};
bool isKnownGoodSYCLDecl(const Decl *D);
void ConstructOpenCLKernel(FunctionDecl *KernelCallerFunc, MangleContext &MC);
void MarkDevice(void);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurLexicalContext is a kernel function or it is known that the
/// function will be emitted for the device, emits the diagnostics
/// immediately.
/// - If CurLexicalContext is a function and we are compiling
/// for the device, but we don't know that this function will be codegen'ed
/// for device yet, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// Variables with thread storage duration are not allowed to be used in SYCL
/// device code
/// if (getLangOpts().SYCLIsDevice)
/// SYCLDiagIfDeviceCode(Loc, diag::err_thread_unsupported);
DeviceDiagBuilder SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Checks if the Callee function is a device function; emits diagnostics if
/// it is known to be a device function, and otherwise adds the function to
/// the DeviceCallGraph.
void checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
};
template <typename AttrType>
void Sema::AddOneConstantValueAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E) {
AttrType TmpAttr(Context, CI, E);
if (!E->isValueDependent()) {
ExprResult ICE;
if (checkRangedIntegralArgument<AttrType>(E, &TmpAttr, ICE))
return;
E = ICE.get();
}
if (IntelFPGAPrivateCopiesAttr::classof(&TmpAttr)) {
if (!D->hasAttr<IntelFPGAMemoryAttr>())
D->addAttr(IntelFPGAMemoryAttr::CreateImplicit(
Context, IntelFPGAMemoryAttr::Default));
}
D->addAttr(::new (Context) AttrType(Context, CI, E));
}
template <typename AttrType>
void Sema::AddOneConstantPowerTwoValueAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E) {
AttrType TmpAttr(Context, CI, E);
if (!E->isValueDependent()) {
ExprResult ICE;
if (checkRangedIntegralArgument<AttrType>(E, &TmpAttr, ICE))
return;
Expr::EvalResult Result;
E->EvaluateAsInt(Result, Context);
llvm::APSInt Value = Result.Val.getInt();
if (!Value.isPowerOf2()) {
Diag(CI.getLoc(), diag::err_attribute_argument_not_power_of_two)
<< &TmpAttr;
return;
}
if (IntelFPGANumBanksAttr::classof(&TmpAttr)) {
if (auto *BBA = D->getAttr<IntelFPGABankBitsAttr>()) {
unsigned NumBankBits = BBA->args_size();
if (NumBankBits != Value.ceilLogBase2()) {
Diag(TmpAttr.getLocation(), diag::err_bankbits_numbanks_conflicting);
return;
}
}
}
E = ICE.get();
}
if (!D->hasAttr<IntelFPGAMemoryAttr>())
D->addAttr(IntelFPGAMemoryAttr::CreateImplicit(
Context, IntelFPGAMemoryAttr::Default));
// We are adding a user NumBanks, drop any implicit default.
if (IntelFPGANumBanksAttr::classof(&TmpAttr)) {
if (auto *NBA = D->getAttr<IntelFPGANumBanksAttr>())
if (NBA->isImplicit())
D->dropAttr<IntelFPGANumBanksAttr>();
}
D->addAttr(::new (Context) AttrType(Context, CI, E));
}
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
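// Illustrative usage sketch (hypothetical surrounding code): entering an
// unevaluated context for the duration of one scope via the RAII helper
// above; the destructor pops the context automatically.
//
//   {
//     EnterExpressionEvaluationContext Unevaluated(
//         SemaRef, Sema::ExpressionEvaluationContext::Unevaluated);
//     // ... analyze the operand of sizeof/decltype here ...
//   }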
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
archive_blake2sp_ref.c | /*
BLAKE2 reference source code package - reference C implementations
Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the
terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at
your option. The terms of these licenses can be found at:
- CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
- OpenSSL license : https://www.openssl.org/source/license.html
- Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0
More information about the BLAKE2 hash function can be found at
https://blake2.net.
*/
#include "archive_platform.h"
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "archive_blake2.h"
#include "archive_blake2_impl.h"
#define PARALLELISM_DEGREE 8
/*
blake2sp_init_param defaults to setting the expected output length
from the digest_length parameter block field.
In some cases, however, we do not want this, as the output length
of these instances is given by inner_length instead.
*/
static int blake2sp_init_leaf_param( blake2s_state *S, const blake2s_param *P )
{
int err = blake2s_init_param(S, P);
S->outlen = P->inner_length;
return err;
}
static int blake2sp_init_leaf( blake2s_state *S, size_t outlen, size_t keylen, uint32_t offset )
{
blake2s_param P[1];
P->digest_length = (uint8_t)outlen;
P->key_length = (uint8_t)keylen;
P->fanout = PARALLELISM_DEGREE;
P->depth = 2;
store32( &P->leaf_length, 0 );
store32( &P->node_offset, offset );
store16( &P->xof_length, 0 );
P->node_depth = 0;
P->inner_length = BLAKE2S_OUTBYTES;
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
return blake2sp_init_leaf_param( S, P );
}
static int blake2sp_init_root( blake2s_state *S, size_t outlen, size_t keylen )
{
blake2s_param P[1];
P->digest_length = (uint8_t)outlen;
P->key_length = (uint8_t)keylen;
P->fanout = PARALLELISM_DEGREE;
P->depth = 2;
store32( &P->leaf_length, 0 );
store32( &P->node_offset, 0 );
store16( &P->xof_length, 0 );
P->node_depth = 1;
P->inner_length = BLAKE2S_OUTBYTES;
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
return blake2s_init_param( S, P );
}
int blake2sp_init( blake2sp_state *S, size_t outlen )
{
size_t i;
if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;
memset( S->buf, 0, sizeof( S->buf ) );
S->buflen = 0;
S->outlen = outlen;
if( blake2sp_init_root( S->R, outlen, 0 ) < 0 )
return -1;
for( i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2sp_init_leaf( S->S[i], outlen, 0, (uint32_t)i ) < 0 ) return -1;
S->R->last_node = 1;
S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
return 0;
}
int blake2sp_init_key( blake2sp_state *S, size_t outlen, const void *key, size_t keylen )
{
size_t i;
if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;
if( !key || !keylen || keylen > BLAKE2S_KEYBYTES ) return -1;
memset( S->buf, 0, sizeof( S->buf ) );
S->buflen = 0;
S->outlen = outlen;
if( blake2sp_init_root( S->R, outlen, keylen ) < 0 )
return -1;
for( i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2sp_init_leaf( S->S[i], outlen, keylen, (uint32_t)i ) < 0 ) return -1;
S->R->last_node = 1;
S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
{
uint8_t block[BLAKE2S_BLOCKBYTES];
memset( block, 0, BLAKE2S_BLOCKBYTES );
memcpy( block, key, keylen );
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S->S[i], block, BLAKE2S_BLOCKBYTES );
secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */
}
return 0;
}
int blake2sp_update( blake2sp_state *S, const void *pin, size_t inlen )
{
const unsigned char * in = (const unsigned char *)pin;
size_t left = S->buflen;
size_t fill = sizeof( S->buf ) - left;
size_t i;
if( left && inlen >= fill )
{
memcpy( S->buf + left, in, fill );
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, BLAKE2S_BLOCKBYTES );
in += fill;
inlen -= fill;
left = 0;
}
#if defined(_OPENMP)
#pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE)
#else
for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
{
#if defined(_OPENMP)
size_t i = omp_get_thread_num();
#endif
size_t inlen__ = inlen;
const unsigned char *in__ = ( const unsigned char * )in;
in__ += i * BLAKE2S_BLOCKBYTES;
while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES )
{
blake2s_update( S->S[i], in__, BLAKE2S_BLOCKBYTES );
in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
}
}
in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES );
inlen %= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
if( inlen > 0 )
memcpy( S->buf + left, in, inlen );
S->buflen = left + inlen;
return 0;
}
int blake2sp_final( blake2sp_state *S, void *out, size_t outlen )
{
uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES];
size_t i;
if(out == NULL || outlen < S->outlen) {
return -1;
}
for( i = 0; i < PARALLELISM_DEGREE; ++i )
{
if( S->buflen > i * BLAKE2S_BLOCKBYTES )
{
size_t left = S->buflen - i * BLAKE2S_BLOCKBYTES;
if( left > BLAKE2S_BLOCKBYTES ) left = BLAKE2S_BLOCKBYTES;
blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, left );
}
blake2s_final( S->S[i], hash[i], BLAKE2S_OUTBYTES );
}
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S->R, hash[i], BLAKE2S_OUTBYTES );
return blake2s_final( S->R, out, S->outlen );
}
int blake2sp( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen )
{
uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES];
blake2s_state S[PARALLELISM_DEGREE][1];
blake2s_state FS[1];
size_t i;
/* Verify parameters */
if ( NULL == in && inlen > 0 ) return -1;
if ( NULL == out ) return -1;
if ( NULL == key && keylen > 0) return -1;
if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;
if( keylen > BLAKE2S_KEYBYTES ) return -1;
for( i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2sp_init_leaf( S[i], outlen, keylen, (uint32_t)i ) < 0 ) return -1;
S[PARALLELISM_DEGREE - 1]->last_node = 1; /* mark last node */
if( keylen > 0 )
{
uint8_t block[BLAKE2S_BLOCKBYTES];
memset( block, 0, BLAKE2S_BLOCKBYTES );
memcpy( block, key, keylen );
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S[i], block, BLAKE2S_BLOCKBYTES );
secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */
}
#if defined(_OPENMP)
#pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE)
#else
for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
{
#if defined(_OPENMP)
size_t i = omp_get_thread_num();
#endif
size_t inlen__ = inlen;
const unsigned char *in__ = ( const unsigned char * )in;
in__ += i * BLAKE2S_BLOCKBYTES;
while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES )
{
blake2s_update( S[i], in__, BLAKE2S_BLOCKBYTES );
in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
}
if( inlen__ > i * BLAKE2S_BLOCKBYTES )
{
const size_t left = inlen__ - i * BLAKE2S_BLOCKBYTES;
const size_t len = left <= BLAKE2S_BLOCKBYTES ? left : BLAKE2S_BLOCKBYTES;
blake2s_update( S[i], in__, len );
}
blake2s_final( S[i], hash[i], BLAKE2S_OUTBYTES );
}
if( blake2sp_init_root( FS, outlen, keylen ) < 0 )
return -1;
FS->last_node = 1;
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( FS, hash[i], BLAKE2S_OUTBYTES );
return blake2s_final( FS, out, outlen );
}
#if defined(BLAKE2SP_SELFTEST)
#include <string.h>
#include "blake2-kat.h"
int main( void )
{
uint8_t key[BLAKE2S_KEYBYTES];
uint8_t buf[BLAKE2_KAT_LENGTH];
size_t i, step;
for( i = 0; i < BLAKE2S_KEYBYTES; ++i )
key[i] = ( uint8_t )i;
for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
buf[i] = ( uint8_t )i;
/* Test simple API */
for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
{
uint8_t hash[BLAKE2S_OUTBYTES];
blake2sp( hash, BLAKE2S_OUTBYTES, buf, i, key, BLAKE2S_KEYBYTES );
if( 0 != memcmp( hash, blake2sp_keyed_kat[i], BLAKE2S_OUTBYTES ) )
{
goto fail;
}
}
/* Test streaming API */
for(step = 1; step < BLAKE2S_BLOCKBYTES; ++step) {
for (i = 0; i < BLAKE2_KAT_LENGTH; ++i) {
uint8_t hash[BLAKE2S_OUTBYTES];
blake2sp_state S;
uint8_t * p = buf;
size_t mlen = i;
int err = 0;
if( (err = blake2sp_init_key(&S, BLAKE2S_OUTBYTES, key, BLAKE2S_KEYBYTES)) < 0 ) {
goto fail;
}
while (mlen >= step) {
if ( (err = blake2sp_update(&S, p, step)) < 0 ) {
goto fail;
}
mlen -= step;
p += step;
}
if ( (err = blake2sp_update(&S, p, mlen)) < 0) {
goto fail;
}
if ( (err = blake2sp_final(&S, hash, BLAKE2S_OUTBYTES)) < 0) {
goto fail;
}
if (0 != memcmp(hash, blake2sp_keyed_kat[i], BLAKE2S_OUTBYTES)) {
goto fail;
}
}
}
puts( "ok" );
return 0;
fail:
puts("error");
return -1;
}
#endif
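/* Illustrative usage sketch (not part of the reference implementation):
 * hashing a short message with the one-shot API defined above, following the
 * 0/-1 return convention used throughout this file.
 *
 *   uint8_t digest[BLAKE2S_OUTBYTES];
 *   const char msg[] = "abc";
 *   if( blake2sp( digest, sizeof( digest ), msg, sizeof( msg ) - 1,
 *                 NULL, 0 ) < 0 )
 *     return -1;  // invalid parameters
 */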
|
GB_binop__isge_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isge_uint8)
// A.*B function (eWiseMult): GB (_AemultB_01__isge_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__isge_uint8)
// A.*B function (eWiseMult): GB (_AemultB_03__isge_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_uint8)
// A*D function (colscale): GB (_AxD__isge_uint8)
// D*A function (rowscale): GB (_DxB__isge_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__isge_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__isge_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_uint8)
// C=scalar+B GB (_bind1st__isge_uint8)
// C=scalar+B' GB (_bind1st_tran__isge_uint8)
// C=A+scalar GB (_bind2nd__isge_uint8)
// C=A'+scalar GB (_bind2nd_tran__isge_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
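// Example (illustrative): for uint8_t inputs, GB_BINOP (z, 5, 3, i, j)
// assigns z = 1 and GB_BINOP (z, 2, 7, i, j) assigns z = 0; the ISGE
// operator stores the comparison result as a 0/1 value of type uint8_t,
// and the row and column indices i and j are ignored.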
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGE || GxB_NO_UINT8 || GxB_NO_ISGE_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__isge_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isge_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isge_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isge_uint8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isge_uint8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isge_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__isge_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isge_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__isge_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isge_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isge_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isge_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB (_bind1st_tran__isge_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij >= y) ; \
}
GrB_Info GB (_bind2nd_tran__isge_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__identity_int64_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int64_fc32)
// op(A') function: GB (_unop_tran__identity_int64_fc32)
// C type: int64_t
// A type: GxB_FC32_t
// cast: int64_t cij = GB_cast_to_int64_t ((double) crealf (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int64_t z = GB_cast_to_int64_t ((double) crealf (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = GB_cast_to_int64_t ((double) crealf (aij)) ; \
Cx [pC] = z ; \
}
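// Example (illustrative): for aij = 3.0f + 4.0f*I, crealf (aij) = 3.0f, so
// the cast above stores the int64_t value 3 in Cx [pC]; the imaginary part
// of the complex input is discarded.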
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int64_fc32)
(
int64_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
int64_t z = GB_cast_to_int64_t ((double) crealf (aij)) ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
int64_t z = GB_cast_to_int64_t ((double) crealf (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_int64_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
target.c | // RUN: %libomptarget-compile-aarch64-unknown-linux-gnu -fopenmp-version=51
// RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \
// RUN: | %fcheck-aarch64-unknown-linux-gnu
// RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu -fopenmp-version=51
// RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \
// RUN: | %fcheck-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu -fopenmp-version=51
// RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \
// RUN: | %fcheck-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-x86_64-pc-linux-gnu -fopenmp-version=51
// RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \
// RUN: | %fcheck-x86_64-pc-linux-gnu
#include <stdio.h>
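// The enclosing "omp target data map(alloc: i)" region below makes i present on the
// device, so the first target region with the 'present' map modifier succeeds; the
// second one sits outside any data region, so libomptarget must fail at runtime with
// the diagnostics verified by the FileCheck patterns in this file.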
int main() {
int i;
// CHECK: addr=0x[[#%x,HOST_ADDR:]], size=[[#%u,SIZE:]]
fprintf(stderr, "addr=%p, size=%ld\n", &i, sizeof i);
// CHECK-NOT: Libomptarget
#pragma omp target data map(alloc: i)
#pragma omp target map(present, alloc: i)
;
// CHECK: i is present
fprintf(stderr, "i is present\n");
// CHECK: Libomptarget message: device mapping required by 'present' map type modifier does not exist for host address 0x{{0*}}[[#HOST_ADDR]] ([[#SIZE]] bytes)
// CHECK: Libomptarget error: Call to getOrAllocTgtPtr returned null pointer ('present' map type modifier).
// CHECK: Libomptarget error: Call to targetDataBegin failed, abort target.
// CHECK: Libomptarget error: Failed to process data before launching the kernel.
// CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory
#pragma omp target map(present, alloc: i)
;
// CHECK-NOT: i is present
fprintf(stderr, "i is present\n");
return 0;
}
|
ordered3.c | #include <stdio.h>
#include <omp.h>
int main (void)
{
int i,j, myval;
#pragma omp parallel for private(myval) ordered(2)
for(i=1; i<=100; i++)
for(j=1; j<=100; j++)
{
myval=i*j;
#pragma omp ordered
{
printf("%d %d\n", i, myval);
}
}
return 0;
}
|
SymOp.h | void forward(double *y, const double *x, int n){
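  // Expand each packed symmetric 3x3 tensor (6 unique components per item, in
  // the order xx, xy, xz, yy, yz, zz) into a full row-major 3x3 matrix with 9
  // entries per item; the mirrored off-diagonal entries are duplicated.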
#pragma omp parallel for
for(int i=0;i<n;i++){
y[i*9] = x[6*i+0];
y[i*9+1] = x[6*i+1];
y[i*9+2] = x[6*i+2];
y[i*9+3] = x[6*i+1];
y[i*9+4] = x[6*i+3];
y[i*9+5] = x[6*i+4];
y[i*9+6] = x[6*i+2];
y[i*9+7] = x[6*i+4];
y[i*9+8] = x[6*i+5];
}
}
void backward(double *grad_x, const double *grad_y, const double *y, const double *x, int n){
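  // Accumulate the gradient of the expansion done in forward(): each packed
  // off-diagonal component collects contributions from both mirrored entries of
  // the full 3x3 matrix. (y and x are unused here; they are kept so that both
  // functions share a uniform interface.)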
for(int i=0;i<n;i++){
grad_x[6*i+0] = grad_y[i*9];
grad_x[6*i+1] = grad_y[i*9+1] + grad_y[i*9+3];
grad_x[6*i+2] = grad_y[i*9+2] + grad_y[i*9+6];
grad_x[6*i+3] = grad_y[i*9+4];
grad_x[6*i+4] = grad_y[i*9+5] + grad_y[i*9+7];
grad_x[6*i+5] = grad_y[i*9+8];
}
} |
cov_fcts.h | /*!
* This file is part of GPBoost a C++ library for combining
* boosting with Gaussian process and mixed effects models
*
* Copyright (c) 2020 Fabio Sigrist. All rights reserved.
*
* Licensed under the Apache License Version 2.0. See LICENSE file in the project root for license information.
*/
#ifndef GPB_COV_FUNCTIONS_
#define GPB_COV_FUNCTIONS_
#include <GPBoost/type_defs.h>
#include <string>
#include <set>
#include <string>
#include <vector>
#include <cmath>
#include <LightGBM/utils/log.h>
using LightGBM::Log;
namespace GPBoost {
template<typename T_mat>
class RECompGP;
/*!
 * \brief This class implements the covariance functions used for the Gaussian processes
*
* Some details:
* 1. The template parameter <T_mat> can be either <den_mat_t> or <sp_mat_t>
*/
class CovFunction {
public:
/*! \brief Constructor */
CovFunction();
/*!
* \brief Constructor
 * \param cov_fct_type Type of covariance function. We follow the notation and parametrization of Diggle and Ribeiro (2007) except for the Matern covariance where we follow Rasmussen and Williams (2006)
* \param shape Shape parameter of covariance function (=smoothness parameter for Matern and Wendland covariance. For the Wendland covariance function, we follow the notation of Bevilacqua et al. (2018)). This parameter is irrelevant for some covariance functions such as the exponential or Gaussian.
* \param taper_range Range parameter of Wendland covariance function / taper. We follow the notation of Bevilacqua et al. (2018)
* \param taper_mu Parameter \mu of Wendland covariance function / taper. We follow the notation of Bevilacqua et al. (2018)
*/
CovFunction(string_t cov_fct_type,
double shape = 0.,
double taper_range = 1.,
double taper_mu = 2.) {
num_cov_par_ = 2;
if (SUPPORTED_COV_TYPES_.find(cov_fct_type) == SUPPORTED_COV_TYPES_.end()) {
Log::REFatal("Covariance of type '%s' is not supported.", cov_fct_type.c_str());
}
if (cov_fct_type == "matern") {
if (!(AreSame(shape, 0.5) || AreSame(shape, 1.5) || AreSame(shape, 2.5))) {
Log::REFatal("Only shape / smoothness parameters 0.5, 1.5, and 2.5 supported for the Matern covariance function");
}
}
else if (cov_fct_type == "powered_exponential") {
if (shape <= 0. || shape > 2.) {
Log::REFatal("Shape needs to be larger than 0 and smaller or equal than 2 for the 'powered_exponential' covariance function");
}
}
else if (cov_fct_type == "wendland") {
if (!(AreSame(shape, 0.0) || AreSame(shape, 1.0) || AreSame(shape, 2.0))) {
Log::REFatal("Only shape / smoothness parameters 0, 1, and 2 supported for the Wendland covariance function");
}
CHECK(taper_range > 0.);
CHECK(taper_mu >= 1.);
taper_range_ = taper_range;
taper_mu_ = taper_mu;
num_cov_par_ = 1;
}
else if (cov_fct_type == "exponential_tapered") {
if (!(AreSame(shape, 0.0) || AreSame(shape, 1.0) || AreSame(shape, 2.0))) {
Log::REFatal("Only shape / smoothness parameters 0, 1, and 2 supported for the Wendland-tapered exponential covariance function");
}
CHECK(taper_range > 0.);
CHECK(taper_mu >= 1.);
taper_range_ = taper_range;
taper_mu_ = taper_mu;
num_cov_par_ = 2;
}
cov_fct_type_ = cov_fct_type;
shape_ = shape;
}
/*! \brief Destructor */
~CovFunction() {
}
/*!
* \brief Transform the covariance parameters
* \param sigma2 Marginal variance
 * \param pars Vector with covariance parameters on original scale
* \param[out] pars_trans Transformed covariance parameters
*/
void TransformCovPars(const double sigma2, const vec_t& pars, vec_t& pars_trans) const {
pars_trans = pars;
pars_trans[0] = pars[0] / sigma2;
if (cov_fct_type_ == "exponential" ||
(cov_fct_type_ == "matern" && AreSame(shape_, 0.5)) ||
cov_fct_type_ == "exponential_tapered") {
pars_trans[1] = 1. / pars[1];
}
else if (cov_fct_type_ == "matern" && AreSame(shape_, 1.5)) {
pars_trans[1] = sqrt(3.) / pars[1];
}
else if (cov_fct_type_ == "matern" && AreSame(shape_, 2.5)) {
pars_trans[1] = sqrt(5.) / pars[1];
}
else if (cov_fct_type_ == "gaussian") {
pars_trans[1] = 1. / (pars[1] * pars[1]);
}
else if (cov_fct_type_ == "powered_exponential") {
pars_trans[1] = 1. / (std::pow(pars[1], shape_));
}
}
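		// Worked example (following the branches above): for cov_fct_type_ == "exponential"
		// with pars = (sigma2_1, range) on the original scale and marginal variance sigma2,
		// the transformed parameters are pars_trans = (sigma2_1 / sigma2, 1. / range).
		// TransformBackCovPars below inverts this mapping.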
/*!
* \brief Function transforms the covariance parameters back to the original scale
* \param sigma2 Marginal variance
* \param pars Vector with covariance parameters
* \param[out] pars_orig Back-transformed, original covariance parameters
*/
void TransformBackCovPars(const double sigma2, const vec_t& pars, vec_t& pars_orig) const {
pars_orig = pars;
pars_orig[0] = sigma2 * pars[0];
if (cov_fct_type_ == "exponential" ||
(cov_fct_type_ == "matern" && AreSame(shape_, 0.5)) ||
cov_fct_type_ == "exponential_tapered") {
pars_orig[1] = 1. / pars[1];
}
else if (cov_fct_type_ == "matern" && AreSame(shape_, 1.5)) {
pars_orig[1] = sqrt(3.) / pars[1];
}
else if (cov_fct_type_ == "matern" && AreSame(shape_, 2.5)) {
pars_orig[1] = sqrt(5.) / pars[1];
}
else if (cov_fct_type_ == "gaussian") {
pars_orig[1] = 1. / std::sqrt(pars[1]);
}
else if (cov_fct_type_ == "powered_exponential") {
pars_orig[1] = 1. / (std::pow(pars[1], 1. / shape_));
}
}
		//TODO: For the training data, this can be done faster as sigma is symmetric: add parameter bool dist_is_symmetric and do calculations only for the upper half?
/*!
* \brief Calculates covariance matrix
 * Note: this is the version for dense matrices
* \param dist Distance matrix
* \param pars Vector with covariance parameters
* \param[out] sigma Covariance matrix
*/
void GetCovMat(const den_mat_t& dist,
const vec_t& pars,
den_mat_t& sigma) const {
CHECK(pars.size() == num_cov_par_);
if (cov_fct_type_ == "exponential" || (cov_fct_type_ == "matern" && AreSame(shape_, 0.5))) {
//den_mat_t sigma(dist.rows(),dist.cols());//TODO: this is not working, check whether this can be done using triangularView? If it works, make dist_ (see re_comp.h) an upper triangular matrix as lower part is not used
//sigma.triangularView<Eigen::Upper>() = (pars[1] * ((-pars[2] * dist.triangularView<Eigen::Upper>().array()).exp())).matrix();
//sigma.triangularView<Eigen::StrictlyLower>() = sigma.triangularView<Eigen::StrictlyUpper>().transpose();
sigma = (pars[0] * ((-pars[1] * dist.array()).exp())).matrix();
}
else if (cov_fct_type_ == "matern" && AreSame(shape_, 1.5)) {
sigma = (pars[0] * (1. + pars[1] * dist.array()) * ((-pars[1] * dist.array()).exp())).matrix();
}
else if (cov_fct_type_ == "matern" && AreSame(shape_, 2.5)) {
sigma = (pars[0] * (1. + pars[1] * dist.array() + pars[1] * pars[1] * dist.array().square() / 3.) * ((-pars[1] * dist.array()).exp())).matrix();
}
else if (cov_fct_type_ == "gaussian") {
sigma = (pars[0] * ((-pars[1] * dist.array().square()).exp())).matrix();
}
else if (cov_fct_type_ == "powered_exponential") {
sigma = (pars[0] * ((-pars[1] * dist.array().pow(shape_)).exp())).matrix();
}
else if (cov_fct_type_ == "wendland") {
sigma = den_mat_t(dist.rows(), dist.cols());
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)dist.rows(); ++i) {
for (int j = 0; j < (int)dist.cols(); ++j) {
if (dist(i, j) >= taper_range_) {
sigma(i, j) = 0.;
}
else {
sigma(i, j) = pars[0] * WendlandCorrelation(dist(i, j));
}
}
}
}
else if (cov_fct_type_ == "exponential_tapered") {
sigma = den_mat_t(dist.rows(), dist.cols());
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)dist.rows(); ++i) {
for (int j = 0; j < (int)dist.cols(); ++j) {
if (dist(i, j) >= taper_range_) {
sigma(i, j) = 0.;
}
else {
sigma(i, j) = pars[0] * WendlandCorrelation(dist(i, j)) * std::exp(-pars[1] * dist(i, j));
}
}
}
}
else {
Log::REFatal("Covariance of type '%s' is not supported.", cov_fct_type_.c_str());
}
}
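		// Illustrative usage (a sketch, assuming den_mat_t and vec_t are the Eigen dense
		// matrix/vector typedefs from GPBoost/type_defs.h; not code from this file):
		//   CovFunction cov("exponential");
		//   vec_t pars(2);
		//   pars << 1.0, 5.0;                 // (transformed) marginal variance and inverse range
		//   den_mat_t sigma;
		//   cov.GetCovMat(dist, pars, sigma); // sigma(i,j) = pars[0] * exp(-pars[1] * dist(i,j))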
/*!
* \brief Calculates covariance matrix
 * Note: this is the version for sparse matrices
* \param dist Distance matrix
* \param pars Vector with covariance parameters
* \param[out] sigma Covariance matrix
*/
void GetCovMat(const sp_mat_t& dist,
const vec_t& pars,
sp_mat_t& sigma) const {
CHECK(pars.size() == num_cov_par_);
sigma = dist;
if (cov_fct_type_ == "exponential" || (cov_fct_type_ == "matern" && AreSame(shape_, 0.5))) {
sigma.coeffs() = pars[0] * ((-pars[1] * dist.coeffs()).exp());
}
else if (cov_fct_type_ == "matern" && AreSame(shape_, 1.5)) {
sigma.coeffs() = pars[0] * (1. + pars[1] * dist.coeffs()) * ((-pars[1] * dist.coeffs()).exp());
}
else if (cov_fct_type_ == "matern" && AreSame(shape_, 2.5)) {
sigma.coeffs() = pars[0] * (1. + pars[1] * dist.coeffs() +
pars[1] * pars[1] * dist.coeffs().square() / 3.) * ((-pars[1] * dist.coeffs()).exp());
}
else if (cov_fct_type_ == "gaussian") {
sigma.coeffs() = pars[0] * ((-pars[1] * dist.coeffs().square()).exp());
}
else if (cov_fct_type_ == "powered_exponential") {
sigma.coeffs() = pars[0] * ((-pars[1] * dist.coeffs().pow(shape_)).exp());
}
else if (cov_fct_type_ == "wendland") {
sigma.coeffs() = pars[0];
AddWendlandCorrelation(sigma, dist);
}
else if (cov_fct_type_ == "exponential_tapered") {
sigma.coeffs() = pars[0] * ((-pars[1] * dist.coeffs()).exp());
AddWendlandCorrelation(sigma, dist);
}
//else if (cov_fct_type_ == "wendland" && AreSame(shape_, 0.)) {
// sigma.coeffs() = pars[0] * (1. - sigma.coeffs() / taper_range_).pow(taper_mu_);
//}
//else if (cov_fct_type_ == "wendland" && AreSame(shape_, 1.)) {
// sigma.coeffs() = pars[0] * (1. - sigma.coeffs() / taper_range_).pow(taper_mu_ + 1.) *
// (1. + sigma.coeffs() / taper_range_ * (taper_mu_ + 1.));
//}
//else if (cov_fct_type_ == "wendland" && AreSame(shape_, 2.)) {
// sigma.coeffs() = pars[0] * (1. - sigma.coeffs() / taper_range_).pow(taper_mu_ + 2.) *
// (1. + sigma.coeffs() / taper_range_ * (taper_mu_ + 2.) + (sigma.coeffs() / taper_range_).pow(2) * (taper_mu_ * taper_mu_ + 4 * taper_mu_ + 3.) / 3.);
//}
//else if (cov_fct_type_ == "exponential_tapered" && AreSame(shape_, 0.)) {
// sigma.coeffs() = pars[0] * (1. - sigma.coeffs() / taper_range_).pow(taper_mu_) * ((-pars[1] * sigma.coeffs()).exp());
//}
//else if (cov_fct_type_ == "exponential_tapered" && AreSame(shape_, 1.)) {
// sigma.coeffs() = pars[0] * (1. - sigma.coeffs() / taper_range_).pow(taper_mu_ + 1.) *
// (1. + sigma.coeffs() / taper_range_ * (taper_mu_ + 1.)) *
// ((-pars[1] * sigma.coeffs()).exp());
//}
//else if (cov_fct_type_ == "exponential_tapered" && AreSame(shape_, 2.)) {
// sigma.coeffs() = pars[0] * (1. - sigma.coeffs() / taper_range_).pow(taper_mu_ + 2.) *
// (1. + sigma.coeffs() / taper_range_ * (taper_mu_ + 2.) + (sigma.coeffs() / taper_range_).pow(2) * (taper_mu_ * taper_mu_ + 4 * taper_mu_ + 3.) / 3.) *
// ((-pars[1] * sigma.coeffs()).exp());
//}
else {
Log::REFatal("Covariance of type '%s' is not supported.", cov_fct_type_.c_str());
}
}
/*!
* \brief Calculates derivatives of the covariance matrix with respect to the inverse range parameter
 * Note: this is the version for dense matrices
* \param dist Distance matrix
* \param sigma Covariance matrix
* \param pars Vector with covariance parameters
* \param[out] sigma_grad Derivative of covariance matrix with respect to the inverse range parameter
 * \param transf_scale If true, the derivative is taken on the transformed scale; otherwise with respect to the original range parameter (the parameter values pars are always given on the transformed scale). Optimization is done using transf_scale=true. transf_scale=false is needed, for instance, for calculating the Fisher information on the original scale.
* \param marg_var Marginal variance parameters sigma^2 (used only if transf_scale = false to transform back)
*/
void GetCovMatGradRange(const den_mat_t& dist,
const den_mat_t& sigma,
const vec_t& pars,
den_mat_t& sigma_grad,
bool transf_scale,
double marg_var) const {
CHECK(pars.size() == num_cov_par_);
if (cov_fct_type_ == "exponential" ||
(cov_fct_type_ == "matern" && AreSame(shape_, 0.5)) ||
cov_fct_type_ == "exponential_tapered") {
double cm = transf_scale ? (-1. * pars[1]) : (marg_var * pars[1] * pars[1]);
sigma_grad = cm * sigma.cwiseProduct(dist);
}
else if (cov_fct_type_ == "matern" && AreSame(shape_, 1.5)) {
double cm = transf_scale ? 1. : (-1. * marg_var * pars[1]);
sigma_grad = cm * ((pars[0] * pars[1] * dist.array() * ((-pars[1] * dist.array()).exp())).matrix() - pars[1] * sigma.cwiseProduct(dist));
}
else if (cov_fct_type_ == "matern" && AreSame(shape_, 2.5)) {
double cm = transf_scale ? 1. : (-1. * marg_var * pars[1]);
sigma_grad = cm * ((pars[0] * (pars[1] * dist.array() + (2. / 3.) * pars[1] * pars[1] * dist.array().square()) *
((-pars[1] * dist.array()).exp())).matrix() - pars[1] * sigma.cwiseProduct(dist));
}
else if (cov_fct_type_ == "gaussian") {
double cm = transf_scale ? (-1. * pars[1]) : (2. * marg_var * std::pow(pars[1], 3. / 2.));
sigma_grad = cm * sigma.cwiseProduct(dist.array().square().matrix());
}
else if (cov_fct_type_ == "powered_exponential") {
double cm = transf_scale ? (-1. * pars[1]) : (shape_ * marg_var * std::pow(pars[1], (shape_ + 1.) / shape_));
sigma_grad = cm * sigma.cwiseProduct(dist.array().pow(shape_).matrix());
}
else {
Log::REFatal("GetCovMatGradRange: Covariance of type '%s' is not supported.", cov_fct_type_.c_str());
}
}
/*!
* \brief Calculates derivatives of the covariance matrix with respect to the inverse range parameter
 * Note: this is the version for sparse matrices
* \param dist Distance matrix
* \param sigma Covariance matrix
* \param pars Vector with covariance parameters
* \param[out] sigma_grad Derivative of covariance matrix with respect to the inverse range parameter
 * \param transf_scale If true, the derivative is taken on the transformed scale; otherwise with respect to the original range parameter (the parameter values pars are always given on the transformed scale). Optimization is done using transf_scale=true. transf_scale=false is needed, for instance, for calculating the Fisher information on the original scale.
* \param marg_var Marginal variance parameters sigma^2 (used only if transf_scale = false to transform back)
*/
void GetCovMatGradRange(const sp_mat_t& dist,
const sp_mat_t& sigma,
const vec_t& pars,
sp_mat_t& sigma_grad,
bool transf_scale,
double marg_var) const {
CHECK(pars.size() == num_cov_par_);
if (cov_fct_type_ == "exponential" ||
(cov_fct_type_ == "matern" && AreSame(shape_, 0.5)) ||
cov_fct_type_ == "exponential_tapered") {
double cm = transf_scale ? (-1. * pars[1]) : (marg_var * pars[1] * pars[1]);
sigma_grad = cm * sigma.cwiseProduct(dist);
}
else if (cov_fct_type_ == "matern" && AreSame(shape_, 1.5)) {
double cm = transf_scale ? 1. : (-1. * marg_var * pars[1]);
sigma_grad = dist;
sigma_grad.coeffs() = pars[0] * pars[1] * sigma_grad.coeffs() * ((-pars[1] * sigma_grad.coeffs()).exp());
sigma_grad -= pars[1] * sigma.cwiseProduct(dist);
sigma_grad *= cm;
}
else if (cov_fct_type_ == "matern" && AreSame(shape_, 2.5)) {
double cm = transf_scale ? 1. : (-1. * marg_var * pars[1]);
sigma_grad = dist;
sigma_grad.coeffs() = pars[0] * (pars[1] * sigma_grad.coeffs() + (2. / 3.) * pars[1] * pars[1] * sigma_grad.coeffs().square()) *
((-pars[1] * sigma_grad.coeffs()).exp());
sigma_grad -= pars[1] * sigma.cwiseProduct(dist);
sigma_grad *= cm;
}
else if (cov_fct_type_ == "gaussian") {
double cm = transf_scale ? (-1. * pars[1]) : (2. * marg_var * std::pow(pars[1], 3. / 2.));
sigma_grad = dist;
sigma_grad.coeffs() = sigma_grad.coeffs().square();
sigma_grad = cm * sigma.cwiseProduct(sigma_grad);
}
else if (cov_fct_type_ == "powered_exponential") {
double cm = transf_scale ? (-1. * pars[1]) : (shape_ * marg_var * std::pow(pars[1], (shape_ + 1.) / shape_));
sigma_grad = dist;
sigma_grad.coeffs() = sigma_grad.coeffs().pow(shape_);
sigma_grad = cm * sigma.cwiseProduct(sigma_grad);
}
else {
Log::REFatal("GetCovMatGradRange: Covariance of type '%s' is not supported.", cov_fct_type_.c_str());
}
}
private:
/*! \brief Type of covariance function */
string_t cov_fct_type_;
/*! \brief Shape parameter of covariance function (=smoothness parameter for Matern covariance) */
double shape_;
/*! \brief Range parameter of Wendland covariance function / taper */
double taper_range_;
/*! \brief Parameter \mu of Wendland covariance function / taper. We follow the notation of Bevilacqua et al. (2018) */
double taper_mu_;
/*! \brief Number of covariance parameters*/
int num_cov_par_;
/*! \brief List of supported covariance functions */
const std::set<string_t> SUPPORTED_COV_TYPES_{ "exponential",
"gaussian",
"powered_exponential",
"matern",
"wendland",
"exponential_tapered" };
inline bool AreSame(const double a, const double b) const {
const double epsilon = 0.00000001;
bool are_same = false;
if (fabs(a) < epsilon) {
are_same = fabs(b) < epsilon;
}
else {
			are_same = fabs(a - b) < fabs(a) * epsilon;
}
return are_same;
}
/*!
* \brief Calculates Wendland correlation function (function used for dense matrices)
* \param dist Distance
* \return Wendland correlation
*/
inline double WendlandCorrelation(const double dist) const {
if (AreSame(shape_, 0.)) {
return(std::pow((1. - dist / taper_range_), taper_mu_));
}
else if (AreSame(shape_, 1.)) {
return(std::pow((1. - dist / taper_range_), taper_mu_ + 1.) * (1. + dist / taper_range_ * (taper_mu_ + 1.)));
}
else if (AreSame(shape_, 2.)) {
return(std::pow((1. - dist / taper_range_), taper_mu_ + 2.) *
(1. + dist / taper_range_ * (taper_mu_ + 2.) + std::pow(dist / taper_range_, 2) * (taper_mu_ * taper_mu_ + 4 * taper_mu_ + 3.) / 3.));
}
else {
return 0.;
}
}
/*!
* \brief Calculates Wendland correlation function (function used for sparse matrices)
* \param[out] sigma (Covariance) matrix to which the Wendland correlation is "added" (multiplied with)
* \param dist Distance matrix
*/
inline void AddWendlandCorrelation(sp_mat_t& sigma, const sp_mat_t& dist) const {
if (AreSame(shape_, 0.)) {
sigma.coeffs() *= (1. - dist.coeffs() / taper_range_).pow(taper_mu_);
}
else if (AreSame(shape_, 1.)) {
sigma.coeffs() *= (1. - dist.coeffs() / taper_range_).pow(taper_mu_ + 1.) *
(1. + dist.coeffs() / taper_range_ * (taper_mu_ + 1.));
}
else if (AreSame(shape_, 2.)) {
sigma.coeffs() *= (1. - dist.coeffs() / taper_range_).pow(taper_mu_ + 2.) *
(1. + dist.coeffs() / taper_range_ * (taper_mu_ + 2.) + (dist.coeffs() / taper_range_).pow(2) * (taper_mu_ * taper_mu_ + 4 * taper_mu_ + 3.) / 3.);
}
}
template<typename>
friend class RECompGP;
};
} // namespace GPBoost
#endif // GPB_COV_FUNCTIONS_
|
phpassMD5_fmt_plug.c | /*
* This software was written by Jim Fougeron jfoug AT cox dot net in 2009.
* No copyright is claimed, and the software is hereby placed in the public
* domain. In case this attempt to disclaim copyright and place the software in
* the public domain is deemed null and void, then the software is Copyright
* (c) 2009 Jim Fougeron and it is hereby released to the general public under
* the following terms:
*
* This software may be modified, redistributed, and used for any purpose,
* in source and binary forms, with or without modification.
*
* Cracks phpass 'portable' hashes, and phpBBv3 hashes, which are simply phpass
* portable, with a slightly different signature. These are 8 byte salted
* hashes, with a 1 byte 'salt' that defines the number of loops to compute.
* Internally we work with 8 byte salt (the 'real' salt), but let john track
 * it as 9 byte salts to also pass in the loop count. The code works even if
 * multiple loop count values are present within the input. PHP v5 kicked up
 * the loop count, and WordPress uses the same format with an even higher loop
 * count. The loop count can be used to 'tune' the format, by asking to
 * process only hashes of a specific count.
*
 * Uses OpenSSL's MD5 and SIMD MD5.
*
* Code was pretty much rewritten to re-enable this format, and to deprecate
 * dynamic_17. It required porting to use the new intrinsic SIMD code, including
 * AVX2, AVX-512, and others, and the overall starting point for this older
 * code was pretty bad. This port was done in August 2015, Jim Fougeron.
*/
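/*
 * Illustrative ciphertext layout, as sliced by get_salt() further below (a sketch
 * for orientation only, not an added test vector):
 *
 *   $P$ C saltsalt <base-64 encoded 16-byte MD5 digest>
 *   ^^^ ^ ^^^^^^^^
 *    |  |  ciphertext[4..11]: the 8 'real' salt bytes
 *    |  ciphertext[3]: one base-64 char, loop count = 1 << atoi64[that char]
 *    signature; phpBBv3 hashes use $H$ with the same layout.
 */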
#if FMT_EXTERNS_H
extern struct fmt_main fmt_phpassmd5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_phpassmd5);
#else
#include <string.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "johnswap.h"
#include "formats.h"
#include "md5.h"
#include "phpass_common.h"
//#undef _OPENMP
//#undef SIMD_COEF_32
//#undef SIMD_PARA_MD5
#ifdef _OPENMP
#define OMP_SCALE 32
#include <omp.h>
#endif
#include "simd-intrinsics.h"
#include "memdbg.h"
#define FORMAT_LABEL "phpass"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "phpass ($P$ or $H$) " MD5_ALGORITHM_NAME
#ifdef SIMD_COEF_32
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_MD5)
#endif
#define BENCHMARK_COMMENT " ($P$9)"
#ifndef MD5_BUF_SIZ
#define MD5_BUF_SIZ 16
#endif
#define DIGEST_SIZE 16
#define SALT_SIZE 8
// NOTE salts are only 8 bytes, but we tell john they are 9.
// We then take the 8 bytes of salt, and append the 1 byte of
// loop count data, making it 9.
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
#if ARCH_LITTLE_ENDIAN==1
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*MD5_BUF_SIZ*4*SIMD_COEF_32 )
#else
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*MD5_BUF_SIZ*4*SIMD_COEF_32 )
#endif
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#ifdef SIMD_COEF_32
// hash with key appended (used on all steps other than first)
static uint32_t (*hash_key)[MD5_BUF_SIZ*NBKEYS];
// salt with key appended (only used in 1st step).
static uint32_t (*cursalt)[MD5_BUF_SIZ*NBKEYS];
static uint32_t (*crypt_key)[DIGEST_SIZE/4*NBKEYS];
static unsigned max_keys;
#else
static char (*crypt_key)[PHPASS_CPU_PLAINTEXT_LENGTH+1+PHPASS_BINARY_SIZE];
static char (*saved_key)[PHPASS_CPU_PLAINTEXT_LENGTH + 1];
static unsigned (*saved_len);
static unsigned char cursalt[SALT_SIZE];
#endif
static unsigned loopCnt;
static void init(struct fmt_main *self) {
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_32
crypt_key = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS,
sizeof(*crypt_key), MEM_ALIGN_SIMD);
hash_key = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS,
sizeof(*hash_key), MEM_ALIGN_SIMD);
cursalt = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS,
sizeof(*cursalt), MEM_ALIGN_SIMD);
max_keys = self->params.max_keys_per_crypt;
#else
saved_len = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_len));
crypt_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_key));
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
#endif
}
static void done(void)
{
MEM_FREE(crypt_key);
#ifndef SIMD_COEF_32
MEM_FREE(saved_len);
MEM_FREE(saved_key);
#else
MEM_FREE(hash_key);
MEM_FREE(cursalt);
#endif
}
static void set_salt(void *salt)
{
#ifdef SIMD_COEF_32
int i;
uint32_t *p;
p = cursalt[0];
for (i = 0; i < max_keys; ++i) {
if (i && (i&(SIMD_COEF_32-1)) == 0)
p += 15*SIMD_COEF_32;
#if ARCH_LITTLE_ENDIAN==1
p[0] = ((uint32_t *)salt)[0];
p[SIMD_COEF_32] = ((uint32_t *)salt)[1];
#else
p[0] = JOHNSWAP(((uint32_t *)salt)[0]);
p[SIMD_COEF_32] = JOHNSWAP(((uint32_t *)salt)[1]);
#endif
++p;
}
#else // !SIMD_COEF_32
memcpy(cursalt, salt, 8);
#endif
// compute the loop count for this salt
loopCnt = (1 << (atoi64[ARCH_INDEX(((char*)salt)[8])]));
}
static void set_key(char *key, int index) {
#ifdef SIMD_COEF_32
// in SIMD, we put the key into the cursalt (at offset 8),
// and into hash_key (at offset 16). We also clean both
// buffers, and put the 0x80, and the length into them.
int len = strlen(key), i, j;
unsigned char *co1 = (unsigned char*)cursalt;
unsigned char *co2 = (unsigned char*)hash_key;
for (i = 0; i < len; ++i) {
// byte by byte. Slow but easy to follow, and the
// speed here does not really matter.
co1[GETPOS(i+8,index)] = key[i];
co2[GETPOS(i+16,index)] = key[i];
}
// Place the end of string marker
co1[GETPOS(i+8,index)] = 0x80;
co2[GETPOS(i+16,index)] = 0x80;
	// clean out the top parts of both buffers.
for (j = i+9; j < 56; ++j)
co1[GETPOS(j,index)] = 0;
for (j = i+17; j < 56; ++j)
co2[GETPOS(j,index)] = 0;
// set the length in bits of salt and hash
co1[GETPOS(56,index)] = ((len+8)<<3)&0xFF;
co2[GETPOS(56,index)] = ((len+16)<<3)&0xFF;
co1[GETPOS(57,index)] = ((len+8)<<3)>>8;
co2[GETPOS(57,index)] = ((len+16)<<3)>>8;
#else
int len= strlen(key);
saved_len[index]=len;
strcpy(saved_key[index], key);
#endif
}
static char *get_key(int index) {
#ifdef SIMD_COEF_32
unsigned char *saltb8 = (unsigned char*)cursalt;
static char out[PHPASS_CPU_PLAINTEXT_LENGTH+1];
int len, i;
// get salt length (in bits)
len = saltb8[GETPOS(57,index)];
len <<= 8;
len |= saltb8[GETPOS(56,index)];
// convert to bytes.
len >>= 3;
// we skip the 8 bytes of salt (to get to password).
len -= 8;
// now grab the password.
for (i = 0; i < len; ++i)
out[i] = saltb8[GETPOS(8+i,index)];
out[i] = 0;
return out;
#else
return saved_key[index];
#endif
}
static int cmp_all(void *binary, int count) {
unsigned i = 0;
#ifdef SIMD_COEF_32
uint32_t *p;
uint32_t bin = *(uint32_t *)binary;
p = crypt_key[0];
for (i = 0; i < count; ++i) {
if (i && (i&(SIMD_COEF_32-1)) == 0)
p += 3*SIMD_COEF_32;
if (bin == *p++)
return 1;
}
return 0;
#else
for (i = 0; i < count; i++)
if (!memcmp(binary, crypt_key[i], PHPASS_BINARY_SIZE))
return 1;
return 0;
#endif
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static int cmp_one(void * binary, int index)
{
#ifdef SIMD_COEF_32
int idx = index&(SIMD_COEF_32-1);
int off = (index/SIMD_COEF_32)*(4*SIMD_COEF_32);
return((((uint32_t *)binary)[0] == ((uint32_t *)crypt_key)[off+0*SIMD_COEF_32+idx]) &&
(((uint32_t *)binary)[1] == ((uint32_t *)crypt_key)[off+1*SIMD_COEF_32+idx]) &&
(((uint32_t *)binary)[2] == ((uint32_t *)crypt_key)[off+2*SIMD_COEF_32+idx]) &&
(((uint32_t *)binary)[3] == ((uint32_t *)crypt_key)[off+3*SIMD_COEF_32+idx]));
#else
return !memcmp(binary, crypt_key[index], PHPASS_BINARY_SIZE);
#endif
}
static int crypt_all(int *pcount, struct db_salt *salt) {
const int count = *pcount;
int loops = 1, index;
#ifdef _OPENMP
loops = (count + MAX_KEYS_PER_CRYPT - 1) / MAX_KEYS_PER_CRYPT;
#pragma omp parallel for
#endif
for (index = 0; index < loops; index++)
{
unsigned Lcount;
#ifdef SIMD_COEF_32
SIMDmd5body(cursalt[index], hash_key[index], NULL, SSEi_OUTPUT_AS_INP_FMT);
Lcount = loopCnt-1;
do {
SIMDmd5body(hash_key[index], hash_key[index], NULL, SSEi_OUTPUT_AS_INP_FMT);
} while (--Lcount);
// last hash goes into crypt_key
SIMDmd5body(hash_key[index], crypt_key[index], NULL, 0);
#else
MD5_CTX ctx;
MD5_Init( &ctx );
MD5_Update( &ctx, cursalt, 8 );
MD5_Update( &ctx, saved_key[index], saved_len[index] );
MD5_Final( (unsigned char *) crypt_key[index], &ctx);
strcpy(((char*)&(crypt_key[index]))+PHPASS_BINARY_SIZE, saved_key[index]);
Lcount = loopCnt;
do {
MD5_Init( &ctx );
MD5_Update( &ctx, crypt_key[index], PHPASS_BINARY_SIZE+saved_len[index]);
MD5_Final( (unsigned char *)&(crypt_key[index]), &ctx);
} while (--Lcount);
#endif
}
return count;
}
static void *get_salt(char *ciphertext)
{
static union {
unsigned char salt[SALT_SIZE+2];
uint64_t dummy;
} x;
unsigned char *salt = x.salt;
// store off the 'real' 8 bytes of salt
memcpy(salt, &ciphertext[4], 8);
// append the 1 byte of loop count information.
salt[8] = ciphertext[3];
salt[9]=0;
return salt;
}
#define COMMON_GET_HASH_SIMD32 4
#define COMMON_GET_HASH_VAR crypt_key
#include "common-get-hash.h"
static int salt_hash(void *salt)
{
return *((ARCH_WORD *)salt) & 0x3FF;
}
struct fmt_main fmt_phpassmd5 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PHPASS_CPU_PLAINTEXT_LENGTH,
PHPASS_BINARY_SIZE,
PHPASS_BINARY_ALIGN,
SALT_SIZE+1,
PHPASS_SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
FMT_OMP |
#endif
FMT_CASE | FMT_8_BIT,
{
"iteration count",
},
{ FORMAT_TAG, FORMAT_TAG2, FORMAT_TAG3 },
phpass_common_tests_39
}, {
init,
done,
fmt_default_reset,
phpass_common_prepare,
phpass_common_valid,
phpass_common_split,
phpass_common_binary,
get_salt,
{
phpass_common_iteration_count,
},
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
radial_integrals.h | // Copyright (c) 2013-2017 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file radial_integrals.h
*
* \brief Representation of various radial integrals.
*/
#ifndef __RADIAL_INTEGRALS_H__
#define __RADIAL_INTEGRALS_H__
#include "Unit_cell/unit_cell.h"
#include "sbessel.h"
namespace sirius {
/// Base class for all kinds of radial integrals.
template <int N>
class Radial_integrals_base
{
protected:
/// Unit cell.
Unit_cell const& unit_cell_;
/// Linear grid of q-points on which the interpolation of radial integrals is done.
Radial_grid<double> grid_q_;
/// Split index of q-points.
splindex<block> spl_q_;
/// Array with integrals.
mdarray<Spline<double>, N> values_;
public:
/// Constructor.
Radial_integrals_base(Unit_cell const& unit_cell__, double qmax__, int np__)
: unit_cell_(unit_cell__)
{
grid_q_ = Radial_grid_lin<double>(static_cast<int>(np__ * qmax__), 0, qmax__);
spl_q_ = splindex<block>(grid_q_.num_points(), unit_cell_.comm().size(), unit_cell_.comm().rank());
}
/// Get starting index iq and delta dq for the q-point on the linear grid.
/** The following condition is satisfied: q = grid_q[iq] + dq */
inline std::pair<int, double> iqdq(double q__) const
{
std::pair<int, double> result;
/* find index of q-point */
result.first = static_cast<int>((grid_q_.num_points() - 1) * q__ / grid_q_.last());
/* delta q = q - q_i */
result.second = q__ - grid_q_[result.first];
        return result;
}
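    /* Example: a Radial_integrals_base constructed with qmax__ = 10 and np__ = 20
       has a linear grid of 200 points on [0, 10]; for q__ = 3.14 this returns
       iq = static_cast<int>((nq() - 1) * 3.14 / 10.) and dq = 3.14 - grid_q_[iq],
       so that q__ = grid_q_[iq] + dq and the splines stored in values_ can be
       evaluated at (iq, dq), as done in value() below. */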
template <typename... Args>
inline double value(Args... args, double q__) const
{
auto idx = iqdq(q__);
return values_(args...)(idx.first, idx.second);
}
inline int nq() const
{
return grid_q_.num_points();
}
};
/// Radial integrals of the atomic-centered orbitals. Used in
/// initialize_subspace and in the Hubbard correction.
class Radial_integrals_atomic_wf : public Radial_integrals_base<2>
{
private:
//std::vector<std::vector<int>> angular_momentum;
//std::vector<std::vector<double>> total_angular_momentum;
void generate()
{
PROFILE("sirius::Radial_integrals|atomic_centered_wfc");
/* spherical Bessel functions jl(qx) */
mdarray<Spherical_Bessel_functions, 1> jl(nq());
//angular_momentum.clear();
//total_angular_momentum.clear();
//angular_momentum.resize(unit_cell_.num_atom_types());
//total_angular_momentum.resize(unit_cell_.num_atom_types());
for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) {
auto& atom_type = unit_cell_.atom_type(iat);
int nwf = atom_type.num_ps_atomic_wf();
if (!nwf) {
continue;
}
/* create jl(qx) */
#pragma omp parallel for
for (int iq = 0; iq < nq(); iq++) {
jl(iq) = Spherical_Bessel_functions(atom_type.lmax_ps_atomic_wf(), atom_type.radial_grid(), grid_q_[iq]);
}
//angular_momentum[iat].clear();
//angular_momentum[iat].resize(nwf);
//total_angular_momentum[iat].clear();
//total_angular_momentum[iat].resize(nwf);
/* loop over all pseudo wave-functions */
for (int i = 0; i < nwf; i++) {
values_(i, iat) = Spline<double>(grid_q_);
auto& wf = atom_type.ps_atomic_wf(i);
const int l = std::abs(wf.first);
const double norm = inner(wf.second, wf.second, 0);
#pragma omp parallel for
for (int iq = 0; iq < nq(); iq++) {
values_(i, iat)(iq) = sirius::inner(jl(iq)[l], wf.second, 1) / std::sqrt(norm);
}
values_(i, iat).interpolate();
//angular_momentum[iat][i] = l;
//if (atom_type.pp_desc().total_angular_momentum_wfs.size()) {
// // will need it for Hubbard + so
// total_angular_momentum[iat][i] = atom_type.pp_desc().total_angular_momentum_wfs[i];
//}
}
}
}
public:
Radial_integrals_atomic_wf(Unit_cell const& unit_cell__, double qmax__, int np__)
: Radial_integrals_base<2>(unit_cell__, qmax__, np__)
{
int no_max{0};
for (int iat = 0; iat < unit_cell__.num_atom_types(); iat++) {
no_max = std::max(no_max, unit_cell__.atom_type(iat).num_ps_atomic_wf());
}
values_ = mdarray<Spline<double>, 2>(no_max, unit_cell_.num_atom_types());
generate();
}
/// retrieve a given orbital from an atom type
inline Spline<double> const& values(int iwf__, int iat__) const
{
return values_(iwf__, iat__);
}
/// Get all values for a given atom type and q-point.
inline mdarray<double, 1> values(int iat__, double q__) const
{
auto idx = iqdq(q__);
auto& atom_type = unit_cell_.atom_type(iat__);
mdarray<double, 1> val(atom_type.num_ps_atomic_wf());
for (int i = 0; i < atom_type.num_ps_atomic_wf(); i++) {
val(i) = values_(i, iat__)(idx.first, idx.second);
}
        return val;
}
};
/// Radial integrals of the augmentation operator.
template <bool jl_deriv>
class Radial_integrals_aug : public Radial_integrals_base<3>
{
private:
void generate()
{
PROFILE("sirius::Radial_integrals|aug");
/* interpolate <j_{l_n}(q*x) | Q_{xi,xi'}^{l}(x) > with splines */
for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) {
auto& atom_type = unit_cell_.atom_type(iat);
if (!atom_type.augment()) {
continue;
}
/* number of radial beta-functions */
int nbrf = atom_type.mt_radial_basis_size();
/* maximum l of beta-projectors */
int lmax_beta = atom_type.indexr().lmax();
for (int l = 0; l <= 2 * lmax_beta; l++) {
for (int idx = 0; idx < nbrf * (nbrf + 1) / 2; idx++) {
values_(idx, l, iat) = Spline<double>(grid_q_);
}
}
#pragma omp parallel for
for (int iq_loc = 0; iq_loc < spl_q_.local_size(); iq_loc++) {
int iq = spl_q_[iq_loc];
Spherical_Bessel_functions jl(2 * lmax_beta, atom_type.radial_grid(), grid_q_[iq]);
for (int l3 = 0; l3 <= 2 * lmax_beta; l3++) {
for (int idxrf2 = 0; idxrf2 < nbrf; idxrf2++) {
int l2 = atom_type.indexr(idxrf2).l;
for (int idxrf1 = 0; idxrf1 <= idxrf2; idxrf1++) {
int l1 = atom_type.indexr(idxrf1).l;
int idx = idxrf2 * (idxrf2 + 1) / 2 + idxrf1;
if (l3 >= std::abs(l1 - l2) && l3 <= (l1 + l2) && (l1 + l2 + l3) % 2 == 0) {
if (jl_deriv) {
auto s = jl.deriv_q(l3);
values_(idx, l3, iat)(iq) =
sirius::inner(s, atom_type.q_radial_function(idxrf1, idxrf2, l3), 0);
} else {
values_(idx, l3, iat)(iq) =
sirius::inner(jl[l3], atom_type.q_radial_function(idxrf1, idxrf2, l3), 0);
}
}
}
}
}
}
for (int l = 0; l <= 2 * lmax_beta; l++) {
for (int idx = 0; idx < nbrf * (nbrf + 1) / 2; idx++) {
unit_cell_.comm().allgather(&values_(idx, l, iat)(0), spl_q_.global_offset(), spl_q_.local_size());
}
}
#pragma omp parallel for
for (int l = 0; l <= 2 * lmax_beta; l++) {
for (int idx = 0; idx < nbrf * (nbrf + 1) / 2; idx++) {
values_(idx, l, iat).interpolate();
}
}
}
}
public:
Radial_integrals_aug(Unit_cell const& unit_cell__, double qmax__, int np__)
: Radial_integrals_base<3>(unit_cell__, qmax__, np__)
{
int nmax = unit_cell_.max_mt_radial_basis_size();
int lmax = unit_cell_.lmax();
values_ = mdarray<Spline<double>, 3>(nmax * (nmax + 1) / 2, 2 * lmax + 1, unit_cell_.num_atom_types());
generate();
}
inline mdarray<double, 2> values(int iat__, double q__) const
{
auto idx = iqdq(q__);
auto& atom_type = unit_cell_.atom_type(iat__);
int lmax = atom_type.indexr().lmax();
int nbrf = atom_type.mt_radial_basis_size();
mdarray<double, 2> val(nbrf * (nbrf + 1) / 2, 2 * lmax + 1);
for (int l = 0; l <= 2 * lmax; l++) {
for (int i = 0; i < nbrf * (nbrf + 1) / 2; i++) {
val(i, l) = values_(i, l, iat__)(idx.first, idx.second);
}
}
        return val;
}
};
class Radial_integrals_rho_pseudo : public Radial_integrals_base<1>
{
private:
void generate()
{
PROFILE("sirius::Radial_integrals|rho_pseudo");
for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) {
auto& atom_type = unit_cell_.atom_type(iat);
if (atom_type.ps_total_charge_density().empty()) {
continue;
}
values_(iat) = Spline<double>(grid_q_);
Spline<double> rho(atom_type.radial_grid(), atom_type.ps_total_charge_density());
#pragma omp parallel for
for (int iq_loc = 0; iq_loc < spl_q_.local_size(); iq_loc++) {
int iq = spl_q_[iq_loc];
Spherical_Bessel_functions jl(0, atom_type.radial_grid(), grid_q_[iq]);
values_(iat)(iq) = sirius::inner(jl[0], rho, 0, atom_type.num_mt_points()) / fourpi;
}
unit_cell_.comm().allgather(&values_(iat)(0), spl_q_.global_offset(), spl_q_.local_size());
values_(iat).interpolate();
}
}
public:
Radial_integrals_rho_pseudo(Unit_cell const& unit_cell__, double qmax__, int np__)
: Radial_integrals_base<1>(unit_cell__, qmax__, np__)
{
values_ = mdarray<Spline<double>, 1>(unit_cell_.num_atom_types());
generate();
}
};
template <bool jl_deriv>
class Radial_integrals_rho_core_pseudo : public Radial_integrals_base<1>
{
private:
void generate()
{
PROFILE("sirius::Radial_integrals|rho_core_pseudo");
for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) {
auto& atom_type = unit_cell_.atom_type(iat);
if (atom_type.ps_core_charge_density().empty()) {
continue;
}
values_(iat) = Spline<double>(grid_q_);
Spline<double> ps_core(atom_type.radial_grid(), atom_type.ps_core_charge_density());
#pragma omp parallel for
for (int iq_loc = 0; iq_loc < spl_q_.local_size(); iq_loc++) {
int iq = spl_q_[iq_loc];
Spherical_Bessel_functions jl(0, atom_type.radial_grid(), grid_q_[iq]);
if (jl_deriv) {
auto s = jl.deriv_q(0);
values_(iat)(iq) = sirius::inner(s, ps_core, 2, atom_type.num_mt_points());
} else {
values_(iat)(iq) = sirius::inner(jl[0], ps_core, 2, atom_type.num_mt_points());
}
}
unit_cell_.comm().allgather(&values_(iat)(0), spl_q_.global_offset(), spl_q_.local_size());
values_(iat).interpolate();
}
}
public:
Radial_integrals_rho_core_pseudo(Unit_cell const& unit_cell__, double qmax__, int np__)
: Radial_integrals_base<1>(unit_cell__, qmax__, np__)
{
values_ = mdarray<Spline<double>, 1>(unit_cell_.num_atom_types());
generate();
}
};
template <bool jl_deriv>
class Radial_integrals_beta : public Radial_integrals_base<2>
{
private:
void generate()
{
PROFILE("sirius::Radial_integrals|beta");
for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) {
auto& atom_type = unit_cell_.atom_type(iat);
int nrb = atom_type.num_beta_radial_functions();
if (!nrb) {
continue;
}
for (int idxrf = 0; idxrf < nrb; idxrf++) {
values_(idxrf, iat) = Spline<double>(grid_q_);
}
#pragma omp parallel for
for (int iq_loc = 0; iq_loc < spl_q_.local_size(); iq_loc++) {
int iq = spl_q_[iq_loc];
Spherical_Bessel_functions jl(unit_cell_.lmax(), atom_type.radial_grid(), grid_q_[iq]);
for (int idxrf = 0; idxrf < nrb; idxrf++) {
int l = atom_type.indexr(idxrf).l;
                    /* compute \int j_l(q * r) beta_l(r) r^2 dr or \int [d j_l(q*r) / dq] beta_l(r) r^2 dr */
                    /* remember that beta(r) is defined as already multiplied by r */
if (jl_deriv) {
auto s = jl.deriv_q(l);
values_(idxrf, iat)(iq) = sirius::inner(s, atom_type.beta_radial_function(idxrf), 1);
} else {
values_(idxrf, iat)(iq) = sirius::inner(jl[l], atom_type.beta_radial_function(idxrf), 1);
}
}
}
for (int idxrf = 0; idxrf < nrb; idxrf++) {
unit_cell_.comm().allgather(&values_(idxrf, iat)(0), spl_q_.global_offset(), spl_q_.local_size());
values_(idxrf, iat).interpolate();
}
}
}
public:
Radial_integrals_beta(Unit_cell const& unit_cell__, double qmax__, int np__)
: Radial_integrals_base<2>(unit_cell__, qmax__, np__)
{
/* create space for <j_l(qr)|beta> or <d j_l(qr) / dq|beta> radial integrals */
values_ = mdarray<Spline<double>, 2>(unit_cell_.max_mt_radial_basis_size(), unit_cell_.num_atom_types());
generate();
}
/// Get all values for a given atom type and q-point.
inline mdarray<double, 1> values(int iat__, double q__) const
{
auto idx = iqdq(q__);
auto& atom_type = unit_cell_.atom_type(iat__);
mdarray<double, 1> val(atom_type.mt_radial_basis_size());
for (int i = 0; i < atom_type.mt_radial_basis_size(); i++) {
val(i) = values_(i, iat__)(idx.first, idx.second);
}
        return val;
}
};
class Radial_integrals_beta_jl : public Radial_integrals_base<3>
{
private:
int lmax_;
void generate()
{
PROFILE("sirius::Radial_integrals|beta_jl");
for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) {
auto& atom_type = unit_cell_.atom_type(iat);
int nrb = atom_type.num_beta_radial_functions();
if (!nrb) {
continue;
}
for (int idxrf = 0; idxrf < nrb; idxrf++) {
for (int l = 0; l <= lmax_; l++) {
values_(idxrf, l, iat) = Spline<double>(grid_q_);
}
}
#pragma omp parallel for
for (int iq = 0; iq < grid_q_.num_points(); iq++) {
Spherical_Bessel_functions jl(lmax_, atom_type.radial_grid(), grid_q_[iq]);
for (int idxrf = 0; idxrf < nrb; idxrf++) {
//int nr = atom_type.pp_desc().num_beta_radial_points[idxrf];
for (int l = 0; l <= lmax_; l++) {
/* compute \int j_{l'}(q * r) beta_l(r) r^2 * r * dr */
                    /* remember that beta(r) is defined as already multiplied by r */
values_(idxrf, l, iat)(iq) = sirius::inner(jl[l], atom_type.beta_radial_function(idxrf), 2);
}
}
}
#pragma omp parallel for
for (int idxrf = 0; idxrf < nrb; idxrf++) {
for (int l = 0; l <= lmax_; l++) {
values_(idxrf, l, iat).interpolate();
}
}
}
}
public:
Radial_integrals_beta_jl(Unit_cell const& unit_cell__, double qmax__, int np__)
: Radial_integrals_base<3>(unit_cell__, qmax__, np__)
{
lmax_ = unit_cell__.lmax() + 2;
/* create space for <j_l(qr)|beta> radial integrals */
values_ =
mdarray<Spline<double>, 3>(unit_cell_.max_mt_radial_basis_size(), lmax_ + 1, unit_cell_.num_atom_types());
generate();
}
};
/// Radial integrals for the step function of the LAPW method.
/** Radial integrals have the following expression:
* \f[
* \Theta(\alpha, G) = \int_{0}^{R_{\alpha}} \frac{\sin(Gr)}{Gr} r^2 dr =
* \left\{ \begin{array}{ll} \displaystyle R_{\alpha}^3 / 3 & G=0 \\
* \Big( \sin(GR_{\alpha}) - GR_{\alpha}\cos(GR_{\alpha}) \Big) / G^3 & G \ne 0 \end{array} \right.
* \f]
*/
//class Radial_integrals_theta : public Radial_integrals_base<1>
//{
// private:
// void generate()
// {
// PROFILE("sirius::Radial_integrals|theta");
//
// for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) {
// auto& atom_type = unit_cell_.atom_type(iat);
// auto R = atom_type.mt_radius();
// values_(iat) = Spline<double>(grid_q_);
//
// #pragma omp parallel for
// for (int iq = 0; iq < grid_q_.num_points(); iq++) {
// if (iq == 0) {
// values_(iat)[iq] = std::pow(R, 3) / 3.0;
// } else {
// double g = grid_q_[iq];
// values_(iat)[iq] = (std::sin(g * R) - g * R * std::cos(g * R)) / std::pow(g, 3);
// }
// }
// values_(iat).interpolate();
// }
// }
//
// public:
// Radial_integrals_theta(Unit_cell const& unit_cell__, double qmax__, int np__)
// : Radial_integrals_base<1>(unit_cell__, qmax__, np__)
// {
// values_ = mdarray<Spline<double>, 1>(unit_cell_.num_atom_types());
// generate();
// }
//};
template <bool jl_deriv>
class Radial_integrals_vloc : public Radial_integrals_base<1>
{
private:
void generate()
{
PROFILE("sirius::Radial_integrals|vloc");
for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) {
auto& atom_type = unit_cell_.atom_type(iat);
if (atom_type.local_potential().empty()) {
continue;
}
values_(iat) = Spline<double>(grid_q_);
auto& vloc = atom_type.local_potential();
int np = atom_type.radial_grid().index_of(10);
if (np == -1) {
np = atom_type.num_mt_points();
}
auto rg = atom_type.radial_grid().segment(np);
#pragma omp parallel for
for (int iq_loc = 0; iq_loc < spl_q_.local_size(); iq_loc++) {
int iq = spl_q_[iq_loc];
Spline<double> s(rg);
double g = grid_q_[iq];
if (jl_deriv) { /* integral with derivative of j0(q*r) over q */
for (int ir = 0; ir < rg.num_points(); ir++) {
double x = rg[ir];
s(ir) = (x * vloc[ir] + atom_type.zn() * gsl_sf_erf(x)) *
(std::sin(g * x) - g * x * std::cos(g * x));
}
} else { /* integral with j0(q*r) */
if (iq == 0) { /* q=0 case */
if (unit_cell_.parameters().parameters_input().enable_esm_ &&
unit_cell_.parameters().parameters_input().esm_bc_ != "pbc") {
for (int ir = 0; ir < rg.num_points(); ir++) {
double x = rg[ir];
s(ir) = (x * vloc[ir] + atom_type.zn() * gsl_sf_erf(x)) * x;
}
} else {
for (int ir = 0; ir < rg.num_points(); ir++) {
double x = rg[ir];
s(ir) = (x * vloc[ir] + atom_type.zn()) * x;
}
}
} else {
for (int ir = 0; ir < rg.num_points(); ir++) {
double x = rg[ir];
s(ir) = (x * vloc[ir] + atom_type.zn() * gsl_sf_erf(x)) * std::sin(g * x);
}
}
}
values_(iat)(iq) = s.interpolate().integrate(0);
}
unit_cell_.comm().allgather(&values_(iat)(0), spl_q_.global_offset(), spl_q_.local_size());
values_(iat).interpolate();
}
}
public:
Radial_integrals_vloc(Unit_cell const& unit_cell__, double qmax__, int np__)
: Radial_integrals_base<1>(unit_cell__, qmax__, np__)
{
values_ = mdarray<Spline<double>, 1>(unit_cell_.num_atom_types());
generate();
}
inline double value(int iat__, double q__) const
{
auto idx = iqdq(q__);
if (std::abs(q__) < 1e-12) {
if (jl_deriv) {
return 0;
} else {
return values_(iat__)(0);
}
} else {
auto& atom_type = unit_cell_.atom_type(iat__);
auto q2 = std::pow(q__, 2);
if (jl_deriv) {
if (!unit_cell_.parameters().parameters_input().enable_esm_ ||
unit_cell_.parameters().parameters_input().esm_bc_ == "pbc") {
return values_(iat__)(idx.first, idx.second) / q2 / q__ -
atom_type.zn() * std::exp(-q2 / 4) * (4 + q2) / 2 / q2 / q2;
} else {
return values_(iat__)(idx.first, idx.second) / q2 / q__;
}
} else {
if (!unit_cell_.parameters().parameters_input().enable_esm_ ||
unit_cell_.parameters().parameters_input().esm_bc_ == "pbc") {
return values_(iat__)(idx.first, idx.second) / q__ - atom_type.zn() * std::exp(-q2 / 4) / q2;
} else {
return values_(iat__)(idx.first, idx.second) / q__;
}
}
}
}
};
class Radial_integrals_rho_free_atom : public Radial_integrals_base<1>
{
private:
void generate()
{
PROFILE("sirius::Radial_integrals|rho_free_atom");
for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) {
auto& atom_type = unit_cell_.atom_type(iat);
values_(iat) = Spline<double>(grid_q_);
#pragma omp parallel for
for (int iq = 0; iq < grid_q_.num_points(); iq++) {
double g = grid_q_[iq];
Spline<double> s(unit_cell_.atom_type(iat).free_atom_radial_grid());
if (iq == 0) {
for (int ir = 0; ir < s.num_points(); ir++) {
s(ir) = atom_type.free_atom_density(ir);
}
values_(iat)(iq) = s.interpolate().integrate(2);
} else {
for (int ir = 0; ir < s.num_points(); ir++) {
s(ir) = atom_type.free_atom_density(ir) * std::sin(g * atom_type.free_atom_radial_grid(ir)) / g;
}
values_(iat)(iq) = s.interpolate().integrate(1);
}
}
values_(iat).interpolate();
}
}
public:
Radial_integrals_rho_free_atom(Unit_cell const& unit_cell__, double qmax__, int np__)
: Radial_integrals_base<1>(unit_cell__, qmax__, np__)
{
values_ = mdarray<Spline<double>, 1>(unit_cell_.num_atom_types());
generate();
}
};
} // namespace sirius
#endif // __RADIAL_INTEGRALS_H__
|
rect2lune.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "compearth.h"
#ifdef COMPEARTH_USE_MKL
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wreserved-id-macro"
#pragma clang diagnostic ignored "-Wstrict-prototypes"
#endif
#include <mkl_cblas.h>
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#else
#include <cblas.h>
#endif
/*!
* @brief Converts v-w coordinates to lune coordinates.
*
* @param[in] nv Number of v coordinates.
 * @param[in] v      v coordinates s.t. \f$ v \in [-1/3, 1/3] \f$. This is
 *                   an array of dimension [nv].
* @param[in] nw Number of w coordinates.
* @param[in] w w coordinates (similar to lune latitudes) s.t.
 *                   \f$ w \in [-3\pi/8, 3\pi/8] \f$. This is an
* array of dimension [nw].
*
* @param[out] gamma Longitude (degrees) \f$ \gamma \in [-30,30] \f$.
* This is an array of dimension [nv].
* @param[out] delta lune latitude (degrees) \f$ \delta \in [-90,90] \f$.
* This is an array of dimension [nw].
*
* @result 0 indicates success.
*
* @author Carl Tape and converted to C by Ben Baker
*
* @date 2016
*
* @copyright MIT
*
*/
int compearth_rect2lune(const int nv, const double *__restrict__ v,
const int nw, const double *__restrict__ w,
double *__restrict__ gamma,
double *__restrict__ delta)
{
double u[CE_CHUNKSIZE] __attribute__((aligned(64)));
int i, j, ierr, nuLoc;
/*
const int maxit = 20;
const int useHalley = 2;
const double tol = 1.e-12;
*/
const double pi38 = 3.0*M_PI/8.0;
const double pi180i = 180.0/M_PI;
ierr = 0;
// convert v -> gamma
compearth_v2gamma(nv, v, gamma);
// convert latitude to colatitude
for (i=0; i<nw; i=i+CE_CHUNKSIZE)
{
nuLoc = MIN(CE_CHUNKSIZE, nw - i);
for (j=0; j<nuLoc; j++)
{
u[j] = pi38 - w[i+j];
}
// convert u -> beta
ierr = compearth_u2beta(nuLoc, u, &delta[i]);
//ierr = compearth_u2beta(nuLoc, maxit, useHalley, u, tol, &delta[i]);
if (ierr != 0)
{
fprintf(stderr, "%s: Computing u2beta\n", __func__);
return -1;
}
}
    // convert gamma to degrees
cblas_dscal(nv, pi180i, gamma, 1);
// convert delta from colatitude to latitude and radians to degrees
#pragma omp simd
for (i=0; i<nw; i++)
{
delta[i] = 90.0 - pi180i*delta[i];
}
return ierr;
}
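/* Illustrative usage (a sketch; the caller owns all arrays):
 *
 *   double v[3] = {-1.0/3.0, 0.0, 1.0/3.0};
 *   double w[2] = {-3.0*M_PI/8.0, 0.0};
 *   double gamma[3], delta[2];
 *   compearth_rect2lune(3, v, 2, w, gamma, delta);
 *   // gamma[k] is a lune longitude in [-30,30] degrees,
 *   // delta[k] is a lune latitude in [-90,90] degrees
 */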
|
mypaint-tiled-surface.c | /* libmypaint - The MyPaint Brush Library
* Copyright (C) 2007-2014 Martin Renold <martinxyz@gmx.ch> et. al.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "config.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "mypaint-config.h"
#include "mypaint-tiled-surface.h"
#include "tiled-surface-private.h"
#include "helpers.h"
#include "brushmodes.h"
#include "operationqueue.h"
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
void process_tile(MyPaintTiledSurface *self, int tx, int ty);
static void
begin_atomic_default(MyPaintSurface *surface)
{
mypaint_tiled_surface_begin_atomic((MyPaintTiledSurface *)surface);
}
static void
end_atomic_default(MyPaintSurface *surface, MyPaintRectangle *roi)
{
mypaint_tiled_surface_end_atomic((MyPaintTiledSurface *)surface, roi);
}
/**
* mypaint_tiled_surface_begin_atomic: (skip)
*
 * Implementation of #MyPaintSurface::begin_atomic vfunc
* Note: Only intended to be used from #MyPaintTiledSurface subclasses, which should chain up to this
* if implementing their own #MyPaintSurface::begin_atomic vfunc.
 * Application code should only use mypaint_surface_begin_atomic()
*/
void
mypaint_tiled_surface_begin_atomic(MyPaintTiledSurface *self)
{
self->dirty_bbox.height = 0;
self->dirty_bbox.width = 0;
self->dirty_bbox.y = 0;
self->dirty_bbox.x = 0;
}
/**
* mypaint_tiled_surface_end_atomic: (skip)
*
* Implementation of #MyPaintSurface::end_atomic vfunc
* Note: Only intended to be used from #MyPaintTiledSurface subclasses, which should chain up to this
* if implementing their own #MyPaintSurface::end_atomic vfunc.
* Application code should only use mypaint_surface_end_atomic().
*/
void
mypaint_tiled_surface_end_atomic(MyPaintTiledSurface *self, MyPaintRectangle *roi)
{
// Process tiles
TileIndex *tiles;
int tiles_n = operation_queue_get_dirty_tiles(self->operation_queue, &tiles);
#pragma omp parallel for schedule(static) if(self->threadsafe_tile_requests && tiles_n > 3)
for (int i = 0; i < tiles_n; i++) {
process_tile(self, tiles[i].x, tiles[i].y);
}
operation_queue_clear_dirty_tiles(self->operation_queue);
if (roi) {
*roi = self->dirty_bbox;
}
}
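/* Typical usage pattern (a sketch, assuming `surface` points to a concrete
 * MyPaintTiledSurface subclass whose first member is its MyPaintSurface parent):
 *
 *   MyPaintRectangle roi;
 *   mypaint_surface_begin_atomic((MyPaintSurface *)surface);
 *   // ... queue dabs via the MyPaintSurface interface ...
 *   mypaint_surface_end_atomic((MyPaintSurface *)surface, &roi);
 *   // roi now holds the bounding box made dirty by this batch of operations
 */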
/**
* mypaint_tiled_surface_tile_request_start:
*
* Fetch a tile out from the underlying tile store.
 * When successful, request->buffer will be set to point to the fetched tile.
* Consumers must *always* call mypaint_tiled_surface_tile_request_end() with the same
* request to complete the transaction.
*/
void mypaint_tiled_surface_tile_request_start(MyPaintTiledSurface *self, MyPaintTileRequest *request)
{
assert(self->tile_request_start);
self->tile_request_start(self, request);
}
/**
* mypaint_tiled_surface_tile_request_end:
*
* Put a (potentially modified) tile back into the underlying tile store.
*
* Consumers must *always* call mypaint_tiled_surface_tile_request_start() with the same
* request to start the transaction before calling this function.
*/
void mypaint_tiled_surface_tile_request_end(MyPaintTiledSurface *self, MyPaintTileRequest *request)
{
assert(self->tile_request_end);
self->tile_request_end(self, request);
}
/* FIXME: either expose this through MyPaintSurface, or move it into the brush engine */
/**
* mypaint_tiled_surface_set_symmetry_state:
* @active: TRUE to enable, FALSE to disable.
* @center_x: X axis to mirror events across.
*
* Enable/Disable symmetric brush painting across an X axis.
*/
void
mypaint_tiled_surface_set_symmetry_state(MyPaintTiledSurface *self, gboolean active, float center_x)
{
self->surface_do_symmetry = active;
self->surface_center_x = center_x;
}
/**
* mypaint_tile_request_init:
*
* Initialize a request for use with mypaint_tiled_surface_tile_request_start()
* and mypaint_tiled_surface_tile_request_end()
*/
void
mypaint_tile_request_init(MyPaintTileRequest *data, int level,
int tx, int ty, gboolean readonly)
{
data->tx = tx;
data->ty = ty;
data->readonly = readonly;
data->buffer = NULL;
data->context = NULL;
#ifdef _OPENMP
data->thread_id = omp_get_thread_num();
#else
data->thread_id = -1;
#endif
data->mipmap_level = level;
}
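/* Typical transaction (a sketch): fetch tile (tx, ty) of `surface` read-only at
 * mipmap level 0, inspect its pixel data, then hand it back:
 *
 *   MyPaintTileRequest request;
 *   mypaint_tile_request_init(&request, 0, tx, ty, TRUE);
 *   mypaint_tiled_surface_tile_request_start(surface, &request);
 *   // ... read request.buffer ...
 *   mypaint_tiled_surface_tile_request_end(surface, &request);
 */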
// Must be threadsafe
static inline float
calculate_r_sample(float x, float y, float aspect_ratio,
float sn, float cs)
{
const float yyr=(y*cs-x*sn)*aspect_ratio;
const float xxr=y*sn+x*cs;
const float r = (yyr*yyr + xxr*xxr);
return r;
}
static inline float
calculate_rr(int xp, int yp, float x, float y, float aspect_ratio,
float sn, float cs, float one_over_radius2)
{
// code duplication, see brush::count_dabs_to()
const float yy = (yp + 0.5f - y);
const float xx = (xp + 0.5f - x);
const float yyr=(yy*cs-xx*sn)*aspect_ratio;
const float xxr=yy*sn+xx*cs;
const float rr = (yyr*yyr + xxr*xxr) * one_over_radius2;
// rr is in range 0.0..1.0*sqrt(2)
return rr;
}
static inline float
sign_point_in_line( float px, float py, float vx, float vy )
{
return (px - vx) * (-vy) - (vx) * (py - vy);
}
static inline void
closest_point_to_line( float lx, float ly, float px, float py, float *ox, float *oy )
{
const float l2 = lx*lx + ly*ly;
const float ltp_dot = px*lx + py*ly;
const float t = ltp_dot / l2;
*ox = lx * t;
*oy = ly * t;
}
// Must be threadsafe
//
// This works by taking the visibility at the nearest point
// and dividing by 1.0 + delta.
//
// - nearest point: point where the dab has more influence
// - farthest point: point at a fixed distance away from
// the nearest point
// - delta: how much occluded is the farthest point relative
// to the nearest point
static inline float
calculate_rr_antialiased(int xp, int yp, float x, float y, float aspect_ratio,
float sn, float cs, float one_over_radius2,
float r_aa_start)
{
// calculate pixel position and borders in a way
// that the dab's center is always at zero
float pixel_right = x - (float)xp;
float pixel_bottom = y - (float)yp;
float pixel_center_x = pixel_right - 0.5f;
float pixel_center_y = pixel_bottom - 0.5f;
float pixel_left = pixel_right - 1.0f;
float pixel_top = pixel_bottom - 1.0f;
float nearest_x, nearest_y; // nearest to origin, but still inside pixel
float farthest_x, farthest_y; // farthest from origin, but still inside pixel
float r_near, r_far, rr_near, rr_far;
// Dab's center is inside pixel?
if( pixel_left<0 && pixel_right>0 &&
pixel_top<0 && pixel_bottom>0 )
{
nearest_x = 0;
nearest_y = 0;
r_near = rr_near = 0;
}
else
{
closest_point_to_line( cs, sn, pixel_center_x, pixel_center_y, &nearest_x, &nearest_y );
nearest_x = CLAMP( nearest_x, pixel_left, pixel_right );
nearest_y = CLAMP( nearest_y, pixel_top, pixel_bottom );
// XXX: precision of "nearest" values could be improved
// by intersecting the line that goes from nearest_x/Y to 0
// with the pixel's borders here, however the improvements
    // would probably not justify the performance cost.
r_near = calculate_r_sample( nearest_x, nearest_y, aspect_ratio, sn, cs );
rr_near = r_near * one_over_radius2;
}
// out of dab's reach?
if( rr_near > 1.0f )
return rr_near;
// check on which side of the dab's line is the pixel center
float center_sign = sign_point_in_line( pixel_center_x, pixel_center_y, cs, -sn );
// radius of a circle with area=1
// A = pi * r * r
// r = sqrt(1/pi)
const float rad_area_1 = sqrtf( 1.0f / M_PI );
// center is below dab
if( center_sign < 0 )
{
farthest_x = nearest_x - sn*rad_area_1;
farthest_y = nearest_y + cs*rad_area_1;
}
// above dab
else
{
farthest_x = nearest_x + sn*rad_area_1;
farthest_y = nearest_y - cs*rad_area_1;
}
r_far = calculate_r_sample( farthest_x, farthest_y, aspect_ratio, sn, cs );
rr_far = r_far * one_over_radius2;
// check if we can skip heavier AA
if( r_far < r_aa_start )
return (rr_far+rr_near) * 0.5f;
// calculate AA approximate
float visibilityNear = 1.0f - rr_near;
float delta = rr_far - rr_near;
float delta2 = 1.0f + delta;
visibilityNear /= delta2;
return 1.0f - visibilityNear;
}
static inline float
calculate_opa(float rr, float hardness,
float segment1_offset, float segment1_slope,
float segment2_offset, float segment2_slope) {
const float fac = rr <= hardness ? segment1_slope : segment2_slope;
float opa = rr <= hardness ? segment1_offset : segment2_offset;
opa += rr*fac;
if (rr > 1.0f) {
opa = 0.0f;
}
#ifdef HEAVY_DEBUG
assert(isfinite(opa));
assert(opa >= 0.0f && opa <= 1.0f);
#endif
return opa;
}
// Must be threadsafe
void render_dab_mask (uint16_t * mask,
float x, float y,
float radius,
float hardness,
float aspect_ratio, float angle
)
{
hardness = CLAMP(hardness, 0.0, 1.0);
if (aspect_ratio<1.0) aspect_ratio=1.0;
assert(hardness != 0.0); // assured by caller
// For a graphical explanation, see:
// http://wiki.mypaint.info/Development/Documentation/Brushlib
//
// The hardness calculation is explained below:
//
// Dab opacity gradually fades out from the center (rr=0) to
// fringe (rr=1) of the dab. How exactly depends on the hardness.
// We use two linear segments, for which we pre-calculate slope
// and offset here.
//
// opa
// ^
// * .
// | *
// | .
// +-----------*> rr = (distance_from_center/radius)^2
// 0 1
//
float segment1_offset = 1.0f;
float segment1_slope = -(1.0f/hardness - 1.0f);
float segment2_offset = hardness/(1.0f-hardness);
float segment2_slope = -hardness/(1.0f-hardness);
// for hardness == 1.0, segment2 will never be used
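// With these coefficients, segment 1 runs from (rr=0, opa=1) down to
// (rr=hardness, opa=hardness), and segment 2 continues from there down to
// (rr=1, opa=0).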
float angle_rad = angle / 360 * 2 * M_PI;
float cs = cos(angle_rad);
float sn = sin(angle_rad);
const float r_fringe = radius + 1.0f; // the +1.0 margin should not be required; it is just there to be safe
int x0 = floor (x - r_fringe);
int y0 = floor (y - r_fringe);
int x1 = floor (x + r_fringe);
int y1 = floor (y + r_fringe);
if (x0 < 0) x0 = 0;
if (y0 < 0) y0 = 0;
if (x1 > MYPAINT_TILE_SIZE-1) x1 = MYPAINT_TILE_SIZE-1;
if (y1 > MYPAINT_TILE_SIZE-1) y1 = MYPAINT_TILE_SIZE-1;
const float one_over_radius2 = 1.0f/(radius*radius);
// Pre-calculate rr for each pixel and store it in a temporary buffer.
// This is an optimization that makes use of auto-vectorization
// OPTIMIZE: if using floats for the brush engine, store these directly in the mask
float rr_mask[MYPAINT_TILE_SIZE*MYPAINT_TILE_SIZE+2*MYPAINT_TILE_SIZE];
if (radius < 3.0f)
{
const float aa_border = 1.0f;
float r_aa_start = ((radius>aa_border) ? (radius-aa_border) : 0);
r_aa_start *= r_aa_start / aspect_ratio;
for (int yp = y0; yp <= y1; yp++) {
for (int xp = x0; xp <= x1; xp++) {
const float rr = calculate_rr_antialiased(xp, yp,
x, y, aspect_ratio,
sn, cs, one_over_radius2,
r_aa_start);
rr_mask[(yp*MYPAINT_TILE_SIZE)+xp] = rr;
}
}
}
else
{
for (int yp = y0; yp <= y1; yp++) {
for (int xp = x0; xp <= x1; xp++) {
const float rr = calculate_rr(xp, yp,
x, y, aspect_ratio,
sn, cs, one_over_radius2);
rr_mask[(yp*MYPAINT_TILE_SIZE)+xp] = rr;
}
}
}
// We run-length encode the mask: a zero opacity value is followed by the
// number of uint16 values to skip (skipped pixels * 4, matching the four
// channels of the RGBA tile buffer).
uint16_t * mask_p = mask;
int skip=0;
skip += y0*MYPAINT_TILE_SIZE;
for (int yp = y0; yp <= y1; yp++) {
skip += x0;
int xp;
for (xp = x0; xp <= x1; xp++) {
const float rr = rr_mask[(yp*MYPAINT_TILE_SIZE)+xp];
const float opa = calculate_opa(rr, hardness,
segment1_offset, segment1_slope,
segment2_offset, segment2_slope);
const uint16_t opa_ = opa * (1<<15);
if (!opa_) {
skip++;
} else {
if (skip) {
*mask_p++ = 0;
*mask_p++ = skip*4;
skip = 0;
}
*mask_p++ = opa_;
}
}
skip += MYPAINT_TILE_SIZE-xp;
}
*mask_p++ = 0;
*mask_p++ = 0;
}
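// A minimal decoding sketch (hypothetical helper, not used by the library)
// showing how the run-length encoded mask produced above is meant to be
// walked; this mirrors what the draw_dab_pixels_* blend functions do with
// their rgba pointer, assuming 4 uint16 channels per pixel.
static inline void
example_walk_dab_mask(const uint16_t *mask, uint16_t *rgba)
{
    while (1) {
        const uint16_t opa = *mask++;
        if (opa == 0) {
            const uint16_t skip = *mask++; // pixels to skip, already multiplied by 4
            if (skip == 0) break;          // a 0,0 pair terminates the mask
            rgba += skip;
        } else {
            // opa is the dab opacity for this pixel in 15-bit fixed point
            (void)opa; // ... blend into rgba[0..3] here ...
            rgba += 4;
        }
    }
}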
// Must be threadsafe
void
process_op(uint16_t *rgba_p, uint16_t *mask,
int tx, int ty, OperationDataDrawDab *op)
{
// first, we calculate the mask (opacity for each pixel)
render_dab_mask(mask,
op->x - tx*MYPAINT_TILE_SIZE,
op->y - ty*MYPAINT_TILE_SIZE,
op->radius,
op->hardness,
op->aspect_ratio, op->angle
);
// second, we use the mask to stamp a dab for each activated blend mode
if (op->normal) {
if (op->color_a == 1.0) {
draw_dab_pixels_BlendMode_Normal(mask, rgba_p,
op->color_r, op->color_g, op->color_b, op->normal*op->opaque*(1<<15));
} else {
// normal case for brushes that use smudging (eg. watercolor)
draw_dab_pixels_BlendMode_Normal_and_Eraser(mask, rgba_p,
op->color_r, op->color_g, op->color_b, op->color_a*(1<<15), op->normal*op->opaque*(1<<15));
}
}
if (op->lock_alpha) {
draw_dab_pixels_BlendMode_LockAlpha(mask, rgba_p,
op->color_r, op->color_g, op->color_b, op->lock_alpha*op->opaque*(1<<15));
}
if (op->colorize) {
draw_dab_pixels_BlendMode_Color(mask, rgba_p,
op->color_r, op->color_g, op->color_b,
op->colorize*op->opaque*(1<<15));
}
}
// Must be threadsafe
void
process_tile(MyPaintTiledSurface *self, int tx, int ty)
{
TileIndex tile_index = {tx, ty};
OperationDataDrawDab *op = operation_queue_pop(self->operation_queue, tile_index);
if (!op) {
return;
}
MyPaintTileRequest request_data;
const int mipmap_level = 0;
mypaint_tile_request_init(&request_data, mipmap_level, tx, ty, FALSE);
mypaint_tiled_surface_tile_request_start(self, &request_data);
uint16_t * rgba_p = request_data.buffer;
if (!rgba_p) {
printf("Warning: Unable to get tile!\n");
return;
}
uint16_t mask[MYPAINT_TILE_SIZE*MYPAINT_TILE_SIZE+2*MYPAINT_TILE_SIZE];
while (op) {
process_op(rgba_p, mask, tile_index.x, tile_index.y, op);
free(op);
op = operation_queue_pop(self->operation_queue, tile_index);
}
mypaint_tiled_surface_tile_request_end(self, &request_data);
}
// OPTIMIZE: send a list of the exact changed rects instead of a bounding box
// to minimize the area being composited? Profile to see the effect first.
void
update_dirty_bbox(MyPaintTiledSurface *self, OperationDataDrawDab *op)
{
int bb_x, bb_y, bb_w, bb_h;
float r_fringe = op->radius + 1.0f; // the +1.0 margin should not be required; it is just there to be safe
bb_x = floor (op->x - r_fringe);
bb_y = floor (op->y - r_fringe);
bb_w = floor (op->x + r_fringe) - bb_x + 1;
bb_h = floor (op->y + r_fringe) - bb_y + 1;
mypaint_rectangle_expand_to_include_point(&self->dirty_bbox, bb_x, bb_y);
mypaint_rectangle_expand_to_include_point(&self->dirty_bbox, bb_x+bb_w-1, bb_y+bb_h-1);
}
// returns TRUE if the surface was modified
gboolean draw_dab_internal (MyPaintTiledSurface *self, float x, float y,
float radius,
float color_r, float color_g, float color_b,
float opaque, float hardness,
float color_a,
float aspect_ratio, float angle,
float lock_alpha,
float colorize
)
{
OperationDataDrawDab op_struct;
OperationDataDrawDab *op = &op_struct;
op->x = x;
op->y = y;
op->radius = radius;
op->aspect_ratio = aspect_ratio;
op->angle = angle;
op->opaque = CLAMP(opaque, 0.0f, 1.0f);
op->hardness = CLAMP(hardness, 0.0f, 1.0f);
op->lock_alpha = CLAMP(lock_alpha, 0.0f, 1.0f);
op->colorize = CLAMP(colorize, 0.0f, 1.0f);
if (op->radius < 0.1f) return FALSE; // don't bother with dabs smaller than 0.1 pixel
if (op->hardness == 0.0f) return FALSE; // infinitely small center point, fully transparent outside
if (op->opaque == 0.0f) return FALSE;
color_r = CLAMP(color_r, 0.0f, 1.0f);
color_g = CLAMP(color_g, 0.0f, 1.0f);
color_b = CLAMP(color_b, 0.0f, 1.0f);
color_a = CLAMP(color_a, 0.0f, 1.0f);
op->color_r = color_r * (1<<15);
op->color_g = color_g * (1<<15);
op->color_b = color_b * (1<<15);
op->color_a = color_a;
// blending mode preparation
op->normal = 1.0f;
op->normal *= 1.0f-op->lock_alpha;
op->normal *= 1.0f-op->colorize;
if (op->aspect_ratio<1.0f) op->aspect_ratio=1.0f;
// Determine the tiles influenced by operation, and queue it for processing for each tile
float r_fringe = radius + 1.0f; // the +1.0 margin should not be required; it is just there to be safe
int tx1 = floor(floor(x - r_fringe) / MYPAINT_TILE_SIZE);
int tx2 = floor(floor(x + r_fringe) / MYPAINT_TILE_SIZE);
int ty1 = floor(floor(y - r_fringe) / MYPAINT_TILE_SIZE);
int ty2 = floor(floor(y + r_fringe) / MYPAINT_TILE_SIZE);
for (int ty = ty1; ty <= ty2; ty++) {
for (int tx = tx1; tx <= tx2; tx++) {
const TileIndex tile_index = {tx, ty};
OperationDataDrawDab *op_copy = (OperationDataDrawDab *)malloc(sizeof(OperationDataDrawDab));
*op_copy = *op;
operation_queue_add(self->operation_queue, tile_index, op_copy);
}
}
update_dirty_bbox(self, op);
return TRUE;
}
// returns TRUE if the surface was modified
int draw_dab (MyPaintSurface *surface, float x, float y,
float radius,
float color_r, float color_g, float color_b,
float opaque, float hardness,
float color_a,
float aspect_ratio, float angle,
float lock_alpha,
float colorize)
{
MyPaintTiledSurface *self = (MyPaintTiledSurface *)surface;
gboolean surface_modified = FALSE;
// Normal pass
if (draw_dab_internal(self, x, y, radius, color_r, color_g, color_b,
opaque, hardness, color_a, aspect_ratio, angle,
lock_alpha, colorize)) {
surface_modified = TRUE;
}
// Symmetry pass
if(self->surface_do_symmetry) {
const float symm_x = self->surface_center_x + (self->surface_center_x - x);
if (draw_dab_internal(self, symm_x, y, radius, color_r, color_g, color_b,
opaque, hardness, color_a, aspect_ratio, -angle,
lock_alpha, colorize)) {
surface_modified = TRUE;
}
}
return surface_modified;
}
void get_color (MyPaintSurface *surface, float x, float y,
float radius,
float * color_r, float * color_g, float * color_b, float * color_a
)
{
MyPaintTiledSurface *self = (MyPaintTiledSurface *)surface;
if (radius < 1.0f) radius = 1.0f;
const float hardness = 0.5f;
const float aspect_ratio = 1.0f;
const float angle = 0.0f;
float sum_weight, sum_r, sum_g, sum_b, sum_a;
sum_weight = sum_r = sum_g = sum_b = sum_a = 0.0f;
// in case we return with an error
*color_r = 0.0f;
*color_g = 1.0f;
*color_b = 0.0f;
// WARNING: some code duplication with draw_dab
float r_fringe = radius + 1.0f; // the +1 margin should not be required; it is just there to be safe
int tx1 = floor(floor(x - r_fringe) / MYPAINT_TILE_SIZE);
int tx2 = floor(floor(x + r_fringe) / MYPAINT_TILE_SIZE);
int ty1 = floor(floor(y - r_fringe) / MYPAINT_TILE_SIZE);
int ty2 = floor(floor(y + r_fringe) / MYPAINT_TILE_SIZE);
#ifdef _OPENMP
int tiles_n = (tx2 - tx1) * (ty2 - ty1);
#endif
#pragma omp parallel for schedule(static) if(self->threadsafe_tile_requests && tiles_n > 3)
for (int ty = ty1; ty <= ty2; ty++) {
for (int tx = tx1; tx <= tx2; tx++) {
// Flush queued draw_dab operations
process_tile(self, tx, ty);
MyPaintTileRequest request_data;
const int mipmap_level = 0;
mypaint_tile_request_init(&request_data, mipmap_level, tx, ty, TRUE);
mypaint_tiled_surface_tile_request_start(self, &request_data);
uint16_t * rgba_p = request_data.buffer;
if (!rgba_p) {
printf("Warning: Unable to get tile!\n");
break;
}
// first, we calculate the mask (opacity for each pixel)
uint16_t mask[MYPAINT_TILE_SIZE*MYPAINT_TILE_SIZE+2*MYPAINT_TILE_SIZE];
render_dab_mask(mask,
x - tx*MYPAINT_TILE_SIZE,
y - ty*MYPAINT_TILE_SIZE,
radius,
hardness,
aspect_ratio, angle
);
// TODO: try atomic operations instead
#pragma omp critical
{
get_color_pixels_accumulate (mask, rgba_p,
&sum_weight, &sum_r, &sum_g, &sum_b, &sum_a);
}
mypaint_tiled_surface_tile_request_end(self, &request_data);
}
}
assert(sum_weight > 0.0f);
sum_a /= sum_weight;
sum_r /= sum_weight;
sum_g /= sum_weight;
sum_b /= sum_weight;
*color_a = sum_a;
// now un-premultiply the alpha
if (sum_a > 0.0f) {
*color_r = sum_r / sum_a;
*color_g = sum_g / sum_a;
*color_b = sum_b / sum_a;
} else {
// it is all transparent, so don't care about the colors
// (let's make them ugly so bugs will be visible)
*color_r = 0.0f;
*color_g = 1.0f;
*color_b = 0.0f;
}
// fix rounding problems that do happen due to floating point math
*color_r = CLAMP(*color_r, 0.0f, 1.0f);
*color_g = CLAMP(*color_g, 0.0f, 1.0f);
*color_b = CLAMP(*color_b, 0.0f, 1.0f);
*color_a = CLAMP(*color_a, 0.0f, 1.0f);
}
/**
* mypaint_tiled_surface_init: (skip)
*
* Initialize the surface, passing in implementations of the tile backend.
* Note: Only intended to be called from subclasses of #MyPaintTiledSurface
**/
void
mypaint_tiled_surface_init(MyPaintTiledSurface *self,
MyPaintTileRequestStartFunction tile_request_start,
MyPaintTileRequestEndFunction tile_request_end)
{
mypaint_surface_init(&self->parent);
self->parent.draw_dab = draw_dab;
self->parent.get_color = get_color;
self->parent.begin_atomic = begin_atomic_default;
self->parent.end_atomic = end_atomic_default;
self->tile_request_end = tile_request_end;
self->tile_request_start = tile_request_start;
self->tile_size = MYPAINT_TILE_SIZE;
self->threadsafe_tile_requests = FALSE;
self->dirty_bbox.x = 0;
self->dirty_bbox.y = 0;
self->dirty_bbox.width = 0;
self->dirty_bbox.height = 0;
self->surface_do_symmetry = FALSE;
self->surface_center_x = 0.0f;
self->operation_queue = operation_queue_new();
}
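/* A hypothetical subclass sketch (not part of libmypaint) showing how a
 * backend wires its tile callbacks into mypaint_tiled_surface_init().
 * The callback signatures follow how the vfuncs are invoked in
 * mypaint_tiled_surface_tile_request_start()/_end() above; the single
 * static tile and the 4-channels-per-pixel layout are assumptions made
 * for illustration only. */
typedef struct {
    MyPaintTiledSurface parent;
    uint16_t tile[MYPAINT_TILE_SIZE * MYPAINT_TILE_SIZE * 4];
} ExampleTiledSurface;

static void
example_tile_request_start(MyPaintTiledSurface *self, MyPaintTileRequest *request)
{
    // A real backend would look up (request->tx, request->ty) in its tile storage.
    request->buffer = ((ExampleTiledSurface *)self)->tile;
}

static void
example_tile_request_end(MyPaintTiledSurface *self, MyPaintTileRequest *request)
{
    // Nothing to flush back in this sketch.
    (void)self; (void)request;
}

static inline void
example_tiled_surface_init(ExampleTiledSurface *s)
{
    mypaint_tiled_surface_init(&s->parent, example_tile_request_start,
                               example_tile_request_end);
}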
/**
* mypaint_tiled_surface_destroy: (skip)
*
* Deallocate resources set up by mypaint_tiled_surface_init()
* Does not free the #MyPaintTiledSurface itself.
* Note: Only intended to be called from subclasses of #MyPaintTiledSurface
*/
void
mypaint_tiled_surface_destroy(MyPaintTiledSurface *self)
{
operation_queue_free(self->operation_queue);
}
|
oskar_evaluate_cross_power_omp.c | /*
* Copyright (c) 2014-2015, The University of Oxford
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the University of Oxford nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "correlate/oskar_evaluate_cross_power_omp.h"
#include "correlate/private_correlate_functions_inline.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Single precision. */
void oskar_evaluate_cross_power_omp_f(const int num_sources,
const int num_stations, const float4c* restrict jones,
float4c* restrict out)
{
int i = 0;
float norm;
norm = 2.0f / (num_stations * (num_stations - 1));
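    /* There are num_stations * (num_stations - 1) / 2 distinct baselines
       (station pairs with SP < SQ), hence the factor of 2 in the average
       applied below. */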
#pragma omp parallel for private(i)
for (i = 0; i < num_sources; ++i)
{
int SP, SQ;
float4c val, p, q;
/* Calculate cross-power product at the source. */
OSKAR_CLEAR_COMPLEX_MATRIX(float, val)
for (SP = 0; SP < num_stations; ++SP)
{
/* Load data for first station. */
OSKAR_LOAD_MATRIX(p, jones[SP * num_sources + i]);
/* Cross-correlate. */
for (SQ = SP + 1; SQ < num_stations; ++SQ)
{
/* Load data for second station. */
OSKAR_LOAD_MATRIX(q, jones[SQ * num_sources + i]);
/* Multiply-add: val += p * conj(q). */
OSKAR_MUL_ADD_COMPLEX_CONJUGATE(val.a, p.a, q.a);
OSKAR_MUL_ADD_COMPLEX_CONJUGATE(val.a, p.b, q.b);
OSKAR_MUL_ADD_COMPLEX_CONJUGATE(val.b, p.a, q.c);
OSKAR_MUL_ADD_COMPLEX_CONJUGATE(val.b, p.b, q.d);
OSKAR_MUL_ADD_COMPLEX_CONJUGATE(val.c, p.c, q.a);
OSKAR_MUL_ADD_COMPLEX_CONJUGATE(val.c, p.d, q.b);
OSKAR_MUL_ADD_COMPLEX_CONJUGATE(val.d, p.c, q.c);
OSKAR_MUL_ADD_COMPLEX_CONJUGATE(val.d, p.d, q.d);
}
}
/* Calculate average by dividing by number of baselines. */
val.a.x *= norm;
val.a.y *= norm;
val.b.x *= norm;
val.b.y *= norm;
val.c.x *= norm;
val.c.y *= norm;
val.d.x *= norm;
val.d.y *= norm;
/* Store result. */
out[i] = val;
}
}
void oskar_evaluate_cross_power_scalar_omp_f(
const int num_sources, const int num_stations,
const float2* restrict jones, float2* restrict out)
{
int i = 0;
float norm;
norm = 2.0f / (num_stations * (num_stations - 1));
#pragma omp parallel for private(i)
for (i = 0; i < num_sources; ++i)
{
int SP, SQ;
float2 val1, val2, p, q;
/* Calculate cross-power product at the source. */
val1.x = 0.0f;
val1.y = 0.0f;
for (SP = 0; SP < num_stations; ++SP)
{
            /* Load data for first station. */
p = jones[SP * num_sources + i];
val2.x = 0.0f;
val2.y = 0.0f;
/* Cross-correlate. */
for (SQ = SP + 1; SQ < num_stations; ++SQ)
{
                /* Load data for second station. */
q = jones[SQ * num_sources + i];
/* Multiply-add: val += p * conj(q). */
OSKAR_MUL_ADD_COMPLEX_CONJUGATE(val2, p, q);
}
/* Accumulate partial sum (try to preserve numerical precision). */
val1.x += val2.x;
val1.y += val2.y;
}
/* Calculate average by dividing by number of baselines. */
val1.x *= norm;
val1.y *= norm;
/* Store result. */
out[i] = val1;
}
}
/* Double precision. */
void oskar_evaluate_cross_power_omp_d(const int num_sources,
const int num_stations, const double4c* restrict jones,
double4c* restrict out)
{
int i = 0;
double norm;
norm = 2.0 / (num_stations * (num_stations - 1));
#pragma omp parallel for private(i)
for (i = 0; i < num_sources; ++i)
{
int SP, SQ;
double4c val, p, q;
/* Calculate cross-power product at the source. */
OSKAR_CLEAR_COMPLEX_MATRIX(double, val)
for (SP = 0; SP < num_stations; ++SP)
{
/* Load data for first station. */
OSKAR_LOAD_MATRIX(p, jones[SP * num_sources + i]);
/* Cross-correlate. */
for (SQ = SP + 1; SQ < num_stations; ++SQ)
{
/* Load data for second station. */
OSKAR_LOAD_MATRIX(q, jones[SQ * num_sources + i]);
/* Multiply-add: val += p * conj(q). */
OSKAR_MUL_ADD_COMPLEX_CONJUGATE(val.a, p.a, q.a);
OSKAR_MUL_ADD_COMPLEX_CONJUGATE(val.a, p.b, q.b);
OSKAR_MUL_ADD_COMPLEX_CONJUGATE(val.b, p.a, q.c);
OSKAR_MUL_ADD_COMPLEX_CONJUGATE(val.b, p.b, q.d);
OSKAR_MUL_ADD_COMPLEX_CONJUGATE(val.c, p.c, q.a);
OSKAR_MUL_ADD_COMPLEX_CONJUGATE(val.c, p.d, q.b);
OSKAR_MUL_ADD_COMPLEX_CONJUGATE(val.d, p.c, q.c);
OSKAR_MUL_ADD_COMPLEX_CONJUGATE(val.d, p.d, q.d);
}
}
/* Calculate average by dividing by number of baselines. */
val.a.x *= norm;
val.a.y *= norm;
val.b.x *= norm;
val.b.y *= norm;
val.c.x *= norm;
val.c.y *= norm;
val.d.x *= norm;
val.d.y *= norm;
/* Store result. */
out[i] = val;
}
}
void oskar_evaluate_cross_power_scalar_omp_d(
const int num_sources, const int num_stations,
const double2* restrict jones, double2* restrict out)
{
int i = 0;
double norm;
norm = 2.0 / (num_stations * (num_stations - 1));
#pragma omp parallel for private(i)
for (i = 0; i < num_sources; ++i)
{
int SP, SQ;
double2 val1, val2, p, q;
/* Calculate cross-power product at the source. */
val1.x = 0.0;
val1.y = 0.0;
for (SP = 0; SP < num_stations; ++SP)
{
            /* Load data for first station. */
p = jones[SP * num_sources + i];
val2.x = 0.0;
val2.y = 0.0;
/* Cross-correlate. */
for (SQ = SP + 1; SQ < num_stations; ++SQ)
{
                /* Load data for second station. */
q = jones[SQ * num_sources + i];
/* Multiply-add: val += p * conj(q). */
OSKAR_MUL_ADD_COMPLEX_CONJUGATE(val2, p, q);
}
/* Accumulate partial sum (try to preserve numerical precision). */
val1.x += val2.x;
val1.y += val2.y;
}
/* Calculate average by dividing by number of baselines. */
val1.x *= norm;
val1.y *= norm;
/* Store result. */
out[i] = val1;
}
}
#ifdef __cplusplus
}
#endif
|
primo_numeros_reduction.c | #include <stdio.h>
#include <math.h>
#include <omp.h>
typedef unsigned long long Entero_grande;
#define N 100000000ULL
int primo(Entero_grande n)
{
int p;
Entero_grande i, s;
p = (n % 2 != 0 || n == 2);
if (p) {
s = sqrt(n);
for (i = 3; p && i <= s; i += 2)
if (n % i == 0) p = 0;
}
return p;
}
int main()
{
Entero_grande i, n;
#ifdef _OPENMP
double t1 = omp_get_wtime();
#endif
int numberOfThreads;
#pragma omp parallel
numberOfThreads = omp_get_num_threads();
n = 2; /* Account for 1 and 2 */
#pragma omp parallel for reduction(+:n) schedule(runtime)
for (i = 3; i <= N; i += 2){
if (primo(i))
{
n++;
}
}
#ifdef _OPENMP
double t2 = omp_get_wtime();
printf("looptime: %f seconds \n", t2-t1);
#endif
printf("Entre el 1 y el %llu hay %llu numeros primos.\n",
N, n);
return 0;
}
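/* Note: schedule(runtime) defers the loop schedule to the OMP_SCHEDULE
 * environment variable. Illustrative invocation (the shell usage and binary
 * name are assumptions, not part of this program):
 *   OMP_NUM_THREADS=8 OMP_SCHEDULE="dynamic,10000" ./primo_numeros_reduction
 */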
|
data.c | #include "data.h"
#include "utils.h"
#include "image.h"
#include "cuda.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
list *get_paths(char *filename)
{
char *path;
FILE *file = fopen(filename, "r");
if(!file) file_error(filename);
list *lines = make_list();
while((path=fgetl(file))){
list_insert(lines, path);
}
fclose(file);
return lines;
}
/*
char **get_random_paths_indexes(char **paths, int n, int m, int *indexes)
{
char **random_paths = calloc(n, sizeof(char*));
int i;
pthread_mutex_lock(&mutex);
for(i = 0; i < n; ++i){
int index = rand()%m;
indexes[i] = index;
random_paths[i] = paths[index];
if(i == 0) printf("%s\n", paths[index]);
}
pthread_mutex_unlock(&mutex);
return random_paths;
}
*/
char **get_random_paths(char **paths, int n, int m)
{
char **random_paths = calloc(n, sizeof(char*));
int i;
pthread_mutex_lock(&mutex);
for(i = 0; i < n; ++i){
int index = rand()%m;
random_paths[i] = paths[index];
//if(i == 0) printf("%s\n", paths[index]);
}
pthread_mutex_unlock(&mutex);
return random_paths;
}
char **find_replace_paths(char **paths, int n, char *find, char *replace)
{
char **replace_paths = calloc(n, sizeof(char*));
int i;
for(i = 0; i < n; ++i){
char replaced[4096];
find_replace(paths[i], find, replace, replaced);
replace_paths[i] = copy_string(replaced);
}
return replace_paths;
}
matrix load_image_paths_gray(char **paths, int n, int w, int h)
{
int i;
matrix X;
X.rows = n;
X.vals = calloc(X.rows, sizeof(float*));
X.cols = 0;
for(i = 0; i < n; ++i){
image im = load_image(paths[i], w, h, 3);
image gray = grayscale_image(im);
free_image(im);
im = gray;
X.vals[i] = im.data;
X.cols = im.h*im.w*im.c;
}
return X;
}
matrix load_image_paths(char **paths, int n, int w, int h)
{
int i;
matrix X;
X.rows = n;
X.vals = calloc(X.rows, sizeof(float*));
X.cols = 0;
for(i = 0; i < n; ++i){
image im = load_image_color(paths[i], w, h);
X.vals[i] = im.data;
X.cols = im.h*im.w*im.c;
}
return X;
}
matrix load_image_augment_paths(char **paths, int n, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
{
int i;
matrix X;
X.rows = n;
X.vals = calloc(X.rows, sizeof(float*));
X.cols = 0;
for(i = 0; i < n; ++i){
image im = load_image_color(paths[i], 0, 0);
image crop;
if(center){
crop = center_crop_image(im, size, size);
} else {
crop = random_augment_image(im, angle, aspect, min, max, size, size);
}
int flip = rand()%2;
if (flip) flip_image(crop);
random_distort_image(crop, hue, saturation, exposure);
/*
show_image(im, "orig");
show_image(crop, "crop");
cvWaitKey(0);
*/
free_image(im);
X.vals[i] = crop.data;
X.cols = crop.h*crop.w*crop.c;
}
return X;
}
box_label *read_boxes(char *filename, int *n)
{
FILE *file = fopen(filename, "r");
if(!file) file_error(filename);
float x, y, h, w;
int id;
int count = 0;
int size = 64;
box_label *boxes = calloc(size, sizeof(box_label));
while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){
if(count == size) {
size = size * 2;
boxes = realloc(boxes, size*sizeof(box_label));
}
boxes[count].id = id;
boxes[count].x = x;
boxes[count].y = y;
boxes[count].h = h;
boxes[count].w = w;
boxes[count].left = x - w/2;
boxes[count].right = x + w/2;
boxes[count].top = y - h/2;
boxes[count].bottom = y + h/2;
++count;
}
fclose(file);
*n = count;
return boxes;
}
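/* Illustrative label-file contents for read_boxes() (the numbers are made up):
 * each line is "<class id> <x> <y> <w> <h>", with the box center and size given
 * as floats, typically normalized to [0,1] relative to the image, e.g.
 *   3 0.512 0.337 0.120 0.250
 *   0 0.250 0.750 0.300 0.180
 */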
void randomize_boxes(box_label *b, int n)
{
int i;
for(i = 0; i < n; ++i){
box_label swap = b[i];
int index = rand()%n;
b[i] = b[index];
b[index] = swap;
}
}
void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip)
{
int i;
for(i = 0; i < n; ++i){
if(boxes[i].x == 0 && boxes[i].y == 0) {
boxes[i].x = 999999;
boxes[i].y = 999999;
boxes[i].w = 999999;
boxes[i].h = 999999;
continue;
}
boxes[i].left = boxes[i].left * sx - dx;
boxes[i].right = boxes[i].right * sx - dx;
boxes[i].top = boxes[i].top * sy - dy;
boxes[i].bottom = boxes[i].bottom* sy - dy;
if(flip){
float swap = boxes[i].left;
boxes[i].left = 1. - boxes[i].right;
boxes[i].right = 1. - swap;
}
boxes[i].left = constrain(0, 1, boxes[i].left);
boxes[i].right = constrain(0, 1, boxes[i].right);
boxes[i].top = constrain(0, 1, boxes[i].top);
boxes[i].bottom = constrain(0, 1, boxes[i].bottom);
boxes[i].x = (boxes[i].left+boxes[i].right)/2;
boxes[i].y = (boxes[i].top+boxes[i].bottom)/2;
boxes[i].w = (boxes[i].right - boxes[i].left);
boxes[i].h = (boxes[i].bottom - boxes[i].top);
boxes[i].w = constrain(0, 1, boxes[i].w);
boxes[i].h = constrain(0, 1, boxes[i].h);
}
}
void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
char labelpath[4096];
find_replace(path, "images", "labels", labelpath);
find_replace(labelpath, "JPEGImages", "labels", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
int count = 0;
box_label *boxes = read_boxes(labelpath, &count);
randomize_boxes(boxes, count);
correct_boxes(boxes, count, dx, dy, sx, sy, flip);
float x,y,w,h;
int id;
int i;
for (i = 0; i < count && i < 90; ++i) {
x = boxes[i].x;
y = boxes[i].y;
w = boxes[i].w;
h = boxes[i].h;
id = boxes[i].id;
if (w < .0 || h < .0) continue;
int index = (4+classes) * i;
truth[index++] = x;
truth[index++] = y;
truth[index++] = w;
truth[index++] = h;
if (id < classes) truth[index+id] = 1;
}
free(boxes);
}
void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy)
{
char labelpath[4096];
find_replace(path, "images", "labels", labelpath);
find_replace(labelpath, "JPEGImages", "labels", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".png", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
int count = 0;
box_label *boxes = read_boxes(labelpath, &count);
randomize_boxes(boxes, count);
correct_boxes(boxes, count, dx, dy, sx, sy, flip);
float x,y,w,h;
int id;
int i;
for (i = 0; i < count; ++i) {
x = boxes[i].x;
y = boxes[i].y;
w = boxes[i].w;
h = boxes[i].h;
id = boxes[i].id;
if (w < .005 || h < .005) continue;
int col = (int)(x*num_boxes);
int row = (int)(y*num_boxes);
x = x*num_boxes - col;
y = y*num_boxes - row;
int index = (col+row*num_boxes)*(5+classes);
if (truth[index]) continue;
truth[index++] = 1;
if (id < classes) truth[index+id] = 1;
index += classes;
truth[index++] = x;
truth[index++] = y;
truth[index++] = w;
truth[index++] = h;
}
free(boxes);
}
void load_rle(image im, int *rle, int n)
{
int count = 0;
int curr = 0;
int i,j;
for(i = 0; i < n; ++i){
for(j = 0; j < rle[i]; ++j){
im.data[count++] = curr;
}
curr = 1 - curr;
}
for(; count < im.h*im.w*im.c; ++count){
im.data[count] = curr;
}
}
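/* Example: with rle = {3, 2, 4} and n = 3, load_rle() writes
 *   0 0 0  1 1  0 0 0 0
 * into im.data (runs alternate starting from 0) and fills any remaining
 * pixels with the next run value, here 1. */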
void or_image(image src, image dest, int c)
{
int i;
for(i = 0; i < src.w*src.h; ++i){
if(src.data[i]) dest.data[dest.w*dest.h*c + i] = 1;
}
}
void exclusive_image(image src)
{
int k, j, i;
int s = src.w*src.h;
for(k = 0; k < src.c-1; ++k){
for(i = 0; i < s; ++i){
if (src.data[k*s + i]){
for(j = k+1; j < src.c; ++j){
src.data[j*s + i] = 0;
}
}
}
}
}
box bound_image(image im)
{
int x,y;
int minx = im.w;
int miny = im.h;
int maxx = 0;
int maxy = 0;
for(y = 0; y < im.h; ++y){
for(x = 0; x < im.w; ++x){
if(im.data[y*im.w + x]){
minx = (x < minx) ? x : minx;
miny = (y < miny) ? y : miny;
maxx = (x > maxx) ? x : maxx;
maxy = (y > maxy) ? y : maxy;
}
}
}
box b = {minx, miny, maxx-minx + 1, maxy-miny + 1};
//printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
return b;
}
void fill_truth_iseg(char *path, int num_boxes, float *truth, int classes, int w, int h, augment_args aug, int flip, int mw, int mh)
{
char labelpath[4096];
find_replace(path, "images", "mask", labelpath);
find_replace(labelpath, "JPEGImages", "mask", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
FILE *file = fopen(labelpath, "r");
if(!file) file_error(labelpath);
char buff[32788];
int id;
int i = 0;
image part = make_image(w, h, 1);
while((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes){
int n = 0;
int *rle = read_intlist(buff, &n, 0);
load_rle(part, rle, n);
image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect);
if(flip) flip_image(sized);
box b = bound_image(sized);
if(b.w > 0){
image crop = crop_image(sized, b.x, b.y, b.w, b.h);
image mask = resize_image(crop, mw, mh);
truth[i*(4 + mw*mh + 1) + 0] = (b.x + b.w/2.)/sized.w;
truth[i*(4 + mw*mh + 1) + 1] = (b.y + b.h/2.)/sized.h;
truth[i*(4 + mw*mh + 1) + 2] = b.w/sized.w;
truth[i*(4 + mw*mh + 1) + 3] = b.h/sized.h;
int j;
for(j = 0; j < mw*mh; ++j){
truth[i*(4 + mw*mh + 1) + 4 + j] = mask.data[j];
}
truth[i*(4 + mw*mh + 1) + 4 + mw*mh] = id;
free_image(crop);
free_image(mask);
++i;
}
free_image(sized);
free(rle);
}
fclose(file);
free_image(part);
}
void fill_truth_detection(char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
char labelpath[4096];
find_replace(path, "images", "labels", labelpath);
find_replace(labelpath, "JPEGImages", "labels", labelpath);
find_replace(labelpath, "raw", "labels", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".png", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
int count = 0;
box_label *boxes = read_boxes(labelpath, &count);
randomize_boxes(boxes, count);
correct_boxes(boxes, count, dx, dy, sx, sy, flip);
if(count > num_boxes) count = num_boxes;
float x,y,w,h;
int id;
int i;
int sub = 0;
for (i = 0; i < count; ++i) {
x = boxes[i].x;
y = boxes[i].y;
w = boxes[i].w;
h = boxes[i].h;
id = boxes[i].id;
if ((w < .001 || h < .001)) {
++sub;
continue;
}
truth[(i-sub)*5+0] = x;
truth[(i-sub)*5+1] = y;
truth[(i-sub)*5+2] = w;
truth[(i-sub)*5+3] = h;
truth[(i-sub)*5+4] = id;
}
free(boxes);
}
#define NUMCHARS 37
void print_letters(float *pred, int n)
{
int i;
for(i = 0; i < n; ++i){
int index = max_index(pred+i*NUMCHARS, NUMCHARS);
printf("%c", int_to_alphanum(index));
}
printf("\n");
}
void fill_truth_captcha(char *path, int n, float *truth)
{
char *begin = strrchr(path, '/');
++begin;
int i;
for(i = 0; i < strlen(begin) && i < n && begin[i] != '.'; ++i){
int index = alphanum_to_int(begin[i]);
if(index > 35) printf("Bad %c\n", begin[i]);
truth[i*NUMCHARS+index] = 1;
}
for(;i < n; ++i){
truth[i*NUMCHARS + NUMCHARS-1] = 1;
}
}
data load_data_captcha(char **paths, int n, int m, int k, int w, int h)
{
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.shallow = 0;
d.X = load_image_paths(paths, n, w, h);
d.y = make_matrix(n, k*NUMCHARS);
int i;
for(i = 0; i < n; ++i){
fill_truth_captcha(paths[i], k, d.y.vals[i]);
}
if(m) free(paths);
return d;
}
data load_data_captcha_encode(char **paths, int n, int m, int w, int h)
{
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.shallow = 0;
d.X = load_image_paths(paths, n, w, h);
d.X.cols = 17100;
d.y = d.X;
if(m) free(paths);
return d;
}
void fill_truth(char *path, char **labels, int k, float *truth)
{
int i;
memset(truth, 0, k*sizeof(float));
int count = 0;
for(i = 0; i < k; ++i){
if(strstr(path, labels[i])){
truth[i] = 1;
++count;
//printf("%s %s %d\n", path, labels[i], i);
}
}
if(count != 1 && (k != 1 || count != 0)) printf("Too many or too few labels: %d, %s\n", count, path);
}
void fill_hierarchy(float *truth, int k, tree *hierarchy)
{
int j;
for(j = 0; j < k; ++j){
if(truth[j]){
int parent = hierarchy->parent[j];
while(parent >= 0){
truth[parent] = 1;
parent = hierarchy->parent[parent];
}
}
}
int i;
int count = 0;
for(j = 0; j < hierarchy->groups; ++j){
//printf("%d\n", count);
int mask = 1;
for(i = 0; i < hierarchy->group_size[j]; ++i){
if(truth[count + i]){
mask = 0;
break;
}
}
if (mask) {
for(i = 0; i < hierarchy->group_size[j]; ++i){
truth[count + i] = SECRET_NUM;
}
}
count += hierarchy->group_size[j];
}
}
matrix load_regression_labels_paths(char **paths, int n, int k)
{
matrix y = make_matrix(n, k);
int i,j;
for(i = 0; i < n; ++i){
char labelpath[4096];
find_replace(paths[i], "images", "labels", labelpath);
find_replace(labelpath, "JPEGImages", "labels", labelpath);
find_replace(labelpath, ".BMP", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPeG", ".txt", labelpath);
find_replace(labelpath, ".Jpeg", ".txt", labelpath);
find_replace(labelpath, ".PNG", ".txt", labelpath);
find_replace(labelpath, ".TIF", ".txt", labelpath);
find_replace(labelpath, ".bmp", ".txt", labelpath);
find_replace(labelpath, ".jpeg", ".txt", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".png", ".txt", labelpath);
find_replace(labelpath, ".tif", ".txt", labelpath);
FILE *file = fopen(labelpath, "r");
for(j = 0; j < k; ++j){
fscanf(file, "%f", &(y.vals[i][j]));
}
fclose(file);
}
return y;
}
matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy)
{
matrix y = make_matrix(n, k);
int i;
for(i = 0; i < n && labels; ++i){
fill_truth(paths[i], labels, k, y.vals[i]);
if(hierarchy){
fill_hierarchy(y.vals[i], k, hierarchy);
}
}
return y;
}
matrix load_tags_paths(char **paths, int n, int k)
{
matrix y = make_matrix(n, k);
int i;
//int count = 0;
for(i = 0; i < n; ++i){
char label[4096];
find_replace(paths[i], "images", "labels", label);
find_replace(label, ".jpg", ".txt", label);
FILE *file = fopen(label, "r");
if (!file) continue;
//++count;
int tag;
while(fscanf(file, "%d", &tag) == 1){
if(tag < k){
y.vals[i][tag] = 1;
}
}
fclose(file);
}
//printf("%d/%d\n", count, n);
return y;
}
char **get_labels(char *filename)
{
list *plist = get_paths(filename);
char **labels = (char **)list_to_array(plist);
free_list(plist);
return labels;
}
void free_data(data d)
{
if(!d.shallow){
free_matrix(d.X);
free_matrix(d.y);
}else{
free(d.X.vals);
free(d.y.vals);
}
}
image get_segmentation_image(char *path, int w, int h, int classes)
{
char labelpath[4096];
find_replace(path, "images", "mask", labelpath);
find_replace(labelpath, "JPEGImages", "mask", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
image mask = make_image(w, h, classes);
FILE *file = fopen(labelpath, "r");
if(!file) file_error(labelpath);
char buff[32788];
int id;
image part = make_image(w, h, 1);
while(fscanf(file, "%d %s", &id, buff) == 2){
int n = 0;
int *rle = read_intlist(buff, &n, 0);
load_rle(part, rle, n);
or_image(part, mask, id);
free(rle);
}
//exclusive_image(mask);
fclose(file);
free_image(part);
return mask;
}
image get_segmentation_image2(char *path, int w, int h, int classes)
{
char labelpath[4096];
find_replace(path, "images", "mask", labelpath);
find_replace(labelpath, "JPEGImages", "mask", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
image mask = make_image(w, h, classes+1);
int i;
for(i = 0; i < w*h; ++i){
mask.data[w*h*classes + i] = 1;
}
FILE *file = fopen(labelpath, "r");
if(!file) file_error(labelpath);
char buff[32788];
int id;
image part = make_image(w, h, 1);
while(fscanf(file, "%d %s", &id, buff) == 2){
int n = 0;
int *rle = read_intlist(buff, &n, 0);
load_rle(part, rle, n);
or_image(part, mask, id);
for(i = 0; i < w*h; ++i){
if(part.data[i]) mask.data[w*h*classes + i] = 0;
}
free(rle);
}
//exclusive_image(mask);
fclose(file);
free_image(part);
return mask;
}
data load_data_seg(int n, char **paths, int m, int w, int h, int classes, int min, int max, float angle, float aspect, float hue, float saturation, float exposure, int div)
{
char **random_paths = get_random_paths(paths, n, m);
int i;
data d = {0};
d.shallow = 0;
d.X.rows = n;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
d.y.rows = n;
d.y.cols = h*w*classes/div/div;
d.y.vals = calloc(d.X.rows, sizeof(float*));
for(i = 0; i < n; ++i){
image orig = load_image_color(random_paths[i], 0, 0);
augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
int flip = rand()%2;
if(flip) flip_image(sized);
random_distort_image(sized, hue, saturation, exposure);
d.X.vals[i] = sized.data;
image mask = get_segmentation_image(random_paths[i], orig.w, orig.h, classes);
//image mask = make_image(orig.w, orig.h, classes+1);
image sized_m = rotate_crop_image(mask, a.rad, a.scale/div, a.w/div, a.h/div, a.dx/div, a.dy/div, a.aspect);
if(flip) flip_image(sized_m);
d.y.vals[i] = sized_m.data;
free_image(orig);
free_image(mask);
/*
image rgb = mask_to_rgb(sized_m, classes);
show_image(rgb, "part");
show_image(sized, "orig");
cvWaitKey(0);
free_image(rgb);
*/
}
free(random_paths);
return d;
}
data load_data_iseg(int n, char **paths, int m, int w, int h, int classes, int boxes, int coords, int min, int max, float angle, float aspect, float hue, float saturation, float exposure)
{
char **random_paths = get_random_paths(paths, n, m);
int i;
data d = {0};
d.shallow = 0;
d.X.rows = n;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
d.y = make_matrix(n, (coords+1)*boxes);
for(i = 0; i < n; ++i){
image orig = load_image_color(random_paths[i], 0, 0);
augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
int flip = rand()%2;
if(flip) flip_image(sized);
random_distort_image(sized, hue, saturation, exposure);
d.X.vals[i] = sized.data;
//show_image(sized, "image");
fill_truth_iseg(random_paths[i], boxes, d.y.vals[i], classes, orig.w, orig.h, a, flip, 14, 14);
free_image(orig);
/*
image rgb = mask_to_rgb(sized_m, classes);
show_image(rgb, "part");
show_image(sized, "orig");
cvWaitKey(0);
free_image(rgb);
*/
}
free(random_paths);
return d;
}
data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure)
{
char **random_paths = get_random_paths(paths, n, m);
int i;
data d = {0};
d.shallow = 0;
d.X.rows = n;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
int k = size*size*(5+classes);
d.y = make_matrix(n, k);
for(i = 0; i < n; ++i){
image orig = load_image_color(random_paths[i], 0, 0);
int oh = orig.h;
int ow = orig.w;
int dw = (ow*jitter);
int dh = (oh*jitter);
int pleft = rand_uniform(-dw, dw);
int pright = rand_uniform(-dw, dw);
int ptop = rand_uniform(-dh, dh);
int pbot = rand_uniform(-dh, dh);
int swidth = ow - pleft - pright;
int sheight = oh - ptop - pbot;
float sx = (float)swidth / ow;
float sy = (float)sheight / oh;
int flip = rand()%2;
image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
float dx = ((float)pleft/ow)/sx;
float dy = ((float)ptop /oh)/sy;
image sized = resize_image(cropped, w, h);
if(flip) flip_image(sized);
random_distort_image(sized, hue, saturation, exposure);
d.X.vals[i] = sized.data;
fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy);
free_image(orig);
free_image(cropped);
}
free(random_paths);
return d;
}
data load_data_compare(int n, char **paths, int m, int classes, int w, int h)
{
if(m) paths = get_random_paths(paths, 2*n, m);
int i,j;
data d = {0};
d.shallow = 0;
d.X.rows = n;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*6;
int k = 2*(classes);
d.y = make_matrix(n, k);
for(i = 0; i < n; ++i){
image im1 = load_image_color(paths[i*2], w, h);
image im2 = load_image_color(paths[i*2+1], w, h);
d.X.vals[i] = calloc(d.X.cols, sizeof(float));
memcpy(d.X.vals[i], im1.data, h*w*3*sizeof(float));
memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float));
int id;
float iou;
char imlabel1[4096];
char imlabel2[4096];
find_replace(paths[i*2], "imgs", "labels", imlabel1);
find_replace(imlabel1, "jpg", "txt", imlabel1);
FILE *fp1 = fopen(imlabel1, "r");
while(fscanf(fp1, "%d %f", &id, &iou) == 2){
if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou;
}
find_replace(paths[i*2+1], "imgs", "labels", imlabel2);
find_replace(imlabel2, "jpg", "txt", imlabel2);
FILE *fp2 = fopen(imlabel2, "r");
while(fscanf(fp2, "%d %f", &id, &iou) == 2){
if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou;
}
for (j = 0; j < classes; ++j){
if (d.y.vals[i][2*j] > .5 && d.y.vals[i][2*j+1] < .5){
d.y.vals[i][2*j] = 1;
d.y.vals[i][2*j+1] = 0;
} else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){
d.y.vals[i][2*j] = 0;
d.y.vals[i][2*j+1] = 1;
} else {
d.y.vals[i][2*j] = SECRET_NUM;
d.y.vals[i][2*j+1] = SECRET_NUM;
}
}
fclose(fp1);
fclose(fp2);
free_image(im1);
free_image(im2);
}
if(m) free(paths);
return d;
}
data load_data_swag(char **paths, int n, int classes, float jitter)
{
int index = rand()%n;
char *random_path = paths[index];
image orig = load_image_color(random_path, 0, 0);
int h = orig.h;
int w = orig.w;
data d = {0};
d.shallow = 0;
d.w = w;
d.h = h;
d.X.rows = 1;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
int k = (4+classes)*90;
d.y = make_matrix(1, k);
int dw = w*jitter;
int dh = h*jitter;
int pleft = rand_uniform(-dw, dw);
int pright = rand_uniform(-dw, dw);
int ptop = rand_uniform(-dh, dh);
int pbot = rand_uniform(-dh, dh);
int swidth = w - pleft - pright;
int sheight = h - ptop - pbot;
float sx = (float)swidth / w;
float sy = (float)sheight / h;
int flip = rand()%2;
image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
float dx = ((float)pleft/w)/sx;
float dy = ((float)ptop /h)/sy;
image sized = resize_image(cropped, w, h);
if(flip) flip_image(sized);
d.X.vals[0] = sized.data;
fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy);
free_image(orig);
free_image(cropped);
return d;
}
data load_data_detection(int n, char **paths, int m, int w, int h, int boxes, int classes, float jitter, float hue, float saturation, float exposure)
{
char **random_paths = get_random_paths(paths, n, m);
int i;
data d = {0};
d.shallow = 0;
d.X.rows = n;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
d.y = make_matrix(n, 5*boxes);
for(i = 0; i < n; ++i){
image orig = load_image_color(random_paths[i], 0, 0);
image sized = make_image(w, h, orig.c);
fill_image(sized, .5);
float dw = jitter * orig.w;
float dh = jitter * orig.h;
float new_ar = (orig.w + rand_uniform(-dw, dw)) / (orig.h + rand_uniform(-dh, dh));
float scale = rand_uniform(.25, 2);
float nw, nh;
if(new_ar < 1){
nh = scale * h;
nw = nh * new_ar;
} else {
nw = scale * w;
nh = nw / new_ar;
}
float dx = rand_uniform(0, w - nw);
float dy = rand_uniform(0, h - nh);
place_image(orig, nw, nh, dx, dy, sized);
random_distort_image(sized, hue, saturation, exposure);
int flip = rand()%2;
if(flip) flip_image(sized);
d.X.vals[i] = sized.data;
fill_truth_detection(random_paths[i], boxes, d.y.vals[i], classes, flip, -dx/w, -dy/h, nw/w, nh/h);
free_image(orig);
}
free(random_paths);
return d;
}
void *load_thread(void *ptr)
{
//printf("Loading data: %d\n", rand());
load_args a = *(struct load_args*)ptr;
if(a.exposure == 0) a.exposure = 1;
if(a.saturation == 0) a.saturation = 1;
if(a.aspect == 0) a.aspect = 1;
if (a.type == OLD_CLASSIFICATION_DATA){
*a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h);
} else if (a.type == REGRESSION_DATA){
*a.d = load_data_regression(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
} else if (a.type == CLASSIFICATION_DATA){
*a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.center);
} else if (a.type == SUPER_DATA){
*a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale);
} else if (a.type == WRITING_DATA){
*a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h);
} else if (a.type == INSTANCE_DATA){
*a.d = load_data_iseg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.num_boxes, a.coords, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
} else if (a.type == SEGMENTATION_DATA){
*a.d = load_data_seg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.scale);
} else if (a.type == REGION_DATA){
*a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
} else if (a.type == DETECTION_DATA){
*a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
} else if (a.type == SWAG_DATA){
*a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter);
} else if (a.type == COMPARE_DATA){
*a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h);
} else if (a.type == IMAGE_DATA){
*(a.im) = load_image_color(a.path, 0, 0);
*(a.resized) = resize_image(*(a.im), a.w, a.h);
} else if (a.type == LETTERBOX_DATA){
*(a.im) = load_image_color(a.path, 0, 0);
*(a.resized) = letterbox_image(*(a.im), a.w, a.h);
} else if (a.type == TAG_DATA){
*a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
}
free(ptr);
return 0;
}
pthread_t load_data_in_thread(load_args args)
{
pthread_t thread;
struct load_args *ptr = calloc(1, sizeof(struct load_args));
*ptr = args;
if(pthread_create(&thread, 0, load_thread, ptr)) error("Thread creation failed");
return thread;
}
void *load_threads(void *ptr)
{
int i;
load_args args = *(load_args *)ptr;
if (args.threads == 0) args.threads = 1;
data *out = args.d;
int total = args.n;
free(ptr);
data *buffers = calloc(args.threads, sizeof(data));
pthread_t *threads = calloc(args.threads, sizeof(pthread_t));
for(i = 0; i < args.threads; ++i){
args.d = buffers + i;
args.n = (i+1) * total/args.threads - i * total/args.threads;
threads[i] = load_data_in_thread(args);
}
for(i = 0; i < args.threads; ++i){
pthread_join(threads[i], 0);
}
*out = concat_datas(buffers, args.threads);
out->shallow = 0;
for(i = 0; i < args.threads; ++i){
buffers[i].shallow = 1;
free_data(buffers[i]);
}
free(buffers);
free(threads);
return 0;
}
void load_data_blocking(load_args args)
{
struct load_args *ptr = calloc(1, sizeof(struct load_args));
*ptr = args;
load_thread(ptr);
}
pthread_t load_data(load_args args)
{
pthread_t thread;
struct load_args *ptr = calloc(1, sizeof(struct load_args));
*ptr = args;
if(pthread_create(&thread, 0, load_threads, ptr)) error("Thread creation failed");
return thread;
}
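/* A minimal usage sketch (hypothetical, not part of data.c) of the
 * asynchronous loader: fill in a load_args, kick off load_data(), join the
 * thread and consume the batch. The field values below (image size, batch
 * size, thread count) are illustrative only. */
static inline void example_async_load(char **paths, int m, char **labels, int classes)
{
    data buffer;
    load_args args = {0};
    args.paths = paths;
    args.m = m;            // total number of available paths
    args.n = 128;          // examples to load for this batch
    args.labels = labels;
    args.classes = classes;
    args.w = 256;
    args.h = 256;
    args.type = OLD_CLASSIFICATION_DATA;
    args.threads = 4;
    args.d = &buffer;
    pthread_t loader = load_data(args);  // spawns load_threads() in the background
    pthread_join(loader, 0);             // wait until buffer is filled
    /* ... train on buffer ... */
    free_data(buffer);
}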
data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h)
{
if(m) paths = get_random_paths(paths, n, m);
char **replace_paths = find_replace_paths(paths, n, ".png", "-label.png");
data d = {0};
d.shallow = 0;
d.X = load_image_paths(paths, n, w, h);
d.y = load_image_paths_gray(replace_paths, n, out_w, out_h);
if(m) free(paths);
int i;
for(i = 0; i < n; ++i) free(replace_paths[i]);
free(replace_paths);
return d;
}
data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h)
{
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.shallow = 0;
d.X = load_image_paths(paths, n, w, h);
d.y = load_labels_paths(paths, n, labels, k, 0);
if(m) free(paths);
return d;
}
/*
data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
data d = {0};
d.indexes = calloc(n, sizeof(int));
if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes);
d.shallow = 0;
d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure);
d.y = load_labels_paths(paths, n, labels, k);
if(m) free(paths);
return d;
}
*/
data load_data_super(char **paths, int n, int m, int w, int h, int scale)
{
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.shallow = 0;
int i;
d.X.rows = n;
d.X.vals = calloc(n, sizeof(float*));
d.X.cols = w*h*3;
d.y.rows = n;
d.y.vals = calloc(n, sizeof(float*));
d.y.cols = w*scale * h*scale * 3;
for(i = 0; i < n; ++i){
image im = load_image_color(paths[i], 0, 0);
image crop = random_crop_image(im, w*scale, h*scale);
int flip = rand()%2;
if (flip) flip_image(crop);
image resize = resize_image(crop, w, h);
d.X.vals[i] = resize.data;
d.y.vals[i] = crop.data;
free_image(im);
}
if(m) free(paths);
return d;
}
data load_data_regression(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.shallow = 0;
d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0);
d.y = load_regression_labels_paths(paths, n, k);
if(m) free(paths);
return d;
}
data select_data(data *orig, int *inds)
{
data d = {0};
d.shallow = 1;
d.w = orig[0].w;
d.h = orig[0].h;
d.X.rows = orig[0].X.rows;
d.y.rows = orig[0].X.rows;
d.X.cols = orig[0].X.cols;
d.y.cols = orig[0].y.cols;
d.X.vals = calloc(orig[0].X.rows, sizeof(float *));
d.y.vals = calloc(orig[0].y.rows, sizeof(float *));
int i;
for(i = 0; i < d.X.rows; ++i){
d.X.vals[i] = orig[inds[i]].X.vals[i];
d.y.vals[i] = orig[inds[i]].y.vals[i];
}
return d;
}
data *tile_data(data orig, int divs, int size)
{
data *ds = calloc(divs*divs, sizeof(data));
int i, j;
#pragma omp parallel for
for(i = 0; i < divs*divs; ++i){
data d;
d.shallow = 0;
d.w = orig.w/divs * size;
d.h = orig.h/divs * size;
d.X.rows = orig.X.rows;
d.X.cols = d.w*d.h*3;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.y = copy_matrix(orig.y);
#pragma omp parallel for
for(j = 0; j < orig.X.rows; ++j){
int x = (i%divs) * orig.w / divs - (d.w - orig.w/divs)/2;
int y = (i/divs) * orig.h / divs - (d.h - orig.h/divs)/2;
image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[j]);
d.X.vals[j] = crop_image(im, x, y, d.w, d.h).data;
}
ds[i] = d;
}
return ds;
}
data resize_data(data orig, int w, int h)
{
data d = {0};
d.shallow = 0;
d.w = w;
d.h = h;
int i;
d.X.rows = orig.X.rows;
d.X.cols = w*h*3;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.y = copy_matrix(orig.y);
#pragma omp parallel for
for(i = 0; i < orig.X.rows; ++i){
image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[i]);
d.X.vals[i] = resize_image(im, w, h).data;
}
return d;
}
data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
{
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.shallow = 0;
d.w=size;
d.h=size;
d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, center);
d.y = load_labels_paths(paths, n, labels, k, hierarchy);
if(m) free(paths);
return d;
}
data load_data_tag(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.w = size;
d.h = size;
d.shallow = 0;
d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0);
d.y = load_tags_paths(paths, n, k);
if(m) free(paths);
return d;
}
matrix concat_matrix(matrix m1, matrix m2)
{
int i, count = 0;
matrix m;
m.cols = m1.cols;
m.rows = m1.rows+m2.rows;
m.vals = calloc(m1.rows + m2.rows, sizeof(float*));
for(i = 0; i < m1.rows; ++i){
m.vals[count++] = m1.vals[i];
}
for(i = 0; i < m2.rows; ++i){
m.vals[count++] = m2.vals[i];
}
return m;
}
data concat_data(data d1, data d2)
{
data d = {0};
d.shallow = 1;
d.X = concat_matrix(d1.X, d2.X);
d.y = concat_matrix(d1.y, d2.y);
d.w = d1.w;
d.h = d1.h;
return d;
}
data concat_datas(data *d, int n)
{
int i;
data out = {0};
for(i = 0; i < n; ++i){
data new = concat_data(d[i], out);
free_data(out);
out = new;
}
return out;
}
data load_categorical_data_csv(char *filename, int target, int k)
{
data d = {0};
d.shallow = 0;
matrix X = csv_to_matrix(filename);
float *truth_1d = pop_column(&X, target);
float **truth = one_hot_encode(truth_1d, X.rows, k);
matrix y;
y.rows = X.rows;
y.cols = k;
y.vals = truth;
d.X = X;
d.y = y;
free(truth_1d);
return d;
}
data load_cifar10_data(char *filename)
{
data d = {0};
d.shallow = 0;
long i,j;
matrix X = make_matrix(10000, 3072);
matrix y = make_matrix(10000, 10);
d.X = X;
d.y = y;
FILE *fp = fopen(filename, "rb");
if(!fp) file_error(filename);
for(i = 0; i < 10000; ++i){
unsigned char bytes[3073];
fread(bytes, 1, 3073, fp);
int class = bytes[0];
y.vals[i][class] = 1;
for(j = 0; j < X.cols; ++j){
X.vals[i][j] = (double)bytes[j+1];
}
}
scale_data_rows(d, 1./255);
//normalize_data_rows(d);
fclose(fp);
return d;
}
void get_random_batch(data d, int n, float *X, float *y)
{
int j;
for(j = 0; j < n; ++j){
int index = rand()%d.X.rows;
memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float));
memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float));
}
}
void get_next_batch(data d, int n, int offset, float *X, float *y)
{
int j;
for(j = 0; j < n; ++j){
int index = offset + j;
memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float));
if(y) memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float));
}
}
void smooth_data(data d)
{
int i, j;
float scale = 1. / d.y.cols;
float eps = .1;
for(i = 0; i < d.y.rows; ++i){
for(j = 0; j < d.y.cols; ++j){
d.y.vals[i][j] = eps * scale + (1-eps) * d.y.vals[i][j];
}
}
}
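/* Label smoothing, for reference: each row of y is pulled toward the uniform
 * distribution, y' = eps/k + (1 - eps) * y with eps = 0.1 and k = d.y.cols.
 * With k = 10 classes the correct class goes from 1.0 to 0.91 and every other
 * class from 0.0 to 0.01, so each row still sums to 1. */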
data load_all_cifar10()
{
data d = {0};
d.shallow = 0;
int i,j,b;
matrix X = make_matrix(50000, 3072);
matrix y = make_matrix(50000, 10);
d.X = X;
d.y = y;
for(b = 0; b < 5; ++b){
char buff[256];
sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1);
FILE *fp = fopen(buff, "rb");
if(!fp) file_error(buff);
for(i = 0; i < 10000; ++i){
unsigned char bytes[3073];
fread(bytes, 1, 3073, fp);
int class = bytes[0];
y.vals[i+b*10000][class] = 1;
for(j = 0; j < X.cols; ++j){
X.vals[i+b*10000][j] = (double)bytes[j+1];
}
}
fclose(fp);
}
//normalize_data_rows(d);
scale_data_rows(d, 1./255);
smooth_data(d);
return d;
}
data load_go(char *filename)
{
FILE *fp = fopen(filename, "rb");
matrix X = make_matrix(3363059, 361);
matrix y = make_matrix(3363059, 361);
int row, col;
if(!fp) file_error(filename);
char *label;
int count = 0;
while((label = fgetl(fp))){
int i;
if(count == X.rows){
X = resize_matrix(X, count*2);
y = resize_matrix(y, count*2);
}
sscanf(label, "%d %d", &row, &col);
char *board = fgetl(fp);
int index = row*19 + col;
y.vals[count][index] = 1;
for(i = 0; i < 19*19; ++i){
float val = 0;
if(board[i] == '1') val = 1;
else if(board[i] == '2') val = -1;
X.vals[count][i] = val;
}
++count;
free(label);
free(board);
}
X = resize_matrix(X, count);
y = resize_matrix(y, count);
data d = {0};
d.shallow = 0;
d.X = X;
d.y = y;
fclose(fp);
return d;
}
void randomize_data(data d)
{
int i;
for(i = d.X.rows-1; i > 0; --i){
int index = rand()%i;
float *swap = d.X.vals[index];
d.X.vals[index] = d.X.vals[i];
d.X.vals[i] = swap;
swap = d.y.vals[index];
d.y.vals[index] = d.y.vals[i];
d.y.vals[i] = swap;
}
}
void scale_data_rows(data d, float s)
{
int i;
for(i = 0; i < d.X.rows; ++i){
scale_array(d.X.vals[i], d.X.cols, s);
}
}
void translate_data_rows(data d, float s)
{
int i;
for(i = 0; i < d.X.rows; ++i){
translate_array(d.X.vals[i], d.X.cols, s);
}
}
data copy_data(data d)
{
data c = {0};
c.w = d.w;
c.h = d.h;
c.shallow = 0;
c.num_boxes = d.num_boxes;
c.boxes = d.boxes;
c.X = copy_matrix(d.X);
c.y = copy_matrix(d.y);
return c;
}
void normalize_data_rows(data d)
{
int i;
for(i = 0; i < d.X.rows; ++i){
normalize_array(d.X.vals[i], d.X.cols);
}
}
data get_data_part(data d, int part, int total)
{
data p = {0};
p.shallow = 1;
p.X.rows = d.X.rows * (part + 1) / total - d.X.rows * part / total;
p.y.rows = d.y.rows * (part + 1) / total - d.y.rows * part / total;
p.X.cols = d.X.cols;
p.y.cols = d.y.cols;
p.X.vals = d.X.vals + d.X.rows * part / total;
p.y.vals = d.y.vals + d.y.rows * part / total;
return p;
}
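/* For reference: part p of `total` covers rows [rows*p/total, rows*(p+1)/total),
 * so the chunks differ in size by at most one row and always sum to rows
 * (e.g. rows = 10, total = 3 gives chunks of 3, 3 and 4). shallow = 1 marks
 * that the part aliases d's row pointers instead of copying the data. */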
data get_random_data(data d, int num)
{
data r = {0};
r.shallow = 1;
r.X.rows = num;
r.y.rows = num;
r.X.cols = d.X.cols;
r.y.cols = d.y.cols;
r.X.vals = calloc(num, sizeof(float *));
r.y.vals = calloc(num, sizeof(float *));
int i;
for(i = 0; i < num; ++i){
int index = rand()%d.X.rows;
r.X.vals[i] = d.X.vals[index];
r.y.vals[i] = d.y.vals[index];
}
return r;
}
data *split_data(data d, int part, int total)
{
data *split = calloc(2, sizeof(data));
int i;
int start = part*d.X.rows/total;
int end = (part+1)*d.X.rows/total;
data train;
data test;
train.shallow = test.shallow = 1;
test.X.rows = test.y.rows = end-start;
train.X.rows = train.y.rows = d.X.rows - (end-start);
train.X.cols = test.X.cols = d.X.cols;
train.y.cols = test.y.cols = d.y.cols;
train.X.vals = calloc(train.X.rows, sizeof(float*));
test.X.vals = calloc(test.X.rows, sizeof(float*));
train.y.vals = calloc(train.y.rows, sizeof(float*));
test.y.vals = calloc(test.y.rows, sizeof(float*));
for(i = 0; i < start; ++i){
train.X.vals[i] = d.X.vals[i];
train.y.vals[i] = d.y.vals[i];
}
for(i = start; i < end; ++i){
test.X.vals[i-start] = d.X.vals[i];
test.y.vals[i-start] = d.y.vals[i];
}
for(i = end; i < d.X.rows; ++i){
train.X.vals[i-(end-start)] = d.X.vals[i];
train.y.vals[i-(end-start)] = d.y.vals[i];
}
split[0] = train;
split[1] = test;
return split;
}
|
gemm.c | #include "gemm.h"
#include "utils.h"
#include "im2col.h"
#include "dark_cuda.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <float.h>
#include <string.h>
#include <stdint.h>
#ifdef _WIN32
#include <intrin.h>
#endif
#if defined(_OPENMP)
#include <omp.h>
#endif
#define TILE_M 4 // 4 ops
#define TILE_N 16 // AVX2 = 2 ops * 8 floats
#define TILE_K 16 // loop
#ifdef __cplusplus
#define PUT_IN_REGISTER
#else
#define PUT_IN_REGISTER register
#endif
void gemm_bin(int M, int N, int K, float ALPHA,
char *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
for(i = 0; i < M; ++i){
for(k = 0; k < K; ++k){
char A_PART = A[i*lda+k];
if(A_PART){
for(j = 0; j < N; ++j){
C[i*ldc+j] += B[k*ldb+j];
}
} else {
for(j = 0; j < N; ++j){
C[i*ldc+j] -= B[k*ldb+j];
}
}
}
}
}
float *random_matrix(int rows, int cols)
{
int i;
float* m = (float*)calloc(rows * cols, sizeof(float));
for(i = 0; i < rows*cols; ++i){
m[i] = (float)rand()/RAND_MAX;
}
return m;
}
void time_random_matrix(int TA, int TB, int m, int k, int n)
{
float *a;
if(!TA) a = random_matrix(m,k);
else a = random_matrix(k,m);
int lda = (!TA)?k:m;
float *b;
if(!TB) b = random_matrix(k,n);
else b = random_matrix(n,k);
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
int i;
clock_t start = clock(), end;
for(i = 0; i<10; ++i){
gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
}
end = clock();
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf ms\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
free(a);
free(b);
free(c);
}
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc)
{
gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc);
}
//--------------------------------------------
// XNOR bitwise GEMM for binary neural network
//--------------------------------------------
static inline unsigned char xnor(unsigned char a, unsigned char b) {
//return a == b;
return !(a^b);
}
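/* Why xnor + popcount gives a binary dot product: with bit = 1 encoding +1 and
 * bit = 0 encoding -1, two values multiply to +1 exactly when their bits are
 * equal, i.e. xnor = 1. Over K packed bits:
 *     dot = (#equal) - (#different) = 2*popcount(xnor(a,b)) - K.
 * Example, K = 8: a = 10110001b, b = 10010101b -> a^b = 00100100b, so
 * popcount(xnor) = 6 and dot = 2*6 - 8 = 4 (6 matches, 2 mismatches).
 * This identity is what the "(2 * count - K) * mean_val" lines below compute. */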
// INT-32
static inline uint32_t get_bit_int32(uint32_t const*const src, size_t index) {
size_t src_i = index / 32;
int src_shift = index % 32;
unsigned char val = (src[src_i] & (1 << src_shift)) > 0;
return val;
}
static inline uint32_t xnor_int32(uint32_t a, uint32_t b) {
return ~(a^b);
}
static inline uint64_t xnor_int64(uint64_t a, uint64_t b) {
return ~(a^b);
}
static inline uint32_t fill_bit_int32(char src) {
if (src == 0) return 0x00000000;
else return 0xFFFFFFFF;
}
static inline uint64_t fill_bit_int64(char src) {
if (src == 0) return 0x0000000000000000;
else return 0xFFFFFFFFFFFFFFFF;
}
void binary_int32_printf(uint32_t src) {
int i;
for (i = 0; i < 32; ++i) {
if (src & 1) printf("1");
else printf("0");
src = src >> 1;
}
printf("\n");
}
void binary_int64_printf(uint64_t src) {
int i;
for (i = 0; i < 64; ++i) {
if (src & 1) printf("1");
else printf("0");
src = src >> 1;
}
printf("\n");
}
/*
void gemm_nn_custom_bin_mean(int M, int N, int K, float ALPHA_UNUSED,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int *count_arr = calloc(M*N, sizeof(int));
int i, j, k;
for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024]
for (k = 0; k < K; ++k) { // l.size*l.size*l.c - one filter size [27 - 9216]
char a_bit = get_bit(A, i*lda + k);
for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
char b_bit = get_bit(B, k*ldb + j);
count_arr[i*ldc + j] += xnor(a_bit, b_bit);
}
}
}
for (i = 0; i < M; ++i) {
float mean_val = mean_arr[i];
for (j = 0; j < N; ++j) {
C[i*ldc + j] = (2 * count_arr[i*ldc + j] - K) * mean_val;
}
}
free(count_arr);
}
*/
/*
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int *count_arr = calloc(M*N, sizeof(int));
int i, j, k;
for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024]
for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
for (k = 0; k < K; ++k) { // l.size*l.size*l.c - one filter size [27 - 9216]
char a_bit = get_bit(A, i*lda + k);
char b_bit = get_bit(B, j*ldb + k);
count_arr[i*ldc + j] += xnor(a_bit, b_bit);
}
}
}
for (i = 0; i < M; ++i) {
float mean_val = mean_arr[i];
for (j = 0; j < N; ++j) {
C[i*ldc + j] = (2 * count_arr[i*ldc + j] - K) * mean_val;
}
}
free(count_arr);
}
*/
/*
void gemm_nn_custom_bin_mean(int M, int N, int K, float ALPHA_UNUSED,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int *count_arr = calloc(M*N, sizeof(int));
int i;
#pragma omp parallel for
for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024]
int j, k, h;
for (k = 0; k < K; ++k) { // l.size*l.size*l.c - one filter size [27 - 9216]
const char a_bit = get_bit(A, i*lda + k);
uint64_t a_bit64 = fill_bit_int64(a_bit);
int k_ldb = k*ldb;
for (j = 0; j < N; j += 64) { // out_h*out_w - one channel output size [169 - 173056]
if ((N - j > 64) && (k_ldb % 8 == 0)) {
uint64_t b_bit64 = *((uint64_t *)(B + (k_ldb + j) / 8));
uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);
//printf("\n %d \n",__builtin_popcountll(c_bit64)); // gcc
printf("\n %d \n", __popcnt64(c_bit64)); // msvs
int h;
for (h = 0; h < 64; ++h)
if ((c_bit64 >> h) & 1) count_arr[i*ldc + j + h] += 1;
//binary_int64_printf(a_bit64);
//binary_int64_printf(b_bit64);
//binary_int64_printf(c_bit64);
}
else {
for (; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
char b_bit = get_bit(B, k_ldb + j);
if (xnor(a_bit, b_bit)) count_arr[i*ldc + j] += 1;
}
}
}
}
}
if (mean_arr) {
//int K_2 = K / 2;
for (i = 0; i < M; ++i) {
float mean_val = mean_arr[i];
//float mean_val2 = 2 * mean_val;
for (j = 0; j < N; ++j) {
C[i*ldc + j] = (2 * count_arr[i*ldc + j] - K) * mean_val;
//C[i*ldc + j] = (count_arr[i*ldc + j] - K_2) *mean_val2;
}
}
}
else {
for (i = 0; i < M; ++i) {
for (j = 0; j < N; ++j) {
C[i*ldc + j] = count_arr[i*ldc + j] - K / 2;
}
}
}
free(count_arr);
//getchar();
}
*/
/*
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int i;
#pragma omp parallel for
for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024]
int j, k, h;
float mean_val = mean_arr[i];
for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
int count = 0;
for (k = 0; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216]
uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8));
uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8));
uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);
#ifdef WIN32
int tmp_count = __popcnt64(c_bit64);
#else
int tmp_count = __builtin_popcountll(c_bit64);
#endif
if (K - k < 64) tmp_count = tmp_count - (64 - (K - k)); // remove extra bits
count += tmp_count;
//binary_int64_printf(c_bit64);
//printf(", count = %d \n\n", tmp_count);
}
C[i*ldc + j] = (2 * count - K) * mean_val;
}
}
}
*/
//----------------------------
// currently unused
void transpose_32x32_bits_my(uint32_t *A, uint32_t *B, int lda, int ldb)
{
unsigned int x, y;
for (y = 0; y < 32; ++y) {
for (x = 0; x < 32; ++x) {
if (A[y * lda] & (1 << x)) B[x * ldb] |= (uint32_t)1 << y;
}
}
}
#ifndef GPU
uint8_t reverse_8_bit(uint8_t a) {
return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16;
}
uint32_t reverse_32_bit(uint32_t a)
{
// unsigned int __rbit(unsigned int val) // for ARM //__asm__("rbit %0, %1\n" : "=r"(output) : "r"(input));
return (reverse_8_bit(a >> 24) << 0) |
(reverse_8_bit(a >> 16) << 8) |
(reverse_8_bit(a >> 8) << 16) |
(reverse_8_bit(a >> 0) << 24);
}
#define swap(a0, a1, j, m) t = (a0 ^ (a1 >>j)) & m; a0 = a0 ^ t; a1 = a1 ^ (t << j);
void transpose32_optimized(uint32_t A[32]) {
int j, k;
unsigned m, t;
//m = 0x0000FFFF;
//for (j = 16; j != 0; j = j >> 1, m = m ^ (m << j)) {
// for (k = 0; k < 32; k = (k + j + 1) & ~j) {
// t = (A[k] ^ (A[k + j] >> j)) & m;
// A[k] = A[k] ^ t;
// A[k + j] = A[k + j] ^ (t << j);
// }
//}
j = 16;
m = 0x0000FFFF;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 8;
m = 0x00ff00ff;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 4;
m = 0x0f0f0f0f;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 2;
m = 0x33333333;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 1;
m = 0x55555555;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
// reverse Y
for (j = 0; j < 16; ++j) {
uint32_t tmp = A[j];
A[j] = reverse_32_bit(A[31 - j]);
A[31 - j] = reverse_32_bit(tmp);
}
}
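/* This is the Hacker's Delight 32x32 bit-matrix transpose: each pass exchanges
 * blocks between rows j apart, with the mask selecting which bit columns move
 * (j=16 with 0x0000FFFF, j=8 with 0x00ff00ff, j=4 with 0x0f0f0f0f, j=2 with
 * 0x33333333, j=1 with 0x55555555). The final loop mirrors the row order and
 * reverses the bits in each row, giving the "reversed diagonale" orientation
 * used by the caller below. */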
void transpose_32x32_bits_reversed_diagonale(uint32_t *A, uint32_t *B, int m, int n)
{
unsigned A_tmp[32];
int i;
#pragma unroll
for (i = 0; i < 32; ++i) A_tmp[i] = A[i * m];
transpose32_optimized(A_tmp);
#pragma unroll
for (i = 0; i < 32; ++i) B[i*n] = A_tmp[i];
}
void transpose_8x8_bits_my(unsigned char *A, unsigned char *B, int lda, int ldb)
{
unsigned x, y;
for (y = 0; y < 8; ++y) {
for (x = 0; x < 8; ++x) {
if (A[y * lda] & (1 << x)) B[x * ldb] |= 1 << y;
}
}
}
unsigned char reverse_byte_1(char a)
{
return ((a & 0x1) << 7) | ((a & 0x2) << 5) |
((a & 0x4) << 3) | ((a & 0x8) << 1) |
((a & 0x10) >> 1) | ((a & 0x20) >> 3) |
((a & 0x40) >> 5) | ((a & 0x80) >> 7);
}
unsigned char reverse_byte(unsigned char a)
{
return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16;
}
static unsigned char lookup[16] = {
0x0, 0x8, 0x4, 0xc, 0x2, 0xa, 0x6, 0xe,
0x1, 0x9, 0x5, 0xd, 0x3, 0xb, 0x7, 0xf, };
unsigned char reverse_byte_3(unsigned char n) {
// Reverse the top and bottom nibble then swap them.
return (lookup[n & 0b1111] << 4) | lookup[n >> 4];
}
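/* Worked example of the nibble-lookup reversal: lookup[] holds the bit
 * reversal of every 4-bit value, so a byte is reversed by swapping its
 * reversed nibbles. For n = 0xB4 (10110100b): lookup[0x4] = 0x2 and
 * lookup[0xB] = 0xD, giving (0x2 << 4) | 0xD = 0x2D = 00101101b. */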
void transpose8rS32_reversed_diagonale(unsigned char* A, unsigned char* B, int m, int n)
{
unsigned x, y, t;
x = y = 0;
// Load the array and pack it into x and y.
//x = (A[0] << 24) | (A[m] << 16) | (A[2 * m] << 8) | A[3 * m];
//y = (A[4 * m] << 24) | (A[5 * m] << 16) | (A[6 * m] << 8) | A[7 * m];
t = (x ^ (x >> 7)) & 0x00AA00AA; x = x ^ t ^ (t << 7);
t = (y ^ (y >> 7)) & 0x00AA00AA; y = y ^ t ^ (t << 7);
t = (x ^ (x >> 14)) & 0x0000CCCC; x = x ^ t ^ (t << 14);
t = (y ^ (y >> 14)) & 0x0000CCCC; y = y ^ t ^ (t << 14);
t = (x & 0xF0F0F0F0) | ((y >> 4) & 0x0F0F0F0F);
y = ((x << 4) & 0xF0F0F0F0) | (y & 0x0F0F0F0F);
x = t;
B[7 * n] = reverse_byte(x >> 24); B[6 * n] = reverse_byte(x >> 16); B[5 * n] = reverse_byte(x >> 8); B[4 * n] = reverse_byte(x);
B[3 * n] = reverse_byte(y >> 24); B[2 * n] = reverse_byte(y >> 16); B[1 * n] = reverse_byte(y >> 8); B[0 * n] = reverse_byte(y);
}
/*
// transpose by 8-bit
void transpose_bin(char *A, char *B, const int n, const int m,
const int lda, const int ldb, const int block_size)
{
//printf("\n n = %d, ldb = %d \t\t m = %d, lda = %d \n", n, ldb, m, lda);
int i;
#pragma omp parallel for
for (i = 0; i < n; i += 8) {
int j;
for (j = 0; j < m; j += 8) {
int a_index = i*lda + j;
int b_index = j*ldb + i;
//transpose_8x8_bits_my(&A[a_index/8], &B[b_index/8], lda/8, ldb/8);
transpose8rS32_reversed_diagonale(&A[a_index / 8], &B[b_index / 8], lda / 8, ldb / 8);
}
for (; j < m; ++j) {
if (get_bit(A, i*lda + j)) set_bit(B, j*ldb + i);
}
}
}
*/
#endif
// transpose by 32-bit
void transpose_bin(uint32_t *A, uint32_t *B, const int n, const int m,
const int lda, const int ldb, const int block_size)
{
//printf("\n n = %d (n mod 32 = %d), m = %d (m mod 32 = %d) \n", n, n % 32, m, m % 32);
//printf("\n lda = %d (lda mod 32 = %d), ldb = %d (ldb mod 32 = %d) \n", lda, lda % 32, ldb, ldb % 32);
int i;
#pragma omp parallel for
for (i = 0; i < n; i += 32) {
int j;
for (j = 0; j < m; j += 32) {
int a_index = i*lda + j;
int b_index = j*ldb + i;
transpose_32x32_bits_reversed_diagonale(&A[a_index / 32], &B[b_index / 32], lda / 32, ldb / 32);
//transpose_32x32_bits_my(&A[a_index/32], &B[b_index/32], lda/32, ldb/32);
}
for (; j < m; ++j) {
if (get_bit((const unsigned char* const)A, i * lda + j)) set_bit((unsigned char* const)B, j * ldb + i);
}
}
}
static inline int popcnt_32(uint32_t val32) {
#ifdef WIN32 // Windows MSVS
int tmp_count = __popcnt(val32);
#else // Linux GCC
int tmp_count = __builtin_popcount(val32);
#endif
return tmp_count;
}
//----------------------------
#if (defined(__AVX__) && defined(__x86_64__)) || defined(_WIN64)
#ifdef _WIN64
#include <intrin.h>
#include <ammintrin.h>
#include <immintrin.h>
#include <smmintrin.h>
#if defined(_MSC_VER) && _MSC_VER <= 1900
static inline __int64 _mm256_extract_epi64(__m256i a, const int index) {
return a.m256i_i64[index];
}
static inline __int32 _mm256_extract_epi32(__m256i a, const int index) {
return a.m256i_i32[index];
}
#endif
static inline float _castu32_f32(uint32_t a) {
return *((float *)&a);
}
static inline float _mm256_extract_float32(__m256 a, const int index) {
return a.m256_f32[index];
}
#else // Linux GCC/Clang
#include <x86intrin.h>
#include <ammintrin.h>
#include <immintrin.h>
#include <smmintrin.h>
#include <cpuid.h>
static inline float _castu32_f32(uint32_t a) {
return *((float *)&a);
}
static inline float _mm256_extract_float32(__m256 a, const int index) {
return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), index));
}
void asm_cpuid(uint32_t* abcd, uint32_t eax)
{
uint32_t ebx = 0, edx = 0, ecx = 0;
// EBX is saved to EDI and later restored
__asm__("movl %%ebx, %%edi;"
"cpuid;"
"xchgl %%ebx, %%edi;"
: "=D"(ebx),
"+a"(eax), "+c"(ecx), "=d"(edx));
abcd[0] = eax;
abcd[1] = ebx;
abcd[2] = ecx;
abcd[3] = edx;
}
#endif
#ifdef _WIN32
// Windows
#define cpuid(info, x) __cpuidex(info, x, 0)
#else
// GCC Intrinsics
void cpuid(int info[4], int InfoType) {
__cpuid_count(InfoType, 0, info[0], info[1], info[2], info[3]);
}
#endif
// Misc.
static int HW_MMX, HW_x64, HW_RDRAND, HW_BMI1, HW_BMI2, HW_ADX, HW_PREFETCHWT1;
static int HW_ABM; // Advanced Bit Manipulation
// SIMD: 128-bit
static int HW_SSE, HW_SSE2, HW_SSE3, HW_SSSE3, HW_SSE41, HW_SSE42, HW_SSE4a, HW_AES, HW_SHA;
// SIMD: 256-bit
static int HW_AVX, HW_XOP, HW_FMA3, HW_FMA4, HW_AVX2;
// SIMD: 512-bit
static int HW_AVX512F; // AVX512 Foundation
static int HW_AVX512CD; // AVX512 Conflict Detection
static int HW_AVX512PF; // AVX512 Prefetch
static int HW_AVX512ER; // AVX512 Exponential + Reciprocal
static int HW_AVX512VL; // AVX512 Vector Length Extensions
static int HW_AVX512BW; // AVX512 Byte + Word
static int HW_AVX512DQ; // AVX512 Doubleword + Quadword
static int HW_AVX512IFMA; // AVX512 Integer 52-bit Fused Multiply-Add
static int HW_AVX512VBMI; // AVX512 Vector Byte Manipulation Instructions
// https://stackoverflow.com/questions/6121792/how-to-check-if-a-cpu-supports-the-sse3-instruction-set
void check_cpu_features(void) {
int info[4];
cpuid(info, 0);
int nIds = info[0];
cpuid(info, 0x80000000);
unsigned nExIds = info[0];
// Detect Features
if (nIds >= 0x00000001) {
cpuid(info, 0x00000001);
HW_MMX = (info[3] & ((int)1 << 23)) != 0;
HW_SSE = (info[3] & ((int)1 << 25)) != 0;
HW_SSE2 = (info[3] & ((int)1 << 26)) != 0;
HW_SSE3 = (info[2] & ((int)1 << 0)) != 0;
HW_SSSE3 = (info[2] & ((int)1 << 9)) != 0;
HW_SSE41 = (info[2] & ((int)1 << 19)) != 0;
HW_SSE42 = (info[2] & ((int)1 << 20)) != 0;
HW_AES = (info[2] & ((int)1 << 25)) != 0;
HW_AVX = (info[2] & ((int)1 << 28)) != 0;
HW_FMA3 = (info[2] & ((int)1 << 12)) != 0;
HW_RDRAND = (info[2] & ((int)1 << 30)) != 0;
}
if (nIds >= 0x00000007) {
cpuid(info, 0x00000007);
HW_AVX2 = (info[1] & ((int)1 << 5)) != 0;
HW_BMI1 = (info[1] & ((int)1 << 3)) != 0;
HW_BMI2 = (info[1] & ((int)1 << 8)) != 0;
HW_ADX = (info[1] & ((int)1 << 19)) != 0;
HW_SHA = (info[1] & ((int)1 << 29)) != 0;
HW_PREFETCHWT1 = (info[2] & ((int)1 << 0)) != 0;
HW_AVX512F = (info[1] & ((int)1 << 16)) != 0;
HW_AVX512CD = (info[1] & ((int)1 << 28)) != 0;
HW_AVX512PF = (info[1] & ((int)1 << 26)) != 0;
HW_AVX512ER = (info[1] & ((int)1 << 27)) != 0;
HW_AVX512VL = (info[1] & ((int)1 << 31)) != 0;
HW_AVX512BW = (info[1] & ((int)1 << 30)) != 0;
HW_AVX512DQ = (info[1] & ((int)1 << 17)) != 0;
HW_AVX512IFMA = (info[1] & ((int)1 << 21)) != 0;
HW_AVX512VBMI = (info[2] & ((int)1 << 1)) != 0;
}
if (nExIds >= 0x80000001) {
cpuid(info, 0x80000001);
HW_x64 = (info[3] & ((int)1 << 29)) != 0;
HW_ABM = (info[2] & ((int)1 << 5)) != 0;
HW_SSE4a = (info[2] & ((int)1 << 6)) != 0;
HW_FMA4 = (info[2] & ((int)1 << 16)) != 0;
HW_XOP = (info[2] & ((int)1 << 11)) != 0;
}
}
int is_avx() {
static int result = -1;
if (result == -1) {
check_cpu_features();
result = HW_AVX;
if (result == 1) printf(" Used AVX \n");
else printf(" Not used AVX \n");
}
return result;
}
int is_fma_avx2() {
static int result = -1;
if (result == -1) {
check_cpu_features();
result = HW_FMA3 && HW_AVX2;
if (result == 1) printf(" Used FMA & AVX2 \n");
else printf(" Not used FMA & AVX2 \n");
}
return result;
}
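/* The two checks above implement lazy runtime dispatch: the first call runs
 * cpuid via check_cpu_features() and caches the answer in a static. A minimal
 * usage sketch (illustrative only; whether a particular caller branches this
 * way is an assumption, but both functions below exist with these signatures):
 *
 *     if (is_fma_avx2()) gemm_nn_fast(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
 *     else               gemm_nn(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
 */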
// https://software.intel.com/sites/landingpage/IntrinsicsGuide
void gemm_nn(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i, j, k;
if (is_avx() == 1) { // AVX
for (i = 0; i < M; ++i) {
for (k = 0; k < K; ++k) {
float A_PART = ALPHA*A[i*lda + k];
__m256 a256, b256, c256, result256; // AVX
a256 = _mm256_set1_ps(A_PART);
for (j = 0; j < N - 8; j += 8) {
b256 = _mm256_loadu_ps(&B[k*ldb + j]);
c256 = _mm256_loadu_ps(&C[i*ldc + j]);
// FMA - Intel Haswell (2013), AMD Piledriver (2012)
//result256 = _mm256_fmadd_ps(a256, b256, c256);
result256 = _mm256_mul_ps(a256, b256);
result256 = _mm256_add_ps(result256, c256);
_mm256_storeu_ps(&C[i*ldc + j], result256);
}
int prev_end = (N % 8 == 0) ? (N - 8) : (N / 8) * 8;
for (j = prev_end; j < N; ++j)
C[i*ldc + j] += A_PART*B[k*ldb + j];
}
}
}
else {
for (i = 0; i < M; ++i) {
for (k = 0; k < K; ++k) {
PUT_IN_REGISTER float A_PART = ALPHA * A[i * lda + k];
for (j = 0; j < N; ++j) {
C[i*ldc + j] += A_PART*B[k*ldb + j];
}
/* // SSE
__m128 a128, b128, c128, result128; // SSE
a128 = _mm_set1_ps(A_PART);
for (j = 0; j < N - 4; j += 4) {
b128 = _mm_loadu_ps(&B[k*ldb + j]);
c128 = _mm_loadu_ps(&C[i*ldc + j]);
//result128 = _mm_fmadd_ps(a128, b128, c128);
result128 = _mm_mul_ps(a128, b128);
result128 = _mm_add_ps(result128, c128);
_mm_storeu_ps(&C[i*ldc + j], result128);
}
int prev_end = (N % 4 == 0) ? (N - 4) : (N / 4) * 4;
for (j = prev_end; j < N; ++j){
C[i*ldc + j] += A_PART*B[k*ldb + j];
}
*/
}
}
}
}
void gemm_nn_fast(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i;
#pragma omp parallel for
for (i = 0; i < (M / TILE_M)*TILE_M; i += TILE_M)
{
int j, k;
int i_d, k_d;
for (k = 0; k < (K / TILE_K)*TILE_K; k += TILE_K)
{
for (j = 0; j < (N / TILE_N)*TILE_N; j += TILE_N)
{
// L1 - 6 bits tag [11:6] - cache size 32 KB, conflict for each 4 KB
// L2 - 9 bits tag [14:6] - cache size 256 KB, conflict for each 32 KB
// L3 - 13 bits tag [18:6] - cache size 8 MB, conflict for each 512 KB
__m256 result256;
__m256 a256_0, b256_0; // AVX
__m256 a256_1, b256_1; // AVX
__m256 a256_2;// , b256_2; // AVX
__m256 a256_3;// , b256_3; // AVX
__m256 c256_0, c256_1, c256_2, c256_3;
__m256 c256_4, c256_5, c256_6, c256_7;
c256_0 = _mm256_loadu_ps(&C[(0 + i)*ldc + (0 + j)]);
c256_1 = _mm256_loadu_ps(&C[(1 + i)*ldc + (0 + j)]);
c256_2 = _mm256_loadu_ps(&C[(0 + i)*ldc + (8 + j)]);
c256_3 = _mm256_loadu_ps(&C[(1 + i)*ldc + (8 + j)]);
c256_4 = _mm256_loadu_ps(&C[(2 + i)*ldc + (0 + j)]);
c256_5 = _mm256_loadu_ps(&C[(3 + i)*ldc + (0 + j)]);
c256_6 = _mm256_loadu_ps(&C[(2 + i)*ldc + (8 + j)]);
c256_7 = _mm256_loadu_ps(&C[(3 + i)*ldc + (8 + j)]);
for (k_d = 0; k_d < (TILE_K); ++k_d)
{
a256_0 = _mm256_set1_ps(ALPHA*A[(0 + i)*lda + (k_d + k)]);
a256_1 = _mm256_set1_ps(ALPHA*A[(1 + i)*lda + (k_d + k)]);
a256_2 = _mm256_set1_ps(ALPHA*A[(2 + i)*lda + (k_d + k)]);
a256_3 = _mm256_set1_ps(ALPHA*A[(3 + i)*lda + (k_d + k)]);
b256_0 = _mm256_loadu_ps(&B[(k_d + k)*ldb + (0 + j)]);
b256_1 = _mm256_loadu_ps(&B[(k_d + k)*ldb + (8 + j)]);
// FMA - Intel Haswell (2013), AMD Piledriver (2012)
//c256_0 = _mm256_fmadd_ps(a256_0, b256_0, c256_0);
//c256_1 = _mm256_fmadd_ps(a256_1, b256_0, c256_1);
//c256_2 = _mm256_fmadd_ps(a256_0, b256_1, c256_2);
//c256_3 = _mm256_fmadd_ps(a256_1, b256_1, c256_3);
//c256_4 = _mm256_fmadd_ps(a256_2, b256_0, c256_4);
//c256_5 = _mm256_fmadd_ps(a256_3, b256_0, c256_5);
//c256_6 = _mm256_fmadd_ps(a256_2, b256_1, c256_6);
//c256_7 = _mm256_fmadd_ps(a256_3, b256_1, c256_7);
result256 = _mm256_mul_ps(a256_0, b256_0);
c256_0 = _mm256_add_ps(result256, c256_0);
result256 = _mm256_mul_ps(a256_1, b256_0);
c256_1 = _mm256_add_ps(result256, c256_1);
result256 = _mm256_mul_ps(a256_0, b256_1);
c256_2 = _mm256_add_ps(result256, c256_2);
result256 = _mm256_mul_ps(a256_1, b256_1);
c256_3 = _mm256_add_ps(result256, c256_3);
result256 = _mm256_mul_ps(a256_2, b256_0);
c256_4 = _mm256_add_ps(result256, c256_4);
result256 = _mm256_mul_ps(a256_3, b256_0);
c256_5 = _mm256_add_ps(result256, c256_5);
result256 = _mm256_mul_ps(a256_2, b256_1);
c256_6 = _mm256_add_ps(result256, c256_6);
result256 = _mm256_mul_ps(a256_3, b256_1);
c256_7 = _mm256_add_ps(result256, c256_7);
}
_mm256_storeu_ps(&C[(0 + i)*ldc + (0 + j)], c256_0);
_mm256_storeu_ps(&C[(1 + i)*ldc + (0 + j)], c256_1);
_mm256_storeu_ps(&C[(0 + i)*ldc + (8 + j)], c256_2);
_mm256_storeu_ps(&C[(1 + i)*ldc + (8 + j)], c256_3);
_mm256_storeu_ps(&C[(2 + i)*ldc + (0 + j)], c256_4);
_mm256_storeu_ps(&C[(3 + i)*ldc + (0 + j)], c256_5);
_mm256_storeu_ps(&C[(2 + i)*ldc + (8 + j)], c256_6);
_mm256_storeu_ps(&C[(3 + i)*ldc + (8 + j)], c256_7);
}
for (j = (N / TILE_N)*TILE_N; j < N; ++j) {
for (i_d = i; i_d < (i + TILE_M); ++i_d)
{
for (k_d = k; k_d < (k + TILE_K); ++k_d)
{
PUT_IN_REGISTER float A_PART = ALPHA*A[i_d*lda + k_d];
C[i_d*ldc + j] += A_PART*B[k_d*ldb + j];
}
}
}
}
for (k = (K / TILE_K)*TILE_K; k < K; ++k)
{
for (i_d = i; i_d < (i + TILE_M); ++i_d)
{
PUT_IN_REGISTER float A_PART = ALPHA*A[i_d*lda + k];
for (j = 0; j < N; ++j) {
C[i_d*ldc + j] += A_PART*B[k*ldb + j];
}
}
}
}
for (i = (M / TILE_M)*TILE_M; i < M; ++i) {
int j, k;
for (k = 0; k < K; ++k) {
PUT_IN_REGISTER float A_PART = ALPHA*A[i*lda + k];
for (j = 0; j < N; ++j) {
C[i*ldc + j] += A_PART*B[k*ldb + j];
}
}
}
}
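/* Blocking scheme used above: each tile iteration updates a TILE_M x TILE_N
 * (4 x 16) block of C held entirely in eight __m256 accumulators, two 8-float
 * halves per row. For every k in the TILE_K loop one A element per row is
 * broadcast with _mm256_set1_ps and multiplied against the same two 8-float
 * slices of B's row k, so each B load is reused across the four C rows. The
 * scalar loops afterwards handle the N, K and M remainders that do not fill a
 * whole tile. */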
void gemm_nn_bin_32bit_packed(int M, int N, int K, float ALPHA,
uint32_t *A, int lda,
uint32_t *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int i;
#pragma omp parallel for
for (i = 0; i < M; ++i) { // l.n
int j, s;
float mean_val = mean_arr[i];
//printf(" l.mean_arr[i] = %d \n ", l.mean_arr[i]);
for (s = 0; s < K; ++s) // l.size*l.size*l.c/32 or (l.size*l.size*l.c)
{
PUT_IN_REGISTER uint32_t A_PART = A[i*lda + s];
__m256i a256 = _mm256_set1_epi32(A_PART);
for (j = 0; j < N - 8; j += 8)
{
__m256i b256 = *((__m256i*)&B[s*ldb + j]);
__m256i xor256 = _mm256_xor_si256(a256, b256); // xor(a,b); negated below to get xnor
__m256i all_1 = _mm256_set1_epi8((char)255);
__m256i xnor256 = _mm256_andnot_si256(xor256, all_1); // xnor = not(xor(a,b))
// waiting for - CPUID Flags: AVX512VPOPCNTDQ: __m512i _mm512_popcnt_epi32(__m512i a)
__m256 count = _mm256_setr_ps(
popcnt_32(_mm256_extract_epi32(xnor256, 0)),
popcnt_32(_mm256_extract_epi32(xnor256, 1)),
popcnt_32(_mm256_extract_epi32(xnor256, 2)),
popcnt_32(_mm256_extract_epi32(xnor256, 3)),
popcnt_32(_mm256_extract_epi32(xnor256, 4)),
popcnt_32(_mm256_extract_epi32(xnor256, 5)),
popcnt_32(_mm256_extract_epi32(xnor256, 6)),
popcnt_32(_mm256_extract_epi32(xnor256, 7)));
__m256 val2 = _mm256_set1_ps(2);
count = _mm256_mul_ps(count, val2); // count * 2
__m256 val32 = _mm256_set1_ps(32);
count = _mm256_sub_ps(count, val32); // count - 32
__m256 mean256 = _mm256_set1_ps(mean_val);
count = _mm256_mul_ps(count, mean256); // count * mean_val
__m256 c256 = *((__m256*)&C[i*ldc + j]);
count = _mm256_add_ps(count, c256); // c = c + count
*((__m256*)&C[i*ldc + j]) = count;
}
for (; j < N; ++j) // out_h*out_w;
{
PUT_IN_REGISTER uint32_t B_PART = B[s*ldb + j];
uint32_t xnor_result = ~(A_PART ^ B_PART);
int32_t count = popcnt_32(xnor_result); // must be Signed int
C[i*ldc + j] += (2 * count - 32) * mean_val;
}
}
}
}
void convolution_2d_old(int w, int h, int ksize, int n, int c, int pad, int stride,
float *weights, float *input, float *output)
{
//const int out_h = (h + 2 * pad - ksize) / stride + 1; // output_height=input_height for stride=1 and pad=1
//const int out_w = (w + 2 * pad - ksize) / stride + 1; // output_width=input_width for stride=1 and pad=1
int fil;
// filter index
#pragma omp parallel for // "omp parallel for" - automatic parallelization of loop by using OpenMP
for (fil = 0; fil < n; ++fil) {
//int i, f, j;
int chan, y, x, f_y, f_x;
// channel index
for (chan = 0; chan < c; ++chan)
// input - y
for (y = 0; y < h; ++y)
// input - x
for (x = 0; x < w; ++x)
{
int const output_index = fil*w*h + y*w + x;
int const weights_pre_index = fil*c*ksize*ksize + chan*ksize*ksize;
int const input_pre_index = chan*w*h;
float sum = 0;
// filter - y
for (f_y = 0; f_y < ksize; ++f_y)
{
int input_y = y + f_y - pad;
// filter - x
for (f_x = 0; f_x < ksize; ++f_x)
{
int input_x = x + f_x - pad;
if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue;
int input_index = input_pre_index + input_y*w + input_x;
int weights_index = weights_pre_index + f_y*ksize + f_x;
sum += input[input_index] * weights[weights_index];
}
}
// l.output[filters][width][height] +=
// state.input[channels][width][height] *
// l.weights[filters][channels][filter_width][filter_height];
output[output_index] += sum;
}
}
}
void convolution_2d(int w, int h, int ksize, int n, int c, int pad, int stride,
float *weights, float *input, float *output, float *mean)
{
//const int out_h = (h + 2 * pad - ksize) / stride + 1; // output_height=input_height for stride=1 and pad=1
//const int out_w = (w + 2 * pad - ksize) / stride + 1; // output_width=input_width for stride=1 and pad=1
int i;
#if defined(_OPENMP)
static int max_num_threads = 0;
if (max_num_threads == 0) {
max_num_threads = omp_get_max_threads();
//omp_set_num_threads( max_num_threads / 2);
}
#endif
//convolution_2d_old(w, h, ksize, n, c, pad, stride, weights, input, output);
__m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
for (i = 0; i < ksize*ksize*n*c; i+=8) {
*((__m256*)&weights[i]) = _mm256_and_ps(*((__m256*)&weights[i]), _mm256_castsi256_ps(all256_sing1));
}
//for (i = 0; i < w*h*c; i += 8) {
//*((__m256*)&input[i]) = _mm256_and_ps(*((__m256*)&input[i]), _mm256_castsi256_ps(all256_sing1));
//}
//__m256i all256_last_zero = _mm256_set1_epi32(0xFFFFFFFF);
//all256_last_zero.m256i_i32[7] = 0;
__m256i all256_last_zero =
_mm256_set_epi32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x0);
__m256i idx256 = _mm256_set_epi32(0, 7, 6, 5, 4, 3, 2, 1);
//__m256 all256_sing1 = _mm256_set1_ps(0x80000000);
__m256 all256_one = _mm256_set1_ps(1);
__m256i all256i_one = _mm256_set1_epi32(1);
///__m256i src256 = _mm256_loadu_si256((__m256i *)(&src[i]));
///__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats
int fil;
// filter index
#pragma omp parallel for // "omp parallel for" - automatic parallelization of loop by using OpenMP
for (fil = 0; fil < n; ++fil) {
int chan, y, x, f_y, f_x;
float cur_mean = fabs(mean[fil]);
__m256 mean256 = _mm256_set1_ps(cur_mean);
// channel index
//for (chan = 0; chan < c; ++chan)
// input - y
for (y = 0; y < h; ++y)
// input - x
for (x = 0; x < w-8; x+=8)
{
int const output_index = fil*w*h + y*w + x;
float sum = 0;
__m256 sum256 = _mm256_set1_ps(0);
for (chan = 0; chan < c; ++chan) {
int const weights_pre_index = fil*c*ksize*ksize + chan*ksize*ksize;
int const input_pre_index = chan*w*h;
// filter - y
for (f_y = 0; f_y < ksize; ++f_y)
{
int input_y = y + f_y - pad;
//__m256 in = *((__m256*)&input[input_pre_index + input_y*w]);
if (input_y < 0 || input_y >= h) continue;
//__m256 in = _mm256_loadu_ps(&input[input_pre_index + input_y*w + x - pad]);
// filter - x
for (f_x = 0; f_x < ksize; ++f_x)
{
int input_x = x + f_x - pad;
//if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue;
int input_index = input_pre_index + input_y*w + input_x;
int weights_index = weights_pre_index + f_y*ksize + f_x;
//if (input_y < 0 || input_y >= h) continue;
//sum += input[input_index] * weights[weights_index];
__m256 in = *((__m256*)&input[input_index]);
__m256 w = _mm256_set1_ps(weights[weights_index]);
//__m256 w_sign = _mm256_and_ps(w, _mm256_castsi256_ps(all256_sing1)); // check sign in 8 x 32-bit floats
__m256 xor256 = _mm256_xor_ps(w, in);
//printf("\n xor256_1 = %f, xor256_2 = %f \n", xor256.m256_f32[0], xor256.m256_f32[1]);
//printf("\n in = %f, w = %f, xor256 = %f \n", in.m256_f32[0], w_sign.m256_f32[0], xor256.m256_f32[0]);
//__m256 pn1 = _mm256_and_ps(_mm256_castsi256_ps(all256i_one), xor256);
//sum256 = xor256;
sum256 = _mm256_add_ps(xor256, sum256);
//printf("\n --- \n");
//printf("\n 0 = %f, 1 = %f, 2 = %f, 3 = %f, 4 = %f, 5 = %f, 6 = %f, 7 = %f \n", in.m256_f32[0], in.m256_f32[1], in.m256_f32[2], in.m256_f32[3], in.m256_f32[4], in.m256_f32[5], in.m256_f32[6], in.m256_f32[7]);
if (f_x < ksize-1) {
//in = _mm256_permutevar8x32_ps(in, idx256);
//in = _mm256_and_ps(in, _mm256_castsi256_ps(all256_last_zero));
}
}
}
}
// l.output[filters][width][height] +=
// state.input[channels][width][height] *
// l.weights[filters][channels][filter_width][filter_height];
//output[output_index] += sum;
sum256 = _mm256_mul_ps(sum256, mean256);
//printf("\n cur_mean = %f, sum256 = %f, sum256 = %f, in = %f \n",
// cur_mean, sum256.m256_f32[0], sum256.m256_f32[1], input[input_pre_index]);
//__m256 out = *((__m256*)&output[output_index]);
//out = _mm256_add_ps(out, sum256);
//*((__m256*)&output[output_index]) = out;
*((__m256*)&output[output_index]) = sum256;
//_mm256_storeu_ps(&C[i*ldc + j], result256);
}
}
}
// http://graphics.stanford.edu/~seander/bithacks.html
// https://stackoverflow.com/questions/17354971/fast-counting-the-number-of-set-bits-in-m128i-register
// https://arxiv.org/pdf/1611.07612.pdf
static inline int popcnt128(__m128i n) {
const __m128i n_hi = _mm_unpackhi_epi64(n, n);
#ifdef _MSC_VER
return __popcnt64(_mm_cvtsi128_si64(n)) + __popcnt64(_mm_cvtsi128_si64(n_hi));
#else
return __popcntq(_mm_cvtsi128_si64(n)) + __popcntq(_mm_cvtsi128_si64(n_hi));
#endif
}
static inline int popcnt256(__m256i n) {
return popcnt128(_mm256_extractf128_si256(n, 0)) + popcnt128(_mm256_extractf128_si256(n, 1));
}
static inline __m256i count256(__m256i v) {
__m256i lookup =
_mm256_setr_epi8(0, 1, 1, 2, 1, 2, 2, 3, 1, 2,
2, 3, 2, 3, 3, 4, 0, 1, 1, 2, 1, 2, 2, 3,
1, 2, 2, 3, 2, 3, 3, 4);
__m256i low_mask = _mm256_set1_epi8(0x0f);
__m256i lo = _mm256_and_si256(v, low_mask);
__m256i hi = _mm256_and_si256(_mm256_srli_epi32(v, 4), low_mask);
__m256i popcnt1 = _mm256_shuffle_epi8(lookup, lo);
__m256i popcnt2 = _mm256_shuffle_epi8(lookup, hi);
__m256i total = _mm256_add_epi8(popcnt1, popcnt2);
return _mm256_sad_epu8(total, _mm256_setzero_si256());
}
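/* count256() is the in-register step of Mula's AVX2 popcount: `lookup` stores
 * popcount(0)..popcount(15) once per 128-bit lane, the input is split into
 * low and high nibbles, both are run through _mm256_shuffle_epi8 as a 16-entry
 * table lookup, and _mm256_sad_epu8 against zero reduces the per-byte counts
 * to four 64-bit partial sums. get_count_mula() below adds those four lanes. */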
static inline int popcnt256_custom(__m256i n) {
__m256i val = count256(n);
//return val.m256i_i64[0] +
//val.m256i_i64[1] +
//val.m256i_i64[2] +
//val.m256i_i64[3];
return _mm256_extract_epi64(val, 0)
+ _mm256_extract_epi64(val, 1)
+ _mm256_extract_epi64(val, 2)
+ _mm256_extract_epi64(val, 3);
}
static inline void xnor_avx2_popcnt(__m256i a_bit256, __m256i b_bit256, __m256i *count_sum) {
__m256i c_bit256 = _mm256_set1_epi8((char)255);
__m256i xor256 = _mm256_xor_si256(a_bit256, b_bit256); // xnor = not(xor(a,b))
c_bit256 = _mm256_andnot_si256(xor256, c_bit256); // can be optimized: NOT the weights once up front instead of applying this NOT on every call
*count_sum = _mm256_add_epi64(count256(c_bit256), *count_sum); // 1st part - popcnt Mula's algorithm
}
// 2nd part - popcnt Mula's algorithm
static inline int get_count_mula(__m256i count_sum) {
return _mm256_extract_epi64(count_sum, 0)
+ _mm256_extract_epi64(count_sum, 1)
+ _mm256_extract_epi64(count_sum, 2)
+ _mm256_extract_epi64(count_sum, 3);
}
// roughly 5x faster than the float32 gemm()
// further optimizations: do mean-mult only for the last layer
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int i;
#if defined(_OPENMP)
static int max_num_threads = 0;
if (max_num_threads == 0) {
max_num_threads = omp_get_max_threads();
//omp_set_num_threads(max_num_threads / 2);
}
#endif
//#pragma omp parallel for
//for (i = 0; i < M; ++i)
#pragma omp parallel for
for (i = 0; i < (M/2)*2; i += 2)
{ // l.n - filters [16 - 55 - 1024]
float mean_val_0 = mean_arr[i + 0];
float mean_val_1 = mean_arr[i + 1];
int j, k;
//__m256i all_1 = _mm256_set1_epi8(255);
//for (j = 0; j < N; ++j)
for (j = 0; j < (N/2)*2; j += 2)
{ // out_h*out_w - one channel output size [169 - 173056]
//int count = 0;
const int bit_step = 256;
__m256i count_sum_0 = _mm256_set1_epi8(0);
__m256i count_sum_1 = _mm256_set1_epi8(0);
__m256i count_sum_2 = _mm256_set1_epi8(0);
__m256i count_sum_3 = _mm256_set1_epi8(0);
for (k = 0; k < K; k += bit_step) { // l.size*l.size*l.c - one filter size [27 - 9216]
__m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + 0)*lda + k) / 8));
__m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8));
__m256i a_bit256_1 = _mm256_loadu_si256((__m256i *)(A + ((i + 1)*lda + k) / 8));
__m256i b_bit256_1 = _mm256_loadu_si256((__m256i *)(B + ((j + 1)*ldb + k) / 8));
xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum_0);
xnor_avx2_popcnt(a_bit256_0, b_bit256_1, &count_sum_1);
xnor_avx2_popcnt(a_bit256_1, b_bit256_0, &count_sum_2);
xnor_avx2_popcnt(a_bit256_1, b_bit256_1, &count_sum_3);
//count += popcnt256(c_bit256);
//binary_int64_printf(c_bit64);
//printf(", count = %d \n\n", tmp_count);
}
int count_0 = get_count_mula(count_sum_0);
int count_1 = get_count_mula(count_sum_1);
int count_2 = get_count_mula(count_sum_2);
int count_3 = get_count_mula(count_sum_3);
const int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
count_0 = count_0 - f1; // remove extra bits (from empty space for align only)
count_1 = count_1 - f1;
count_2 = count_2 - f1;
count_3 = count_3 - f1;
C[i*ldc + (j + 0)] = (2 * count_0 - K) * mean_val_0;
C[i*ldc + (j + 1)] = (2 * count_1 - K) * mean_val_0;
C[(i + 1)*ldc + (j + 0)] = (2 * count_2 - K) * mean_val_1;
C[(i + 1)*ldc + (j + 1)] = (2 * count_3 - K) * mean_val_1;
}
int i_d;
for (i_d = 0; i_d < 2; ++i_d)
{
float mean_val = mean_arr[i + i_d];
for (j = (N / 2) * 2; j < N; j += 1)
{ // out_h*out_w - one channel output size [169 - 173056]
const int bit_step = 256;
__m256i count_sum = _mm256_set1_epi8(0);
for (k = 0; k < K; k += bit_step) { // l.size*l.size*l.c - one filter size [27 - 9216]
__m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + i_d + 0)*lda + k) / 8));
__m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8));
xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum);
}
int count = get_count_mula(count_sum);
const int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
count = count - f1; // remove extra bits (from empty space for align only)
C[(i + i_d)*ldc + j] = (2 * count - K) * mean_val;
}
}
}
for (i = (M / 2) * 2; i < M; i += 1)
{
float mean_val = mean_arr[i];
int j, k;
for (j = 0; j < N; j += 1)
{ // out_h*out_w - one channel output size [169 - 173056]
const int bit_step = 256;
__m256i count_sum = _mm256_set1_epi8(0);
for (k = 0; k < K; k += bit_step) { // l.size*l.size*l.c - one filter size [27 - 9216]
__m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + 0)*lda + k) / 8));
__m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8));
xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum);
}
int count = get_count_mula(count_sum);
const int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
count = count - f1; // remove extra bits (from empty space for align only)
C[i*ldc + j] = (2 * count - K) * mean_val;
}
}
}
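/* About the f1 correction above: K is consumed in bit_step = 256-bit chunks,
 * so when K is not a multiple of 256 the last chunk also reads
 * f1 = bit_step - (K % bit_step) padding bits past the real data. Assuming the
 * packed, aligned buffers zero-fill that padding (which the repacking code is
 * expected to guarantee), every padding bit counts as an xnor match of two
 * zeros, so subtracting f1 restores the popcount over the true K bits before
 * applying (2 * count - K) * mean_val. */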
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
void im2col_cpu_custom_transpose(float* data_im,
int channels, int height, int width,
int ksize, int stride, int pad, float* data_col, int ldb_align)
{
const int height_col = (height + 2 * pad - ksize) / stride + 1;
const int width_col = (width + 2 * pad - ksize) / stride + 1;
const int channels_col = channels * ksize * ksize;
int c;
// optimized version
if (height_col == height && width_col == width && stride == 1 && pad == 1)
{
#pragma omp parallel for
for (c = 0; c < channels_col; ++c) {
int h, w;
int w_offset = c % ksize;
int h_offset = (c / ksize) % ksize;
int c_im = c / ksize / ksize;
for (h = pad; h < height_col - pad; ++h) {
for (w = pad; w < width_col - pad - 4; w+=8) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
//data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
__m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
data_col[col_index + ldb_align * 0] = _mm256_extract_float32(src256, 0);// src256.m256_f32[0];
data_col[col_index + ldb_align * 1] = _mm256_extract_float32(src256, 1);// src256.m256_f32[1];
data_col[col_index + ldb_align * 2] = _mm256_extract_float32(src256, 2);// src256.m256_f32[2];
data_col[col_index + ldb_align * 3] = _mm256_extract_float32(src256, 3);// src256.m256_f32[3];
data_col[col_index + ldb_align * 4] = _mm256_extract_float32(src256, 4);// src256.m256_f32[4];
data_col[col_index + ldb_align * 5] = _mm256_extract_float32(src256, 5);// src256.m256_f32[5];
data_col[col_index + ldb_align * 6] = _mm256_extract_float32(src256, 6);// src256.m256_f32[6];
data_col[col_index + ldb_align * 7] = _mm256_extract_float32(src256, 7);// src256.m256_f32[7];
//_mm256_storeu_ps(&data_col[col_index], src256);
}
for (; w < width_col - pad; ++w) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
}
}
{
w = 0;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
{
w = width_col - 1;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
{
h = 0;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
{
h = height_col - 1;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
}
}
else {
#pragma omp parallel for
for (c = 0; c < channels_col; ++c) {
int h, w;
int w_offset = c % ksize;
int h_offset = (c / ksize) % ksize;
int c_im = c / ksize / ksize;
for (h = 0; h < height_col; ++h) {
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h * stride;
int im_col = w_offset + w * stride;
int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
}
}
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
void im2col_cpu_custom(float* data_im,
int channels, int height, int width,
int ksize, int stride, int pad, float* data_col)
{
int c;
const int height_col = (height + 2 * pad - ksize) / stride + 1;
const int width_col = (width + 2 * pad - ksize) / stride + 1;
const int channels_col = channels * ksize * ksize;
// optimized version
if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2())
{
#pragma omp parallel for
for (c = 0; c < channels_col; ++c) {
int h, w;
int w_offset = c % ksize;
int h_offset = (c / ksize) % ksize;
int c_im = c / ksize / ksize;
for (h = pad; h < height_col-pad; ++h) {
for (w = pad; w < width_col-pad-8; w += 8) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
int col_index = (c * height_col + h) * width_col + w;
//data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
__m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
_mm256_storeu_ps(&data_col[col_index], src256);
}
for (; w < width_col - pad; ++w) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
}
}
{
w = 0;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
{
w = width_col-1;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
{
h = 0;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
{
h = height_col-1;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
}
}
else {
//printf("\n Error: is no non-optimized version \n");
im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col);
}
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
void im2col_cpu_custom_align(float* data_im,
int channels, int height, int width,
int ksize, int stride, int pad, float* data_col, int bit_align)
{
int c;
const int height_col = (height + 2 * pad - ksize) / stride + 1;
const int width_col = (width + 2 * pad - ksize) / stride + 1;
const int channels_col = channels * ksize * ksize;
// optimized version
if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2())
{
int new_ldb = bit_align;
#pragma omp parallel for
for (c = 0; c < channels_col; ++c) {
int h, w;
int w_offset = c % ksize;
int h_offset = (c / ksize) % ksize;
int c_im = c / ksize / ksize;
for (h = pad; h < height_col - pad; ++h) {
for (w = pad; w < width_col - pad - 8; w += 8) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
__m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
_mm256_storeu_ps(&data_col[col_index], src256);
}
for (; w < width_col - pad; ++w) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
}
}
{
w = 0;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
}
}
{
w = width_col - 1;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
}
}
{
h = 0;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
}
}
{
h = height_col - 1;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
}
}
}
}
else {
printf("\n Error: is no non-optimized version \n");
//im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin
// float_to_bit(b, t_input, src_size);
// transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8);
}
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
void im2col_cpu_custom_bin(float* data_im,
int channels, int height, int width,
int ksize, int stride, int pad, float* data_col, int bit_align)
{
int c;
const int height_col = (height + 2 * pad - ksize) / stride + 1;
const int width_col = (width + 2 * pad - ksize) / stride + 1;
const int channels_col = channels * ksize * ksize;
// optimized version
if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2())
{
__m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
__m256 float_zero256 = _mm256_set1_ps(0.00);
int new_ldb = bit_align;
#pragma omp parallel for
for (c = 0; c < channels_col; ++c) {
int h, w;
int w_offset = c % ksize;
int h_offset = (c / ksize) % ksize;
int c_im = c / ksize / ksize;
for (h = pad; h < height_col - pad; ++h) {
for (w = pad; w < width_col - pad - 8; w += 8) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//__m256i src256 = _mm256_loadu_si256((__m256i *)(&data_im[im_col + width*(im_row + height*c_im)]));
//__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats
//uint16_t mask = _mm256_movemask_ps(_mm256_castsi256_ps(result256)); // (val >= 0) ? 0 : 1
//mask = ~mask; // inverse mask, (val >= 0) ? 1 : 0
__m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
__m256 result256 = _mm256_cmp_ps(src256, float_zero256, _CMP_GT_OS);
uint16_t mask = _mm256_movemask_ps(result256); // (val > 0) ? 1 : 0
uint16_t* dst_ptr = (uint16_t*)&((uint8_t*)data_col)[col_index / 8];
*dst_ptr |= (mask << (col_index % 8));
}
for (; w < width_col - pad; ++w) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
float val = data_im[im_col + width*(im_row + height*c_im)];
if (val > 0) set_bit((unsigned char* const)data_col, col_index);
}
}
{
w = 0;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
if (val > 0) set_bit((unsigned char* const)data_col, col_index);
}
}
{
w = width_col - 1;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
if (val > 0) set_bit((unsigned char* const)data_col, col_index);
}
}
{
h = 0;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
if (val > 0) set_bit((unsigned char* const)data_col, col_index);
}
}
{
h = height_col - 1;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
if (val > 0) set_bit((unsigned char* const)data_col, col_index);
}
}
}
}
else {
printf("\n Error: is no non-optimized version \n");
//im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin
// float_to_bit(b, t_input, src_size);
// transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8);
}
}
void activate_array_cpu_custom(float *x, const int n, const ACTIVATION a)
{
int i = 0;
if (a == LINEAR)
{}
else if (a == LEAKY)
{
if (is_fma_avx2()) {
__m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
__m256 all256_01 = _mm256_set1_ps(0.1F);
for (i = 0; i < n - 8; i += 8) {
//x[i] = (x[i]>0) ? x[i] : .1*x[i];
__m256 src256 = _mm256_loadu_ps(&x[i]);
__m256 mult256 = _mm256_mul_ps((src256), all256_01); // mult * 0.1
__m256i sign256 = _mm256_and_si256(_mm256_castps_si256(src256), all256_sing1); // check sign in 8 x 32-bit floats
__m256 result256 = _mm256_blendv_ps(src256, mult256, _mm256_castsi256_ps(sign256)); // (sign>0) ? src : mult;
_mm256_storeu_ps(&x[i], result256);
}
}
for (; i < n; ++i) {
x[i] = (x[i]>0) ? x[i] : .1*x[i];
}
}
else {
for (i = 0; i < n; ++i) {
x[i] = activate(x[i], a);
}
}
}
void float_to_bit(float *src, unsigned char *dst, size_t size)
{
size_t dst_size = size / 8 + 1;
memset(dst, 0, dst_size);
size_t i;
//__m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
__m256 float_zero256 = _mm256_set1_ps(0.0);
for (i = 0; i < size; i+=8)
{
//__m256i src256 = _mm256_loadu_si256((__m256i *)(&src[i]));
//__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats
//uint32_t mask = _mm256_movemask_ps(_mm256_castsi256_ps(result256)); // (val >= 0) ? 0 : 1
////mask = ~mask; // inverse mask, (val >= 0) ? 1 : 0
__m256 src256 = _mm256_loadu_ps((float *)(&src[i]));
__m256 result256 = _mm256_cmp_ps(src256, float_zero256, _CMP_GT_OS);
uint32_t mask = _mm256_movemask_ps(result256); // (val > 0) ? 1 : 0
dst[i / 8] = mask;
}
}
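/* float_to_bit packs signs-above-zero: _mm256_cmp_ps with _CMP_GT_OS yields an
 * all-ones lane for each float > 0, and _mm256_movemask_ps collapses the eight
 * lane flags into the low eight bits of `mask`, so each output byte records
 * the ">0" pattern of eight consecutive floats (bit 7 corresponds to src[i+7]).
 * The loop steps by 8 with no scalar tail, so `size` is effectively assumed to
 * be a multiple of 8. */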
static inline void transpose4x4_SSE(float *A, float *B, const int lda, const int ldb)
{
__m128 row1 = _mm_loadu_ps(&A[0 * lda]);
__m128 row2 = _mm_loadu_ps(&A[1 * lda]);
__m128 row3 = _mm_loadu_ps(&A[2 * lda]);
__m128 row4 = _mm_loadu_ps(&A[3 * lda]);
_MM_TRANSPOSE4_PS(row1, row2, row3, row4);
_mm_storeu_ps(&B[0 * ldb], row1);
_mm_storeu_ps(&B[1 * ldb], row2);
_mm_storeu_ps(&B[2 * ldb], row3);
_mm_storeu_ps(&B[3 * ldb], row4);
}
void transpose_block_SSE4x4(float *A, float *B, const int n, const int m,
const int lda, const int ldb, const int block_size)
{
int i;
#pragma omp parallel for
for (i = 0; i < n; i += block_size) {
int j, i2, j2;
//int max_i2 = (i + block_size < n) ? (i + block_size) : n;
if (i + block_size < n) {
int max_i2 = i + block_size;
for (j = 0; j < m; j += block_size) {
//int max_j2 = (j + block_size < m) ? (j + block_size) : m;
if (j + block_size < m) {
int max_j2 = j + block_size;
for (i2 = i; i2 < max_i2; i2 += 4) {
for (j2 = j; j2 < max_j2; j2 += 4) {
transpose4x4_SSE(&A[i2*lda + j2], &B[j2*ldb + i2], lda, ldb);
}
}
}
else {
for (i2 = i; i2 < max_i2; ++i2) {
for (j2 = j; j2 < m; ++j2) {
B[j2*ldb + i2] = A[i2*lda + j2];
}
}
}
}
}
else {
for (i2 = i; i2 < n; ++i2) {
for (j2 = 0; j2 < m; ++j2) {
B[j2*ldb + i2] = A[i2*lda + j2];
}
}
}
}
}
void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size, int w, int h, int out_w, int out_h, int c,
int pad, int stride, int batch)
{
const int w_offset = -pad / 2;
const int h_offset = -pad / 2;
int b, k;
for (b = 0; b < batch; ++b) {
#pragma omp parallel for
for (k = 0; k < c; ++k) {
int i, j, m, n;
for (i = 0; i < out_h; ++i) {
//for (j = 0; j < out_w; ++j) {
j = 0;
if(stride == 1 && is_avx() == 1) {
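// With stride 1, eight horizontally adjacent outputs have input windows shifted by
// one column each, so a single 8-wide unaligned load per (n, m) tap feeds the
// per-lane maximum of all eight outputs at once; the scalar loop further below
// handles the remaining columns. Note that indexes[] is left untouched on this path.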
for (j = 0; j < out_w - 8 - (size - 1); j += 8) {
int out_index = j + out_w*(i + out_h*(k + c*b));
__m256 max256 = _mm256_set1_ps(-FLT_MAX);
for (n = 0; n < size; ++n) {
for (m = 0; m < size; ++m) {
int cur_h = h_offset + i*stride + n;
int cur_w = w_offset + j*stride + m;
int index = cur_w + w*(cur_h + h*(k + b*c));
int valid = (cur_h >= 0 && cur_h < h &&
cur_w >= 0 && cur_w < w);
if (!valid) continue;
__m256 src256 = _mm256_loadu_ps(&src[index]);
max256 = _mm256_max_ps(src256, max256);
}
}
_mm256_storeu_ps(&dst[out_index], max256);
}
}
else if (size == 2 && stride == 2 && is_avx() == 1) {
for (j = 0; j < out_w - 4; j += 4) {
int out_index = j + out_w*(i + out_h*(k + c*b));
//float max = -FLT_MAX;
//int max_i = -1;
__m128 max128 = _mm_set1_ps(-FLT_MAX);
for (n = 0; n < size; ++n) {
//for (m = 0; m < size; ++m)
m = 0;
{
int cur_h = h_offset + i*stride + n;
int cur_w = w_offset + j*stride + m;
int index = cur_w + w*(cur_h + h*(k + b*c));
int valid = (cur_h >= 0 && cur_h < h &&
cur_w >= 0 && cur_w < w);
if (!valid) continue;
__m256 src256 = _mm256_loadu_ps(&src[index]);
__m256 src256_2 = _mm256_permute_ps(src256, (1 << 0) | (3 << 4));
__m256 max256 = _mm256_max_ps(src256, src256_2);
__m128 src128_0 = _mm256_extractf128_ps(max256, 0);
__m128 src128_1 = _mm256_extractf128_ps(max256, 1);
__m128 src128 = _mm_shuffle_ps(src128_0, src128_1, (2 << 2) | (2 << 6));
max128 = _mm_max_ps(src128, max128);
}
}
_mm_storeu_ps(&dst[out_index], max128);
}
}
for (; j < out_w; ++j) {
int out_index = j + out_w*(i + out_h*(k + c*b));
float max = -FLT_MAX;
int max_i = -1;
for (n = 0; n < size; ++n) {
for (m = 0; m < size; ++m) {
int cur_h = h_offset + i*stride + n;
int cur_w = w_offset + j*stride + m;
int index = cur_w + w*(cur_h + h*(k + b*c));
int valid = (cur_h >= 0 && cur_h < h &&
cur_w >= 0 && cur_w < w);
float val = (valid != 0) ? src[index] : -FLT_MAX;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
dst[out_index] = max;
indexes[out_index] = max_i;
}
}
}
}
}
#else // AVX
int is_avx() {
return 0;
}
int is_fma_avx2() {
return 0;
}
void gemm_nn(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i, j, k;
for (i = 0; i < M; ++i) {
for (k = 0; k < K; ++k) {
PUT_IN_REGISTER float A_PART = ALPHA * A[i * lda + k];
for (j = 0; j < N; ++j) {
C[i*ldc + j] += A_PART*B[k*ldb + j];
}
}
}
}
void gemm_nn_fast(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i, j, k;
#pragma omp parallel for
for (i = 0; i < M; ++i) {
for (k = 0; k < K; ++k) {
PUT_IN_REGISTER float A_PART = ALPHA*A[i*lda + k];
for (j = 0; j < N; ++j) {
C[i*ldc + j] += A_PART*B[k*ldb + j];
}
}
}
}
void gemm_nn_bin_32bit_packed(int M, int N, int K, float ALPHA,
uint32_t *A, int lda,
uint32_t *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int i;
#pragma omp parallel for
for (i = 0; i < M; ++i) { // l.n
int j, s;
float mean_val = mean_arr[i];
//printf(" l.mean_arr[i] = %d \n ", l.mean_arr[i]);
for (s = 0; s < K; ++s) // l.size*l.size*l.c/32 or (l.size*l.size*l.c)
{
//PUT_IN_REGISTER float A_PART = 1*a[i*k + s];
PUT_IN_REGISTER uint32_t A_PART = A[i * lda + s];
for (j = 0; j < N; ++j) // out_h*out_w;
{
//c[i*n + j] += A_PART*b[s*n + j];
PUT_IN_REGISTER uint32_t B_PART = B[s * ldb + j];
uint32_t xnor_result = ~(A_PART ^ B_PART);
//printf(" xnor_result = %d, ", xnor_result);
int32_t count = popcnt_32(xnor_result); // must be Signed int
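// XNOR-net dot product: with 32 weight/input pairs encoded as bits (1 = +1, 0 = -1),
// popcount(~(a ^ b)) counts matching pairs, so the signed dot product over the 32
// lanes is (matches - mismatches) = 2*count - 32, scaled by the filter's mean value.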
C[i*ldc + j] += (2 * count - 32) * mean_val;
//c[i*n + j] += count*mean;
}
}
}
}
void convolution_2d(int w, int h, int ksize, int n, int c, int pad, int stride,
float *weights, float *input, float *output, float *mean)
{
const int out_h = (h + 2 * pad - ksize) / stride + 1; // output_height=input_height for stride=1 and pad=1
const int out_w = (w + 2 * pad - ksize) / stride + 1; // output_width=input_width for stride=1 and pad=1
//int i, f, j;
int fil;
// filter index
#pragma omp parallel for // "omp parallel for" - automatic parallelization of loop by using OpenMP
for (fil = 0; fil < n; ++fil) {
int chan, y, x, f_y, f_x;
// channel index
for (chan = 0; chan < c; ++chan)
// input - y
for (y = 0; y < h; ++y)
// input - x
for (x = 0; x < w; ++x)
{
int const output_index = fil*w*h + y*w + x;
int const weights_pre_index = fil*c*ksize*ksize + chan*ksize*ksize;
int const input_pre_index = chan*w*h;
float sum = 0;
// filter - y
for (f_y = 0; f_y < ksize; ++f_y)
{
int input_y = y + f_y - pad;
// filter - x
for (f_x = 0; f_x < ksize; ++f_x)
{
int input_x = x + f_x - pad;
if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue;
int input_index = input_pre_index + input_y*w + input_x;
int weights_index = weights_pre_index + f_y*ksize + f_x;
sum += input[input_index] * weights[weights_index];
}
}
// l.output[filters][width][height] +=
// state.input[channels][width][height] *
// l.weights[filters][channels][filter_width][filter_height];
output[output_index] += sum;
}
}
}
static inline int popcnt_64(uint64_t val64) {
#ifdef WIN32 // Windows
#ifdef _WIN64 // Windows 64-bit
int tmp_count = __popcnt64(val64);
#else // Windows 32-bit
int tmp_count = __popcnt(val64);
tmp_count += __popcnt(val64 >> 32);
#endif
#else // Linux
#if defined(__x86_64__) || defined(__aarch64__) // Linux 64-bit
int tmp_count = __builtin_popcountll(val64);
#else // Linux 32-bit
int tmp_count = __builtin_popcount(val64);
tmp_count += __builtin_popcount(val64 >> 32);
#endif
#endif
return tmp_count;
}
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int i;
#pragma omp parallel for
for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024]
int j, k;
float mean_val = mean_arr[i];
for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
int count = 0;
for (k = 0; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216]
uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8));
uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8));
uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);
int tmp_count = popcnt_64(c_bit64);
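// When fewer than 64 valid bits remain, the padding bits beyond K are presumably
// zero in both operands, so the XNOR counts them as matches; subtract them here.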
if (K - k < 64) tmp_count = tmp_count - (64 - (K - k)); // remove extra bits
count += tmp_count;
//binary_int64_printf(c_bit64);
//printf(", count = %d \n\n", tmp_count);
}
C[i*ldc + j] = (2 * count - K) * mean_val;
}
}
}
void im2col_cpu_custom_transpose(float* data_im,
int channels, int height, int width,
int ksize, int stride, int pad, float* data_col, int ldb_align)
{
printf("\n im2col_cpu_custom_transpose() isn't implemented without AVX \n");
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
void im2col_cpu_custom(float* data_im,
int channels, int height, int width,
int ksize, int stride, int pad, float* data_col)
{
im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col);
return;
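// The early return above makes everything below unreachable; the remaining body
// appears to be kept only to mirror the structure of the AVX build of this function.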
int c;
const int height_col = (height + 2 * pad - ksize) / stride + 1;
const int width_col = (width + 2 * pad - ksize) / stride + 1;
const int channels_col = channels * ksize * ksize;
// optimized version
if (height_col == height && width_col == width && stride == 1 && pad == 1)
{
#pragma omp parallel for
for (c = 0; c < channels_col; ++c) {
int h, w;
int w_offset = c % ksize;
int h_offset = (c / ksize) % ksize;
int c_im = c / ksize / ksize;
for (h = pad; h < height_col - pad; ++h) {
for (w = pad; w < width_col - pad; ++w) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
}
for (; w < width_col - pad; ++w) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
}
}
{
w = 0;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
{
w = width_col - 1;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
{
h = 0;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
{
h = height_col - 1;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
}
}
else {
//printf("\n Error: is no non-optimized version \n");
im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col);
}
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
void im2col_cpu_custom_bin(float* data_im,
int channels, int height, int width,
int ksize, int stride, int pad, float* data_col, int bit_align)
{
int c;
const int height_col = (height + 2 * pad - ksize) / stride + 1;
const int width_col = (width + 2 * pad - ksize) / stride + 1;
const int channels_col = channels * ksize * ksize;
// optimized version
if (height_col == height && width_col == width && stride == 1 && pad == 1)
{
int new_ldb = bit_align;
#pragma omp parallel for
for (c = 0; c < channels_col; ++c) {
int h, w;
int w_offset = c % ksize;
int h_offset = (c / ksize) % ksize;
int c_im = c / ksize / ksize;
for (h = pad; h < height_col - pad; ++h) {
for (w = pad; w < width_col - pad - 8; w += 1) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
float val = data_im[im_col + width*(im_row + height*c_im)];
if (val > 0) set_bit((unsigned char*)data_col, col_index);
}
for (; w < width_col - pad; ++w) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
float val = data_im[im_col + width*(im_row + height*c_im)];
if (val > 0) set_bit((unsigned char*)data_col, col_index);
}
}
{
w = 0;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
if (val > 0) set_bit((unsigned char*)data_col, col_index);
}
}
{
w = width_col - 1;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
if (val > 0) set_bit((unsigned char*)data_col, col_index);
}
}
{
h = 0;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
if (val > 0) set_bit((unsigned char*)data_col, col_index);
}
}
{
h = height_col - 1;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
if (val > 0) set_bit((unsigned char*)data_col, col_index);
}
}
}
}
else {
printf("\n Error: is no non-optimized version \n");
//im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin
// float_to_bit(b, t_input, src_size);
// transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8);
}
}
void activate_array_cpu_custom(float *x, const int n, const ACTIVATION a)
{
int i;
if (a == LINEAR)
{
}
else if (a == LEAKY)
{
for (i = 0; i < n; ++i) {
x[i] = (x[i]>0) ? x[i] : .1*x[i];
}
}
else {
for (i = 0; i < n; ++i) {
x[i] = activate(x[i], a);
}
}
}
void float_to_bit(float *src, unsigned char *dst, size_t size)
{
size_t dst_size = size / 8 + 1;
memset(dst, 0, dst_size);
size_t i;
char* byte_arr = (char*)calloc(size + 8, sizeof(char)); // padded so the 8-wide packing loop below cannot read past the end when size is not a multiple of 8
for (i = 0; i < size; ++i) {
if (src[i] > 0) byte_arr[i] = 1;
}
//for (i = 0; i < size; ++i) {
// dst[i / 8] |= byte_arr[i] << (i % 8);
//}
for (i = 0; i < size; i += 8) {
char dst_tmp = 0;
dst_tmp |= byte_arr[i + 0] << 0;
dst_tmp |= byte_arr[i + 1] << 1;
dst_tmp |= byte_arr[i + 2] << 2;
dst_tmp |= byte_arr[i + 3] << 3;
dst_tmp |= byte_arr[i + 4] << 4;
dst_tmp |= byte_arr[i + 5] << 5;
dst_tmp |= byte_arr[i + 6] << 6;
dst_tmp |= byte_arr[i + 7] << 7;
dst[i / 8] = dst_tmp;
}
free(byte_arr);
}
static inline void transpose_scalar_block(float *A, float *B, const int lda, const int ldb, const int block_size)
{
int i;
//#pragma omp parallel for
for (i = 0; i<block_size; i++) {
int j;
for (j = 0; j<block_size; j++) {
B[j*ldb + i] = A[i*lda + j];
}
}
}
void transpose_block_SSE4x4(float *A, float *B, const int n, const int m,
const int lda, const int ldb, const int block_size)
{
int i;
#pragma omp parallel for
for (i = 0; i < n; i += block_size) {
int j, i2, j2;
for (j = 0; j < m; j += block_size) {
int max_i2 = i + block_size < n ? i + block_size : n;
int max_j2 = j + block_size < m ? j + block_size : m;
for (i2 = i; i2 < max_i2; ++i2) {
for (j2 = j; j2 < max_j2; ++j2) {
B[j2*ldb + i2] = A[i2*lda + j2];
}
}
}
}
}
void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size, int w, int h, int out_w, int out_h, int c,
int pad, int stride, int batch)
{
int b, k;
const int w_offset = -pad / 2;
const int h_offset = -pad / 2;
for (b = 0; b < batch; ++b) {
#pragma omp parallel for
for (k = 0; k < c; ++k) {
int i, j, m, n;
for (i = 0; i < out_h; ++i) {
for (j = 0; j < out_w; ++j) {
int out_index = j + out_w*(i + out_h*(k + c*b));
float max = -FLT_MAX;
int max_i = -1;
for (n = 0; n < size; ++n) {
for (m = 0; m < size; ++m) {
int cur_h = h_offset + i*stride + n;
int cur_w = w_offset + j*stride + m;
int index = cur_w + w*(cur_h + h*(k + b*c));
int valid = (cur_h >= 0 && cur_h < h &&
cur_w >= 0 && cur_w < w);
float val = (valid != 0) ? src[index] : -FLT_MAX;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
dst[out_index] = max;
indexes[out_index] = max_i;
}
}
}
}
}
#endif // AVX
// 32 channels -> 1 channel (with 32 floats)
// 256 channels -> 8 channels (with 32 floats)
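// Example: with c = 64 and pixel index i, channels 0..31 of pixel i end up as 32
// consecutive floats at re_packed_input[i*32 .. i*32+31]; channels 32..63 land at
// the same position within the next block of 32*w*h floats, ready for bit packing.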
void repack_input(float *input, float *re_packed_input, int w, int h, int c)
{
const int items_per_channel = w * h;
int chan, i;
for (chan = 0; chan < c; chan += 32)
{
for (i = 0; i < items_per_channel; ++i)
{
int c_pack;
for (c_pack = 0; c_pack < 32; ++c_pack) {
float src = input[(chan + c_pack)*items_per_channel + i];
re_packed_input[chan*items_per_channel + i * 32 + c_pack] = src;
}
}
}
}
void transpose_uint32(uint32_t *src, uint32_t *dst, int src_h, int src_w, int src_align, int dst_align)
{
//l.bit_align - aligned (n) by 32
//new_ldb - aligned (k) by 256
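// dst_align appears to be given in bits, so dst_align / 32 yields the destination
// row stride in 32-bit words, while src_align is already a stride in uint32_t elements.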
int i;
//#pragma omp parallel for
for (i = 0; i < src_h; i += 1) // l.size*l.size*l.c;
{
int j;
for (j = 0; j < src_w; j += 1) // out_h*out_w;
{
((uint32_t *)dst)[j*dst_align / 32 + i] = ((uint32_t *)src)[i*src_align + j];
}
}
}
void gemm_nn_bin_transposed_32bit_packed(int M, int N, int K, float ALPHA,
uint32_t *A, int lda,
uint32_t *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int i;
#pragma omp parallel for
for (i = 0; i < M; ++i) { // l.n
int j, s;
float mean_val = mean_arr[i];
for (j = 0; j < N; ++j) // out_h*out_w;
{
float val = 0;
for (s = 0; s < K; ++s) // l.size*l.size*l.c/32 or (l.size*l.size*l.c)
{
PUT_IN_REGISTER uint32_t A_PART = ((uint32_t*)A)[i*lda + s];
PUT_IN_REGISTER uint32_t B_PART = ((uint32_t*)B)[j * ldb + s];
uint32_t xnor_result = ~(A_PART ^ B_PART);
int32_t count = popcnt_32(xnor_result); // must be Signed int
val += (2 * count - 32) * mean_val;
}
C[i*ldc + j] += val;
}
}
}
void convolution_repacked(uint32_t *packed_input, uint32_t *packed_weights, float *output,
int w, int h, int c, int n, int size, int pad, int new_lda, float *mean_arr)
{
int fil;
// filter index
#pragma omp parallel for
for (fil = 0; fil < n; ++fil) {
float mean_val = mean_arr[fil];
int chan, y, x, f_y, f_x; // c_pack
// channel index
for (chan = 0; chan < c / 32; ++chan)
//for (chan = 0; chan < l.c; chan += 32)
//for (c_pack = 0; c_pack < 32; ++c_pack)
// input - y
for (y = 0; y < h; ++y)
// input - x
for (x = 0; x < w; ++x)
{
int const output_index = fil*w*h + y*w + x;
float sum = 0;
// filter - y
for (f_y = 0; f_y < size; ++f_y)
{
int input_y = y + f_y - pad;
// filter - x
for (f_x = 0; f_x < size; ++f_x)
{
int input_x = x + f_x - pad;
if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue;
// normal
//float input = state.input[(chan + c_pack)*l.w*l.h + input_y*l.w + input_x];
//float weight = l.weights[fil*l.c*l.size*l.size + (chan + c_pack)*l.size*l.size + f_y*l.size + f_x];
// packed
//float input = re_packed_input[chan*l.w*l.h + (input_y*l.w + input_x) * 32 + c_pack];
//float weight = l.weights[fil*l.c*l.size*l.size + chan*l.size*l.size + (f_y*l.size + f_x) * 32 + c_pack];
//sum += input * weight;
//float input = re_packed_input[chan*l.w*l.h + (input_y*l.w + input_x) * 32 + c_pack];
//float weight = l.weights[fil*l.c*l.size*l.size + chan*l.size*l.size + (f_y*l.size + f_x) * 32 + c_pack];
//uint32_t bit1 = input > 0;
//uint32_t bit2 = weight > 0;
//uint32_t count = (~(bit1 ^ bit2)) & 1;
//float result = (2 * (float)count - 1) * mean_val;
//printf("\n mul = %f, bit1 = %d, bit2 = %d, count = %d, mean = %f, result = %f ", input*weight, bit1, bit2, count, mean_val, result);
//sum += result;
uint32_t input = ((uint32_t *)packed_input)[chan*w*h + input_y*w + input_x];
//uint32_t weight = ((uint32_t *)l.align_bit_weights)[fil*l.c*l.size*l.size/32 + chan*l.size*l.size + f_y*l.size + f_x];
uint32_t weight = ((uint32_t *)packed_weights)[fil*new_lda / 32 + chan*size*size + f_y*size + f_x];
uint32_t xnor_result = ~(input ^ weight);
int32_t count = popcnt_32(xnor_result); // mandatory Signed int
sum += (2 * count - 32) * mean_val;
}
}
// l.output[filters][width][height] +=
// state.input[channels][width][height] *
// l.weights[filters][channels][filter_width][filter_height];
output[output_index] += sum;
}
}
}
void gemm_nt(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
for(i = 0; i < M; ++i){
for(j = 0; j < N; ++j){
PUT_IN_REGISTER float sum = 0;
for(k = 0; k < K; ++k){
sum += ALPHA*A[i*lda+k]*B[j*ldb + k];
}
C[i*ldc+j] += sum;
}
}
}
void gemm_tn(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
for(i = 0; i < M; ++i){
for(k = 0; k < K; ++k){
PUT_IN_REGISTER float A_PART = ALPHA * A[k * lda + i];
for(j = 0; j < N; ++j){
C[i*ldc+j] += A_PART*B[k*ldb+j];
}
}
}
}
void gemm_tt(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
for(i = 0; i < M; ++i){
for(j = 0; j < N; ++j){
PUT_IN_REGISTER float sum = 0;
for(k = 0; k < K; ++k){
sum += ALPHA*A[i+k*lda]*B[k+j*ldb];
}
C[i*ldc+j] += sum;
}
}
}
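// gemm_cpu computes C = ALPHA * op(A) * op(B) + BETA * C in row-major storage,
// where op(X) is X or its transpose depending on TA/TB. C is first scaled by BETA,
// then either the fused AVX2 kernel (gemm_nn_fast) or one of the four scalar kernels
// accumulates the product, one row of C per iteration of the OpenMP loop in the
// generic path.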
void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc)
{
//printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc);
if (BETA != 1){
int i, j;
for(i = 0; i < M; ++i){
for(j = 0; j < N; ++j){
C[i*ldc + j] *= BETA;
}
}
}
is_avx(); // initialize static variable
if (is_fma_avx2() && !TA && !TB) {
gemm_nn_fast(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
}
else {
int t;
#pragma omp parallel for
for (t = 0; t < M; ++t) {
if (!TA && !TB)
gemm_nn(1, N, K, ALPHA, A + t*lda, lda, B, ldb, C + t*ldc, ldc);
else if (TA && !TB)
gemm_tn(1, N, K, ALPHA, A + t, lda, B, ldb, C + t*ldc, ldc);
else if (!TA && TB)
gemm_nt(1, N, K, ALPHA, A + t*lda, lda, B, ldb, C + t*ldc, ldc);
else
gemm_tt(1, N, K, ALPHA, A + t, lda, B, ldb, C + t*ldc, ldc);
}
}
}
#ifdef GPU
#include <math.h>
void gemm_ongpu(int TA, int TB, int M, int N, int K, float ALPHA,
float *A_gpu, int lda,
float *B_gpu, int ldb,
float BETA,
float *C_gpu, int ldc)
{
cublasHandle_t handle = blas_handle();
cudaError_t stream_status = (cudaError_t)cublasSetStream(handle, get_cuda_stream());
CHECK_CUDA(stream_status);
cudaError_t status = (cudaError_t)cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N),
(TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc);
CHECK_CUDA(status);
}
void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc)
{
float *A_gpu = cuda_make_array(A, (TA ? lda*K:lda*M));
float *B_gpu = cuda_make_array(B, (TB ? ldb*N : ldb*K));
float *C_gpu = cuda_make_array(C, ldc*M);
gemm_ongpu(TA, TB, M, N, K, ALPHA, A_gpu, lda, B_gpu, ldb, BETA, C_gpu, ldc);
cuda_pull_array(C_gpu, C, ldc*M);
cuda_free(A_gpu);
cuda_free(B_gpu);
cuda_free(C_gpu);
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
void time_gpu_random_matrix(int TA, int TB, int m, int k, int n)
{
float *a;
if(!TA) a = random_matrix(m,k);
else a = random_matrix(k,m);
int lda = (!TA)?k:m;
float *b;
if(!TB) b = random_matrix(k,n);
else b = random_matrix(n,k);
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
int i;
clock_t start = clock(), end;
for(i = 0; i<32; ++i){
gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
}
end = clock();
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
free(a);
free(b);
free(c);
}
void time_ongpu(int TA, int TB, int m, int k, int n)
{
int iter = 10;
float *a = random_matrix(m,k);
float *b = random_matrix(k,n);
int lda = (!TA)?k:m;
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
float *a_cl = cuda_make_array(a, m*k);
float *b_cl = cuda_make_array(b, k*n);
float *c_cl = cuda_make_array(c, m*n);
int i;
clock_t start = clock(), end;
for(i = 0; i<iter; ++i){
gemm_ongpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n);
cudaDeviceSynchronize();
}
double flop = ((double)m)*n*(2.*k + 2.)*iter;
double gflop = flop/pow(10., 9);
end = clock();
double seconds = sec(end-start);
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds);
cuda_free(a_cl);
cuda_free(b_cl);
cuda_free(c_cl);
free(a);
free(b);
free(c);
}
void test_gpu_accuracy(int TA, int TB, int m, int k, int n)
{
srand(0);
float *a;
if(!TA) a = random_matrix(m,k);
else a = random_matrix(k,m);
int lda = (!TA)?k:m;
float *b;
if(!TB) b = random_matrix(k,n);
else b = random_matrix(n,k);
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
float *c_gpu = random_matrix(m,n);
memset(c, 0, m*n*sizeof(float));
memset(c_gpu, 0, m*n*sizeof(float));
int i;
//pm(m,k,b);
gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n);
//printf("GPU\n");
//pm(m, n, c_gpu);
gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
//printf("\n\nCPU\n");
//pm(m, n, c);
double sse = 0;
for(i = 0; i < m*n; ++i) {
//printf("%f %f\n", c[i], c_gpu[i]);
sse += pow(c[i]-c_gpu[i], 2);
}
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n));
free(a);
free(b);
free(c);
free(c_gpu);
}
int test_gpu_blas()
{
/*
test_gpu_accuracy(0,0,10,576,75);
test_gpu_accuracy(0,0,17,10,10);
test_gpu_accuracy(1,0,17,10,10);
test_gpu_accuracy(0,1,17,10,10);
test_gpu_accuracy(1,1,17,10,10);
test_gpu_accuracy(0,0,1000,10,100);
test_gpu_accuracy(1,0,1000,10,100);
test_gpu_accuracy(0,1,1000,10,100);
test_gpu_accuracy(1,1,1000,10,100);
test_gpu_accuracy(0,0,10,10,10);
time_ongpu(0,0,64,2916,363);
time_ongpu(0,0,64,2916,363);
time_ongpu(0,0,64,2916,363);
time_ongpu(0,0,192,729,1600);
time_ongpu(0,0,384,196,1728);
time_ongpu(0,0,256,196,3456);
time_ongpu(0,0,256,196,2304);
time_ongpu(0,0,128,4096,12544);
time_ongpu(0,0,128,4096,4096);
*/
time_ongpu(0,0,64,75,12544);
time_ongpu(0,0,64,75,12544);
time_ongpu(0,0,64,75,12544);
time_ongpu(0,0,64,576,12544);
time_ongpu(0,0,256,2304,784);
time_ongpu(1,1,2304,256,784);
time_ongpu(0,0,512,4608,196);
time_ongpu(1,1,4608,512,196);
return 0;
}
#endif
|
matrix.c | //
// Created by vmachado on 2/11/20.
//
#include "matrix.h"
matrix* matrix_alloc(int rows, int columns) {
int len = rows * columns;
matrix *out = aalloc(sizeof(*out));
out->data = aalloc(sizeof(float) * len);
out->rows = rows;
out->columns = columns;
return out;
}
static inline matrix *internal_alloc(int rows, int columns) {
matrix *out = aalloc(sizeof(*out));
out->data = aalloc(sizeof(float) * rows * columns);
out->rows = rows;
out->columns = columns;
return out;
}
void matrix_free(matrix *src) {
if (src != NULL) {
free(src->data);
free(src);
}
}
matrix* transposed(matrix *src) {
matrix *out = internal_alloc(src->columns, src->rows);
#pragma omp parallel for
for (int i = 0; i < src->rows; i++) {
register float *src_row = src->data + i * src->columns;
for (int j = 0; j < src->columns; j++) {
out->data[j * src->rows + i] = src_row[j];
}
}
return out;
}
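// Row-wise softmax: the row maximum is subtracted before exponentiation so every
// expf() argument is <= 0, which avoids overflow without changing the result,
// since softmax is invariant to adding a constant to all inputs of a row.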
matrix* softmaxed(matrix *src) {
matrix *out = internal_alloc(src->rows, src->columns);
#pragma omp parallel for
for (int i = 0; i < src->rows; i++) {
int j;
float sum = 0, max = -FLT_MAX;
register float *src_row = src->data + i * src->columns;
register float *out_row = out->data + i * src->columns;
for (j = 0; j < src->columns; j++) {
if (src_row[j] > max) {
max = src_row[j];
}
}
//#pragma omp simd reduction(+: sum)
for (j = 0; j < src->columns; j++) {
out_row[j] = expf(src_row[j] - max);
sum += out_row[j];
}
float inv_sum = 1.0f / sum;
for (j = 0; j < src->columns; j++) {
out_row[j] *= inv_sum;
}
}
return out;
}
void softmax(matrix *src) {
#pragma omp parallel for
for (int i = 0; i < src->rows; i++) {
int j;
float sum = 0, max = -FLT_MAX;
register float *src_row = src->data + i * src->columns;
for (j = 0; j < src->columns; j++) {
if (src_row[j] > max) {
max = src_row[j];
}
}
//#pragma omp simd reduction(+: sum)
for (j = 0; j < src->columns; j++) {
src_row[j] = expf(src_row[j] - max);
sum += src_row[j];
}
const float inv_sum = 1.0f / sum;
for (j = 0; j < src->columns; j++) {
src_row[j] *= inv_sum;
}
}
}
matrix *multiply(matrix *src, matrix *in, bool tra, bool trb, int m, int n, int k) {
matrix *out = internal_alloc(m, n);
gemm(tra, trb, m, n, k, src->data, in->data, 1.0f, out->data);
return out;
}
matrix* sum(matrix *src, matrix *in, float scalar) {
assert((src->rows == in->rows) && (src->columns == in->columns));
matrix *out = internal_alloc(src->rows, src->columns);
int len = src->rows * src->columns;
#pragma omp parallel for simd
for (int i = 0; i < len; i++) {
out->data[i] = src->data[i] + (in->data[i] * scalar);
}
return out;
}
void apply_sum(matrix *src, matrix *in, float scalar) {
assert((src->rows == in->rows) && (src->columns == in->columns));
int len = src->rows * src->columns;
#pragma omp parallel for simd
for (int i = 0; i < len; i++) {
src->data[i] += (in->data[i] * scalar);
}
}
matrix* elemwise_mul(matrix *src, matrix* in) {
assert((src->rows == in->rows) && (src->columns == in->columns));
int len = src->rows * src->columns;
matrix *out = internal_alloc(src->rows, src->columns);
#pragma omp parallel for simd
for (int i = 0; i < len; i++) {
out->data[i] = src->data[i] * in->data[i];
}
return out;
}
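// mean()/variance() below compute per-channel batch-norm statistics; the indexing
// src->data + spatial * (b * channels + c) implies each row is laid out channel-major
// (all `spatial` values of channel 0, then channel 1, ...), i.e. an NCHW-style layout.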
matrix* mean(matrix *src, int spatial, int channels) {
matrix *out = matrix_alloc(1, channels);
const float n_inv = 1.0f / (float)(src->rows * spatial);
#pragma omp parallel for
for (int c = 0; c < channels; c++) {
register float sum = 0.0f;
for (int b = 0; b < src->rows; b++) {
register float *src_ptr = src->data + spatial * (b * channels + c);
for (int i = 0; i < spatial; i++) {
sum += src_ptr[i];
}
}
out->data[c] = sum * n_inv;
}
return out;
}
matrix* variance(matrix *src, matrix *mean, int spatial, int channels) {
matrix *out = matrix_alloc(1, channels);
const float n_inv = 1.0f / (float)(src->rows * spatial);
#pragma omp parallel for
for (int c = 0; c < channels; c++) {
register float sum = 0.0f;
register float curr_mean = mean->data[c];
for (int b = 0; b < src->rows; b++) {
register float *src_ptr = src->data + spatial * (b * channels + c);
for (int i = 0; i < spatial; i++) {
float diff = src_ptr[i] - curr_mean;
sum += diff * diff;
}
}
out->data[c] = sum * n_inv;
}
return out;
}
void normalize(matrix *src, matrix *mean, matrix *variance, int spatial, int channels) {
const float eps = 1e-5f;
#pragma omp parallel for
for (int b = 0; b < src->rows; b++) {
for (int c = 0; c < channels; c++) {
register float *src_ptr = src->data + spatial * (b * channels + c);
for (int i = 0; i < spatial; i++) {
src_ptr[i] = (src_ptr[i] - mean->data[c]) / sqrtf(variance->data[c] + eps);
}
}
}
}
matrix* normalized(matrix *src, matrix *mean, matrix *variance, int spatial, int channels) {
matrix *out = matrix_alloc(src->rows, src->columns);
const float eps = 1e-5f;
#pragma omp parallel for
for (int b = 0; b < src->rows; b++) {
for (int c = 0; c < channels; c++) {
register float *src_ptr = src->data + spatial * (b * channels + c);
register float *out_ptr = out->data + spatial * (b * channels + c);
for (int i = 0; i < spatial; i++) {
out_ptr[i] = (src_ptr[i] - mean->data[c]) / sqrtf(variance->data[c] + eps);
}
}
}
return out;
}
matrix* scale_shifted(matrix *src, matrix *gamma, matrix *beta, int channels, int spatial) {
matrix *out = matrix_alloc(src->rows, src->columns);
#pragma omp parallel for
for (int b = 0; b < src->rows; b++) {
for (int c = 0; c < channels; c++) {
register float *src_ptr = src->data + spatial * (b * channels + c);
register float *out_ptr = out->data + spatial * (b * channels + c);
for (int i = 0; i < spatial; i++) {
out_ptr[i] = (src_ptr[i] * gamma->data[c]) + beta->data[c];
}
}
}
return out;
}
matrix* sum_rows(matrix *src) { //profile
matrix *out = matrix_alloc(1, src->columns);
for (int i = 0; i < src->rows; i++) {
register float* src_row = src->data + i * src->columns;
for (int j = 0; j < src->columns; j++) {
out->data[j] += src_row[j];
}
}
return out;
}
matrix* sum_columns(matrix *src) { //profile
matrix *out = matrix_alloc(1, src->rows);
#pragma omp parallel for
for (int i = 0; i < src->rows; i++) {
register float* src_row = src->data + i * src->columns;
register float sum = 0;
#pragma omp simd reduction(+: sum)
for (int j = 0; j < src->columns; j++) {
sum += src_row[j];
}
out->data[i] = sum;
}
return out;
}
void randomize(matrix *src, float mean, float stddev) {
srand((unsigned)time(0));
int len = src->rows * src->columns;
float u, v;
int i;
for (i = 0; i + 1 < len; i += 2) {
get_gauss(mean, stddev, &u, &v);
src->data[i] = u;
src->data[i + 1] = v;
}
if (i < len) {
// odd length: fill the last element without writing past the end of data
get_gauss(mean, stddev, &u, &v);
src->data[i] = u;
}
}
float sum_elem(matrix *src) {
float sum = 0;
int len = src->rows * src->columns;
#pragma omp parallel for reduction(+: sum)
for (int i = 0; i < len; i++) {
sum += src->data[i];
}
return sum;
}
matrix* mat_copy(matrix *src) {
matrix *out = internal_alloc(src->rows, src->columns);
int len = src->rows * src->columns;
#pragma omp parallel for
for (int i = 0; i < len; i++) {
out->data[i] = src->data[i];
}
return out;
}
|
GB_binop__eq_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__eq_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__eq_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__eq_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__eq_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_int64)
// A*D function (colscale): GB (_AxD__eq_int64)
// D*A function (rowscale): GB (_DxB__eq_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__eq_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__eq_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_int64)
// C=scalar+B GB (_bind1st__eq_int64)
// C=scalar+B' GB (_bind1st_tran__eq_int64)
// C=A+scalar GB (_bind2nd__eq_int64)
// C=A'+scalar GB (_bind2nd_tran__eq_int64)
// C type: bool
// A type: int64_t
// A pattern? 0
// B type: int64_t
// B pattern? 0
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EQ || GxB_NO_INT64 || GxB_NO_EQ_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__eq_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__eq_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__eq_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__eq_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__eq_int64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__eq_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int64_t alpha_scalar ;
int64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
beta_scalar = (*((int64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__eq_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__eq_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__eq_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__eq_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__eq_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = (x == bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__eq_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij == y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x == aij) ; \
}
GrB_Info GB (_bind1st_tran__eq_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij == y) ; \
}
GrB_Info GB (_bind2nd_tran__eq_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
eval.c | /*****************************************************************************
*
* Elmer, A Finite Element Software for Multiphysical Problems
*
* Copyright 1st April 1995 - , CSC - IT Center for Science Ltd., Finland
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library (in file ../LGPL-2.1); if not, write
* to the Free Software Foundation, Inc., 51 Franklin Street,
* Fifth Floor, Boston, MA 02110-1301 USA
*
*****************************************************************************/
/*******************************************************************************
*
* Evaluate expression trees in the format parsed by parser.c.
*
*******************************************************************************
*
* Author: Juha Ruokolainen
*
* Address: CSC - IT Center for Science Ltd.
* Keilaranta 14, P.O. BOX 405
* 02101 Espoo, Finland
* Tel. +358 0 457 2723
* Telefax: +358 0 457 2302
* EMail: Juha.Ruokolainen@csc.fi
*
* Date: 30 May 1996
*
* Modified by:
*
* Date of modification:
*
******************************************************************************/
/***********************************************************************
|
| EVAL.C - Last Edited 6. 8. 1988
|
***********************************************************************/
#include "../config.h"
/*======================================================================
|Syntax of the manual pages:
|
|FUNCTION NAME(...) params ...
|
$ usage of the function and type of the parameters
? explain the effects of the function
= return value and the type of value if not of type int
@ globals effected directly by this routine
! current known bugs or limitations
& functions called by this function
~ these functions may interest you as an alternative function or
| because they control this function somehow
^=====================================================================*/
/*
* $Id: eval.c,v 1.5 2006/02/02 06:51:16 jpr Exp $
*
* $Log: eval.c,v $
* Revision 1.4 2005/08/25 13:44:22 vierinen
* windoze stuff
*
* Revision 1.3 2005/05/27 12:26:20 vierinen
* changed header install location
*
* Revision 1.2 2005/05/26 12:34:53 vierinen
* windows stuff
*
* Revision 1.1.1.1 2005/04/14 13:29:14 vierinen
* initial matc automake package
*
* Revision 1.2 1998/08/01 12:34:35 jpr
*
* Added Id, started Log.
*
*
*/
#include "elmer/matc.h"
VARIABLE *evaltree(root) TREE *root;
/*======================================================================
? Evaluate the equation tree (real hard). The tree might be a list of
| trees. If there is more than one tree in the list, the return value
| will be a single column vector, the result from each tree is
| concatenated to this vector. If there's only one tree, return
| value is tree's evaluated result.
|
& strlen(), fopen(), fclose(), var_temp_new(), var_temp_copy(),
| var_delete_temp(), com_check(), var_check(), fnc_check(),
| fnc_exec(), com_pointw(), com_source()
|
^=====================================================================*/
{
VARIABLE *first, /* used to hold start address for */
/* temporary list of VARIABLES, the */
/* evaluated trees results */
*subs, /* indexes for tree elements */
*par, /* parameters for tree elements */
*res, /* result is returned in this var. */
/* probably used as a temporary too */
*leftptr, /* result from left leaf */
*rightptr, /* result from right leaf */
*tmp, /* can't manage without these */
*tmp1;
FUNCTION *fnc; /* needed, if someone's using defined functions */
COMMAND *com; /* needed, maybe */
MATRIX *opres; /* don't know really */
TREE *argptr; /* temporary */
int i, /* there's always reason for these */
dim, /* the final dimension of the return value */
argcount; /* parameter count */
char *resbeg; /* for memcpying */
FILE *fp; /* used to check if a file exists */
if (root == NULL) return NULL;
dim = 0; first = NULL;
/*-------------------------------------------------------------------*/
/*
while there are trees in the list.
*/
while(root)
{
subs = par = tmp = NULL;
/*
check if the result will be indexed
*/
argptr = SUBS(root);
if (argptr)
{
subs = tmp = evaltree(argptr);
argptr = NEXT(argptr);
while(argptr)
{
NEXT(tmp) = evaltree(argptr);
argptr = NEXT(argptr); tmp = NEXT(tmp);
}
}
switch(ETYPE(root))
{
/******************************************************
some kind of existing identifier.
*******************************************************/
case ETYPE_NAME:
/*
is there parameters for this one ?
and how many ?
*/
argptr = ARGS(root);
argcount = 0;
if (argptr)
{
par = tmp = evaltree(argptr);
argptr = NEXT(argptr);
argcount++;
while(argptr)
{
argcount++;
NEXT(tmp) = evaltree(argptr);
argptr = NEXT(argptr); tmp = NEXT(tmp);
}
}
/*
one of the builtin commands (sin, ones, help, ...)
*/
if ((com = com_check(SDATA(root))) != (COMMAND *)NULL)
{
if (argcount < com->minp || argcount > com->maxp)
{
if (com->minp == com->maxp)
{
error( "Builtin function [%s] requires %d argument(s).\n", SDATA(root), com->minp);
}
else
{
error("Builtin function [%s] takes from %d to %d argument(s).\n",
SDATA(root), com->minp, com->maxp);
}
}
if (com->flags & CMDFLAG_PW)
{
tmp = com_pointw((double (*)())com->sub, par);
}
else
{
tmp = (*com->sub)(par);
}
}
/*
a variable name
*/
else if ((tmp1 = var_check(SDATA(root))) != (VARIABLE *)NULL)
{
tmp = (VARIABLE *)ALLOCMEM(VARIABLESIZE);
tmp ->this = tmp1->this;
REFCNT(tmp)++;
if (par != NULL)
{
subs = par; par = NULL;
}
}
/*
user defined function
*/
else if ((fnc = fnc_check(SDATA(root))) != (FUNCTION *)NULL)
{
tmp = fnc_exec(fnc, par); par = NULL;
}
/*
maybe a file name ?
*/
else if ((fp = fopen(SDATA(root),"r")) != (FILE *)NULL)
{
fclose(fp);
tmp = var_temp_new(TYPE_STRING, 1, strlen(SDATA(root)));
for(i = 0; i < strlen(SDATA(root)); i++)
{
M(tmp, 0, i) = SDATA(root)[i];
}
(*com_source)(tmp);
var_delete_temp(tmp);
tmp = NULL;
}
/*
troubles!
*/
else
{
error("Undeclared identifier: [%s].\n", SDATA(root));
}
break;
/******************************************************
single string constant
*******************************************************/
case ETYPE_STRING:
tmp = var_temp_new(TYPE_STRING, 1, strlen(SDATA(root)));
for(i = 0; i < strlen(SDATA(root)); i++)
{
M(tmp,0,i) = SDATA(root)[i];
}
break;
/******************************************************
single numeric constant
*******************************************************/
case ETYPE_NUMBER:
tmp = var_temp_new(TYPE_DOUBLE, 1, 1);
M(tmp,0,0) = DDATA(root);
break;
/******************************************************
constant matrix
*******************************************************/
case ETYPE_CONST:
tmp = (VARIABLE *)ALLOCMEM(VARIABLESIZE);
tmp->this = CDATA(root)->this;
REFCNT(tmp)++;
break;
/******************************************************
huh ?
*******************************************************/
case ETYPE_EQUAT:
tmp = evaltree(LEFT(root));
break;
/******************************************************
left oper [right]
oper = divide, multiply, transpose, power,...
*******************************************************/
case ETYPE_OPER:
/*
* evaluate both leafs.
*/
leftptr = rightptr = NULL; opres = NULL;
leftptr = evaltree(LEFT(root));
rightptr = evaltree(RIGHT(root));
if (leftptr && rightptr)
opres = (*VDATA(root))(leftptr->this, rightptr->this);
else if (leftptr)
opres = (*VDATA(root))(leftptr->this, NULL);
else if (rightptr)
opres = (*VDATA(root))(rightptr->this, NULL);
var_delete_temp(leftptr);
var_delete_temp(rightptr);
if (opres != NULL)
{
tmp = (VARIABLE *)ALLOCMEM(VARIABLESIZE);
tmp->this = opres;
REFCNT(tmp) = 1;
}
break;
}
/*
if NULL result, don't really know what to do, so we try quitting.
*/
/*
if (tmp == (VARIABLE *)NULL)
{
if (subs) var_delete_temp(subs);
if (par) var_delete_temp(par);
if (first) var_delete_temp(first);
return (VARIABLE *)NULL;
}
*/
/******************************************************
deleting temporaries, preparing for the next loop
*******************************************************/
if (subs != NULL)
{
if (tmp != NULL)
{
tmp1 = tmp;
NEXT(tmp1) = subs;
tmp = com_el(tmp1);
var_delete_temp(tmp1);
}
else
{
var_delete_temp(subs);
}
tmp1 = subs = NULL;
}
if (first == NULL)
{
first = res = tmp;
}
else if (tmp != NULL)
{
NEXT(res) = tmp; res = NEXT(res);
}
if (subs) var_delete_temp(subs);
if (par) var_delete_temp(par);
if (tmp != NULL) dim += NROW(tmp) * NCOL(tmp);/* count size of the result */
root = LINK(root);
}
/*-------------------------------------------------------------------*/
/*
there was only one tree, so return its result.
*/
if (tmp == first) return first;
/*
it was a list of trees, concatenate the results
*/
res = var_temp_new(TYPE(first), 1, dim);
resbeg = (char *)MATR(res);
for(tmp = first; tmp; tmp = NEXT(tmp))
{
memcpy(resbeg, (char *)MATR(tmp), MATSIZE(tmp));
resbeg += MATSIZE(tmp);
}
/*
and delete rest of the temporaries.
*/
var_delete_temp(first);
return res;
}
VARIABLE *evalclause(root) CLAUSE *root;
/*======================================================================
? Evaluate the operations list. The list contains equations trees
| (actually lists of trees :-).
|
& ALLOCMEM, FREEMEM, STRCOPY, var_temp_new(), var_delete_temp(),
& var_check(), com_check(), fnc_check(), evaltree(),
^=====================================================================*/
{
VARIABLE *ptr = NULL,
*res, *par, *tmp; /* used for something magic */
TREE *argptr; /* pointer to parameters */
double *d;
int i;
/*-------------------------------------------------------------------*/
while(root)
{
if (root->data == endsym) return ptr;
switch(root->data)
{
/************************************************************
System call
************************************************************/
case systemcall:
{
#if defined(WIN32) || defined(MINGW32)
FILE *fp = _popen( SDATA(root->this), "r" );
#else
FILE *fp = popen( SDATA(root->this), "r" );
#endif
static char s[101];
#pragma omp threadprivate (s)
if ( !fp ) error( "systemcall: open failure: [%s].\n", SDATA(root->this) );
while( fgets( s, sizeof(s), fp ) ) PrintOut( s );
#if defined(WIN32) || defined(MINGW32)
_pclose( fp );
#else
pclose( fp );
#endif
}
break;
/************************************************************
Function definition
************************************************************/
case funcsym:
{
FUNCTION *fnc; /* pointer to created function */
TREE *tptr; /* function parameters */
char *name = SDATA(root->this); /* function name */
int i, n, argcount; /* well... */
/*
check for name conflicts
*/
if (var_check(name) || com_check(name))
{
error( "Function not created [%s], identifier in use.\n",name);
}
/*
* allocate mem for FUNCTION structure and add it
* to the the FUNCTIONS list
*/
if ((fnc = fnc_check(name)) != NULL) fnc_free_entry(fnc);
fnc = (FUNCTION *)ALLOCMEM(sizeof(FUNCTION));
NAME(fnc) = STRCOPY(name);
lst_add(FUNCTIONS, (LIST *)fnc);
/*
* copy parameter names to the structure.
*/
argcount = 0;
for(tptr = ARGS(root->this); tptr; tptr = NEXT(tptr)) argcount++;
if (argcount > 0)
{
fnc -> parnames = (char **)ALLOCMEM(argcount * sizeof(char *));
for(i = 0, tptr = ARGS(root->this); tptr; tptr = NEXT(tptr))
fnc -> parnames[i++] = STRCOPY(SDATA(tptr));
}
fnc -> parcount = argcount;
/*
* copy help text if any to the structure.
*/
argcount = n = 0;
for( tptr = SUBS(root->this); tptr; tptr = NEXT(tptr))
{
if ( SDATA(tptr) )
{
argcount++;
n += strlen( SDATA(tptr) );
}
}
if ( argcount > 0 && n > 0)
{
fnc->help = (char *)ALLOCMEM( (n+argcount+1)*sizeof(char) );
fnc->help[0] = '\0'; /* start from an empty string before the strcat calls below */
for( tptr = SUBS(root->this); tptr; tptr = NEXT(tptr) )
{
if ( SDATA(tptr) )
{
strcat( fnc->help, SDATA(tptr) );
strcat( fnc->help, "\n" );
}
}
}
/*
* copy imported variable names to the structure.
*/
argcount = 0;
for(tptr = LEFT(root->this); tptr; tptr = NEXT(tptr)) argcount++;
if (argcount > 0)
{
fnc -> imports = (char **)ALLOCMEM((argcount+1) * sizeof(char *));
for(i = 0, tptr = LEFT(root->this); tptr; tptr = NEXT(tptr))
fnc -> imports[i++] = STRCOPY(SDATA(tptr));
fnc -> imports[i] = NULL;
}
else
fnc -> imports = NULL;
/*
* copy exported variable names to the structure.
*/
argcount = 0;
for(tptr = RIGHT(root->this); tptr; tptr = NEXT(tptr)) argcount++;
if (argcount > 0)
{
fnc -> exports = (char **)ALLOCMEM((argcount+1) * sizeof(char *));
for(i = 0, tptr = RIGHT(root->this); tptr; tptr = NEXT(tptr))
fnc -> exports[i++] = STRCOPY(SDATA(tptr));
fnc -> exports[i] = NULL;
}
else
fnc -> exports = NULL;
/*
and finally the function body
*/
fnc -> body = LINK(root); LINK(root) = NULL;
/* PrintOut( "Function defined: [%s].\n", name); */
return NULL;
}
/***************************************************************
statement
****************************************************************/
case assignsym:
{
int iflg = FALSE, /* is the result indexed */
pflg = TRUE; /* should it be printed to */
/* output stream */
char *r = "ans"; /* resulted VARIABLE name */
par = NULL;
/*
* there is an explicit assigment (ie. x = x + 1, not x + 1)
*/
if (root->this)
{
/*
* VARIABLE name
*/
r = SDATA( root->this );
/*
* check for name conflicts
*/
if ( fnc_check(r) || com_check(r) || lst_find(CONSTANTS, r) )
{
error( "VARIABLE not created [%s], identifier in use.\n", r );
}
/*
* is it indexed ?
*/
pflg = FALSE;
argptr = ARGS(root->this);
if (argptr)
{
iflg = TRUE;
if ((par = tmp = evaltree(argptr)) != NULL)
{
argptr = NEXT(argptr);
while(argptr)
{
if ((NEXT(tmp) = evaltree(argptr)) == NULL) break;
argptr = NEXT(argptr);
tmp = NEXT(tmp);
}
}
}
}
/*
* evaluate the right side of the statement
* and put the result where it belongs
*/
ptr = evaltree( LINK(root)->this );
ptr = put_result( ptr, r, par, iflg, pflg );
if ( par ) var_delete_temp( par );
root = LINK(root);
break;
}
/***************************************************************
if statement
****************************************************************/
case ifsym:
if ((res = evaltree(root->this)) != NULL)
{
for(i = 0, d=MATR(res); i < NROW(res)*NCOL(res); i++)
if (*d++ == 0) break;
/*
* condition false
*/
if (*--d == 0)
{
root = root->jmp;
if (root->data == elsesym)
{
ptr = evalclause(LINK(root));
root = root -> jmp;
}
}
/*
* condition true
*/
else
{
ptr = evalclause(LINK(root));
root = root->jmp;
if (root->data == elsesym)
{
root = root->jmp;
}
}
var_delete_temp(res);
}
else
{
root = root -> jmp;
if (root->data == elsesym)
{
root = root->jmp;
}
}
break;
/***************************************************************
while statement
****************************************************************/
case whilesym:
while(TRUE)
{
if ((res = evaltree(root->this)) == NULL) break;
for(i=0, d=MATR(res); i < NROW(res)*NCOL(res); i++)
if (*d++ == 0) break;
/*
condition true, go for another loop
*/
if (*--d != 0)
{
ptr = evalclause(LINK(root));
var_delete_temp(res);
}
/*
condition false, done with while-loop
*/
else
{
var_delete_temp(res);
break;
}
}
root = root->jmp;
break;
/***************************************************************
for statement
****************************************************************/
case forsym:
{
VARIABLE *var;
char *r;
/*
* VARIABLE name
*/
r = SDATA(root->this);
/*
* check for name conflicts
*/
if (fnc_check(r) || com_check(r) || lst_find(CONSTANTS, r))
{
error( "VARIABLE not created [%s], identifier in use.\n ", r);
}
if ((res = evaltree(LINK(root->this))) != NULL)
{
var_delete(r);
var = var_new(r,TYPE(res),1,1);
d = MATR(res);
for(i = 0; i < NCOL(res)*NROW(res); i++)
{
*MATR(var) = *d++;
ptr = evalclause(LINK(root));
}
var_delete_temp(res);
}
root = root->jmp;
break;
}
}
root = LINK(root);
}
return ptr;
}
VARIABLE *put_values(ptr, resname, par) VARIABLE *ptr, *par; char *resname;
/*======================================================================
? extract values from ptr, indexes in par, and put them to res
^=====================================================================*/
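/*
 * illustrative note (not from the original source): "ptr" holds the evaluated
 * right-hand side, "par" a list of index vectors (one VARIABLE per dimension,
 * or a single 0/1 logical mask of the same shape as the target), and
 * "resname" the name of the target variable; the target matrix is grown as
 * needed before the values are copied in.
 */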
{
static double defind = 0.0;
#pragma omp threadprivate (defind)
double *ind1,
*ind2,
*dtmp;
int i, j, k,
rows, cols,
size1, size2,
imax1, imax2,
ind;
VARIABLE *res;
res = (VARIABLE *)lst_find(VARIABLES, resname);
if (NEXT(par) == NULL)
{
if (
res != NULL && NROW(par) == NROW(res) && NCOL(par) == NCOL(res)
&& !(NROW(res) == 1 && NCOL(res) == 1)
)
{
int logical = TRUE,
csize = 0;
dtmp = MATR(par);
for(i = 0; i < NROW(par)*NCOL(par); i++)
if (dtmp[i] != 0 && dtmp[i] != 1)
{
logical = FALSE;
break;
}
if (logical)
{
imax1 = NROW(ptr) * NCOL(ptr);
dtmp = MATR(ptr);
for(i = 0, k = 0; i < NROW(res); i++)
for(j = 0,csize=0; j < NCOL(res); j++)
{
while(M(par,i,j)==1 && j+csize<NCOL(res) && k+csize<imax1) csize++;
if (csize > 0)
{
memcpy(&M(res,i,j),&dtmp[k],csize*sizeof(double));
j += csize-1;
k += csize;
csize = 0;
if (k >= imax1) k = 0;
}
}
var_delete_temp(ptr);
return res;
}
else
{
ind1 = &defind;
size1 = 1;
ind2 = MATR(par);
size2 = NCOL(par);
}
}
else
{
ind1 = &defind;
size1 = 1;
ind2 = MATR(par);
size2 = NCOL(par);
}
}
else
{
ind1 = MATR(par);
size1 = NCOL(par);
ind2 = MATR(NEXT(par));
size2 = NCOL(NEXT(par));
}
imax1 = (int)ind1[0];
for(i = 1; i < size1; i++)
{
imax1 = max(imax1, (int)ind1[i]);
}
imax2 = (int)ind2[0];
for(i = 1; i < size2; i++)
{
imax2 = max(imax2, (int)ind2[i]);
}
if (res == NULL)
{
res = var_new(resname, TYPE(ptr), imax1+1, imax2+1);
}
else if (NROW(res) <= imax1 || NCOL(res) <= imax2)
{
int ir = NROW(res), jc = NCOL(res);
MATRIX *t;
imax1 = max(ir, imax1 + 1);
imax2 = max(jc, imax2 + 1);
t = mat_new(TYPE(res), imax1, imax2);
dtmp = t->data;
for(i = 0; i < ir; i++)
{
memcpy(&dtmp[i*imax2],&M(res,i,0),jc*sizeof(double));
}
if (--REFCNT(res) == 0)
{
mat_free(res->this);
}
res->this = t;
REFCNT(res) = 1;
}
else if (REFCNT(res) > 1)
{
--REFCNT(res);
res->this = mat_copy(res->this);
}
imax1 = NROW(ptr) * NCOL(ptr);
dtmp = MATR(ptr);
for(i = 0, k = 0; i < size1; i++)
{
ind = (int)ind1[i];
for(j = 0; j < size2; j++)
{
memcpy(&M(res,ind,(int)ind2[j]),&dtmp[k++],sizeof(double));
if (k >= imax1) k = 0;
}
}
var_delete_temp(ptr);
return res;
}
VARIABLE *put_result(ptr, resname, par, indexflag, printflag)
/*======================================================================
? copy VARIABLE from one place to another
| and conditionally print the result
^=====================================================================*/
int indexflag, printflag;
VARIABLE *ptr, *par;
char *resname;
{
VARIABLE *res = NULL;
var_delete( "ans" );
if (indexflag && par)
{
res = put_values(ptr, resname, par);
}
else
{
res = var_rename( ptr, resname );
}
if ( res ) res->changed = 1;
if (printflag) var_print(res);
return res;
}
|
ReebSpace.h | /// \ingroup base
/// \class ttk::ReebSpace
/// \author Julien Tierny <julien.tierny@lip6.fr>
/// \date October 2015.
///
/// \brief TTK processing package that efficiently computes the Reeb space of
/// bivariate volumetric data.
///
/// The Reeb space is a useful topological abstraction of bivariate scalar
/// fields for data segmentation purposes. Intuitively, it allows the automatic
/// separation of volumetric regions that project to the same areas in the
/// range. This class also implements a simplification heuristic for progressive
/// coarsening. Used in conjunction with continuous scatterplots, this class
/// enables continuous scatterplot peeling, for instance.
///
/// \param dataTypeU Data type of the input first component field (char, float,
/// etc.)
/// \param dataTypeV Data type of the input second component field (char, float,
/// etc.)
///
/// \b Related \b publication \n
/// "Jacobi Fiber Surfaces for Bivariate Reeb Space Computation" \n
/// Julien Tierny, Hamish Carr \n
/// Proc. of IEEE VIS 2016.\n
/// IEEE Transactions on Visualization and Computer Graphics, 2016.
///
/// \sa ttkReebSpace.cpp %for a usage example.
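///
/// \b Usage \b sketch (illustrative only; the calls below are the ones declared
/// in this class, but the surrounding setup -- the component fields, the
/// simulation-of-simplicity offsets and the triangulation -- is assumed):
/// \code
///   // float *uField, *vField;                  // the two scalar components
///   // std::vector<ttk::SimplexId> sosU, sosV;  // simulation-of-simplicity offsets
///   // ttk::Triangulation triangulation;        // already built on the input mesh
///   ttk::ReebSpace reebSpace;
///   reebSpace.setInputField(uField, vField);    // must precede setupTriangulation()
///   reebSpace.setupTriangulation<float, float>(&triangulation);
///   reebSpace.setSosOffsetsU(&sosU);
///   reebSpace.setSosOffsetsV(&sosV);
///   reebSpace.execute<float, float>();
///   reebSpace.simplify<float, float>(0.05, ttk::ReebSpace::rangeArea);
/// \endcode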
#ifndef _REEBSPACE_H
#define _REEBSPACE_H
// standard includes
// base code includes
#include <FiberSurface.h>
#include <Geometry.h>
#include <JacobiSet.h>
#include <Triangulation.h>
// to add FiberSurface.h
#include <Wrapper.h>
#include <map>
#include <set>
namespace ttk{
class ReebSpace : public Debug{
public:
enum SimplificationCriterion{
domainVolume, // 0
rangeArea, // 1
hyperVolume //2
};
class Sheet0{
public:
SimplexId vertexId_, pruned_;
// 0: extrema-sheet, 1: saddle-sheet
char type_;
std::vector<SimplexId> sheet1List_;
std::vector<SimplexId> sheet3List_;
};
class Sheet1{
public:
bool hasSaddleEdges_, pruned_;
std::vector<SimplexId> edgeList_;
// 0: extrema-sheet, 1: saddle-sheet (can be inferred by the number
// of 2-sheets attached to it?)
std::vector<SimplexId> sheet0List_;
// NB: the corresponding 2-sheet should have the same global
// identifier.
std::vector<SimplexId> sheet3List_;
};
class Sheet2{
public:
bool pruned_;
SimplexId sheet1Id_;
// for each point, x, y, z coordinates
// this is how coincident points will be merged afterwards
// this list is meant to be temporary. after the merge, we'll use a
// global list
std::vector<std::vector<FiberSurface::Vertex> >
vertexList_;
std::vector<std::vector<FiberSurface::Triangle> >
triangleList_;
std::vector<SimplexId> sheet3List_;
};
class Sheet3{
public:
SimplexId Id_, simplificationId_, preMerger_;
bool pruned_;
double domainVolume_, rangeArea_, hyperVolume_;
std::vector<SimplexId> vertexList_;
std::vector<SimplexId> tetList_;
std::vector<SimplexId> sheet0List_;
std::vector<SimplexId> sheet1List_;
std::vector<SimplexId> sheet2List_;
std::vector<SimplexId> sheet3List_;
std::vector<SimplexId> preMergedSheets_;
};
ReebSpace();
~ReebSpace();
inline bool empty() const{
return (currentData_.vertex2sheet0_.size() == 0);
}
template <class dataTypeU, class dataTypeV> inline int execute();
inline const Sheet0* get0sheet(const SimplexId &sheetId) const{
#ifndef TTK_ENABLE_KAMIKAZE
if((sheetId < 0)||(sheetId >= (SimplexId) currentData_.sheet0List_.size()))
return NULL;
#endif
return &(currentData_.sheet0List_[sheetId]);
}
inline const Sheet1* get1sheet(const SimplexId &sheetId) const{
#ifndef TTK_ENABLE_KAMIKAZE
if((sheetId < 0)||(sheetId >= (SimplexId) currentData_.sheet1List_.size()))
return NULL;
#endif
return &(currentData_.sheet1List_[sheetId]);
}
// warning, these are the originals
inline const Sheet2* get2sheet(const SimplexId &sheetId) const{
#ifndef TTK_ENABLE_KAMIKAZE
if((sheetId < 0)||(sheetId >= (SimplexId) originalData_.sheet2List_.size()))
return NULL;
#endif
return &(originalData_.sheet2List_[sheetId]);
}
inline const Sheet3* get3sheet(const SimplexId &sheetId) const{
#ifndef TTK_ENABLE_KAMIKAZE
if((sheetId < 0)||(sheetId >= (SimplexId) currentData_.sheet3List_.size()))
return NULL;
#endif
return &(currentData_.sheet3List_[sheetId]);
}
inline const std::vector<SimplexId>* get0sheetSegmentation() const{
return &currentData_.vertex2sheet0_;
}
const std::vector<SimplexId>* get1sheetSegmentation() const{
return &currentData_.edge2sheet1_;
}
const std::vector<SimplexId>* get3sheetVertexSegmentation() const{
return &currentData_.vertex2sheet3_;
}
const std::vector<SimplexId>* get3sheetTetSegmentation() const{
return &currentData_.tet2sheet3_;
}
const std::vector<SimplexId>* getEdgeTypes() const{
return &currentData_.edgeTypes_;
}
inline const std::vector<FiberSurface::Vertex>*
getFiberSurfaceVertices() const{
return &(fiberSurfaceVertexList_);
}
inline int getJacobi2Edge(const SimplexId &jacobiEdgeId) const{
if((jacobiEdgeId < 0)
||(jacobiEdgeId >= (SimplexId) jacobi2edges_.size()))
return -1;
return jacobi2edges_[jacobiEdgeId];
}
inline SimplexId getNumberOf2sheets() const {
return currentData_.sheet2List_.size();
}
// inline std::vector<long long int>* getSheetTriangulationCells(){
// return &sheet3cells_;
// }
//
// inline std::vector<float>* getSheetTriangulationPoints(){
// return &sheet3points_;
// }
template <class dataTypeU, class dataTypeV>
inline int perturbate(
const dataTypeU &uEpsilon = pow(10, -DBL_DIG),
const dataTypeV &vEpsilon = pow(10, -DBL_DIG)) const;
inline int setExpand3Sheets(const bool &onOff){
expand3sheets_ = onOff;
return 0;
}
inline int setInputField(const void *uField, const void *vField){
uField_ = uField;
vField_ = vField;
#ifdef TTK_ENABLE_FIBER_SURFACE_WITH_RANGE_OCTREE
fiberSurface_.flushOctree();
#endif
return 0;
}
inline bool setRangeDrivenOctree(const bool &onOff){
if(onOff != withRangeDrivenOctree_){
withRangeDrivenOctree_ = onOff;
return true;
}
withRangeDrivenOctree_ = onOff;
return false;
}
inline int setSosOffsetsU(std::vector<SimplexId> *sosOffsetsU){
sosOffsetsU_ = sosOffsetsU;
return 0;
}
inline int setSosOffsetsV(std::vector<SimplexId> *sosOffsetsV){
sosOffsetsV_ = sosOffsetsV;
return 0;
}
inline int setTetNumber(const SimplexId &tetNumber){
tetNumber_ = tetNumber;
return 0;
}
/// Set the number of vertices in the scalar field.
/// \param vertexNumber Number of vertices in the data-set.
/// \return Returns 0 upon success, negative values otherwise.
int setVertexNumber(const SimplexId &vertexNumber){
vertexNumber_ = vertexNumber;
return 0;
}
// WARNING: if you plan to use the range driven octree, make sure
// that you provided pointers to the u and v fields.
template <class dataTypeU, class dataTypeV>
inline int setupTriangulation(Triangulation *triangulation){
triangulation_ = triangulation;
if(triangulation_){
triangulation_->preprocessVertexStars();
triangulation_->preprocessEdges();
triangulation_->preprocessVertexEdges();
JacobiSet<dataTypeU, dataTypeV> jacobiSet;
jacobiSet.setWrapper(wrapper_);
// trigger the jacobiSet pre-processing on the triangulation.
jacobiSet.setupTriangulation(triangulation_);
// trigger the fiberSurface pre-processing on the triangulation.
fiberSurface_.setWrapper(wrapper_);
fiberSurface_.setInputField(uField_, vField_);
fiberSurface_.setupTriangulation(triangulation_);
// trigger the fiber surface precomputation
#ifdef TTK_ENABLE_FIBER_SURFACE_WITH_RANGE_OCTREE
if(withRangeDrivenOctree_)
fiberSurface_.buildOctree<dataTypeU, dataTypeV>();
#endif
vertexNumber_ = triangulation_->getNumberOfVertices();
edgeNumber_ = triangulation_->getNumberOfEdges();
tetNumber_ = triangulation_->getNumberOfCells();
}
return 0;
}
template <class dataTypeU, class dataTypeV>
inline int simplify(const double &simplificationThreshold,
const SimplificationCriterion &criterion = rangeArea);
protected:
class ReebSpaceData;
int compute1sheetsOnly(const std::vector<std::pair<SimplexId, char> > &jacobiSet,
std::vector<std::pair<SimplexId, SimplexId> > &jacobiSetClassification);
int compute1sheets(const std::vector<std::pair<SimplexId, char> > &jacobiSet,
std::vector<std::pair<SimplexId, SimplexId> > &jacobiSetClassification);
template <class dataTypeU, class dataTypeV>
inline int compute2sheets(
const std::vector<std::pair<SimplexId, SimplexId> > &jacobiEdges);
template <class dataTypeU, class dataTypeV>
inline int compute2sheetChambers();
int compute3sheet(const SimplexId &vertexId,
const std::vector<std::vector<std::vector<SimplexId> > > &tetTriangles);
int compute3sheets(std::vector<std::vector<std::vector<SimplexId> > > &tetTriangles);
template <class dataTypeU, class dataTypeV>
inline int computeGeometricalMeasures(Sheet3 &sheet);
int connect3sheetTo0sheet(
ReebSpaceData &data,
const SimplexId &sheet3Id, const SimplexId &sheet0Id);
int connect3sheetTo1sheet(
ReebSpaceData &data,
const SimplexId &sheet3Id, const SimplexId &sheet1Id);
int connect3sheetTo2sheet(
ReebSpaceData &data,
const SimplexId &sheet3Id, const SimplexId &sheet2Id);
int connect3sheetTo3sheet(
ReebSpaceData &data,
const SimplexId &sheet3Id, const SimplexId &otherSheet3Id);
int connectSheets();
int disconnect1sheetFrom0sheet(ReebSpaceData &data,
const SimplexId &sheet1Id, const SimplexId &sheet0Id, const SimplexId &biggerId);
int disconnect3sheetFrom0sheet(ReebSpaceData &data,
const SimplexId &sheet3Id, const SimplexId &sheet0Id);
int disconnect3sheetFrom1sheet(ReebSpaceData &data,
const SimplexId &sheet3Id, const SimplexId &sheet1Id, const SimplexId &biggerId);
int disconnect3sheetFrom2sheet(ReebSpaceData &data,
const SimplexId &sheet3Id, const SimplexId &sheet2Id);
int disconnect3sheetFrom3sheet(ReebSpaceData &data,
const SimplexId &sheet3Id, const SimplexId &other3SheetId);
int flush();
int mergeSheets(const SimplexId &smallerId, const SimplexId &biggerId);
int preMergeSheets(const SimplexId &sheetId0, const SimplexId &sheetId1);
int prepareSimplification();
int printConnectivity(std::ostream &stream, const ReebSpaceData &data) const;
int simplifySheets(const double &simplificationThreshold,
const SimplificationCriterion &simplificationCriterion);
int simplifySheet(const SimplexId &sheetId,
const SimplificationCriterion &simplificationCriterion);
// int triangulateTetrahedron(const int &tetId,
// const std::vector<std::vector<int> > &triangles,
// std::vector<long long int> &outputTets);
//
// int triangulateThreeSheets();
SimplexId vertexNumber_, edgeNumber_, tetNumber_;
double totalArea_, totalVolume_, totalHyperVolume_;
const void *uField_, *vField_;
std::vector<SimplexId> *sosOffsetsU_, *sosOffsetsV_;
// output segmentation
class ReebSpaceData{
public:
SimplificationCriterion
simplificationCriterion_;
double simplificationThreshold_;
std::vector<SimplexId> edge2sheet1_;
std::vector<SimplexId> edgeTypes_;
std::vector<SimplexId> tet2sheet3_;
std::vector<SimplexId> vertex2sheet0_;
std::vector<SimplexId> vertex2sheet3_;
// structure
std::vector<Sheet0> sheet0List_;
std::vector<Sheet1> sheet1List_;
std::vector<Sheet2> sheet2List_;
std::vector<Sheet3> sheet3List_;
// std::vector<float> sheet3points_;
// std::vector<long long int> sheet3cells_;
};
bool hasConnectedSheets_,
expand3sheets_,
withRangeDrivenOctree_;
ReebSpaceData originalData_, currentData_;
// information that does not get simplified
std::vector<std::pair<SimplexId, char> >
jacobiSetEdges_;
std::vector<SimplexId> jacobi2edges_;
FiberSurface fiberSurface_;
std::vector<FiberSurface::Vertex>
fiberSurfaceVertexList_;
Triangulation *triangulation_;
};
}
// if the package is not a template, comment the following line
// #include <ReebSpace.cpp>
template <class dataTypeU, class dataTypeV>
inline int ttk::ReebSpace::execute(){
flush();
Timer t;
// 1) compute the jacobi set
JacobiSet<dataTypeU, dataTypeV> jacobiSet;
jacobiSet.setWrapper(wrapper_);
jacobiSet.setupTriangulation(triangulation_);
jacobiSet.setInputField(uField_, vField_);
jacobiSet.setSosOffsetsU(sosOffsetsU_);
jacobiSet.setSosOffsetsV(sosOffsetsV_);
jacobiSet.execute(jacobiSetEdges_);
// 2) compute the list saddle 1-sheets
// + list of saddle 0-sheets
std::vector<std::pair<SimplexId, SimplexId> > jacobiSetClassification;
compute1sheetsOnly(
jacobiSetEdges_, jacobiSetClassification);
// at this stage, jacobiSetClassification contains the list of saddle edges
// along with their 1-sheet Id.
compute2sheets<dataTypeU, dataTypeV>(jacobiSetClassification);
// compute2sheetChambers<dataTypeU, dataTypeV>();
std::vector<std::vector<std::vector<SimplexId> > > tetTriangles;
compute3sheets(tetTriangles);
{
std::stringstream msg;
msg << "[ReebSpace] Data-set (" << vertexNumber_
<< " points) processed in "
<< t.getElapsedTime() << " s. TOTAL (" << threadNumber_
<< " thread(s))."
<< std::endl;
dMsg(std::cout, msg.str(), timeMsg);
}
// post-process for further interaction
if((totalArea_ == -1)||(totalVolume_ == -1)||(totalHyperVolume_ == -1)){
Timer t;
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for num_threads(threadNumber_)
#endif
for(SimplexId i = 0; i < (SimplexId) originalData_.sheet3List_.size(); i++){
computeGeometricalMeasures<dataTypeU, dataTypeV>(
originalData_.sheet3List_[i]);
}
for(SimplexId i = 0; i < (SimplexId) originalData_.sheet3List_.size(); i++){
totalArea_ += originalData_.sheet3List_[i].rangeArea_;
totalVolume_ += originalData_.sheet3List_[i].domainVolume_;
totalHyperVolume_ += originalData_.sheet3List_[i].hyperVolume_;
}
{
std::stringstream msg;
msg << "[ReebSpace] Geometrical measures computed in "
<< t.getElapsedTime() << " s. (" << threadNumber_
<< " thread(s))" << std::endl;
dMsg(std::cout, msg.str(), timeMsg);
}
}
fiberSurface_.finalize<dataTypeU, dataTypeV>();
prepareSimplification();
return 0;
}
template <class dataTypeU, class dataTypeV>
inline int ttk::ReebSpace::compute2sheets(
const std::vector<std::pair<SimplexId, SimplexId> > &jacobiEdges){
#ifndef TTK_ENABLE_KAMIKAZE
if(!triangulation_)
return -1;
#endif
Timer t;
// at this point, they have exactly the same size
originalData_.sheet2List_.resize(originalData_.sheet1List_.size());
for(SimplexId i = 0; i < (SimplexId) originalData_.sheet2List_.size(); i++){
originalData_.sheet2List_[i].sheet1Id_ = i;
originalData_.sheet2List_[i].pruned_ = false;
originalData_.sheet2List_[i].vertexList_.resize(
originalData_.sheet1List_[
originalData_.sheet2List_[i].sheet1Id_].edgeList_.size());
originalData_.sheet2List_[i].triangleList_.resize(
originalData_.sheet1List_[
originalData_.sheet2List_[i].sheet1Id_].edgeList_.size());
for(SimplexId j = 0;
j < (SimplexId) originalData_.sheet2List_[i].vertexList_.size(); j++){
originalData_.sheet2List_[i].vertexList_[j].clear();
originalData_.sheet2List_[i].triangleList_[j].clear();
}
}
fiberSurface_.setWrapper(wrapper_);
fiberSurface_.setGlobalVertexList(&fiberSurfaceVertexList_);
fiberSurface_.setInputField(uField_, vField_);
fiberSurface_.setupTriangulation(triangulation_);
fiberSurface_.setPolygonEdgeNumber(jacobiEdges.size());
std::vector<SimplexId> edge2polygonEdgeId(edgeNumber_, -1);
jacobi2edges_.resize(jacobiEdges.size());
for(SimplexId i = 0; i < (SimplexId) jacobiEdges.size(); i++){
SimplexId edgeId = jacobiEdges[i].first;
edge2polygonEdgeId[edgeId] = i;
jacobi2edges_[i] = edgeId;
}
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for num_threads(threadNumber_)
#endif
for(SimplexId i = 0; i < (SimplexId) originalData_.sheet2List_.size(); i++){
for(SimplexId j = 0;
j < (SimplexId) originalData_.sheet1List_[
originalData_.sheet2List_[i].sheet1Id_].edgeList_.size(); j++){
SimplexId edgeId = originalData_.sheet1List_[
originalData_.sheet2List_[i].sheet1Id_].edgeList_[j];
fiberSurface_.setTriangleList(
edge2polygonEdgeId[edgeId],
&(originalData_.sheet2List_[i].triangleList_[j]));
fiberSurface_.setVertexList(
edge2polygonEdgeId[edgeId],
&(originalData_.sheet2List_[i].vertexList_[j]));
}
}
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for num_threads(threadNumber_)
#endif
for(SimplexId i = 0; i < (SimplexId) jacobiEdges.size(); i++){
SimplexId edgeId = jacobiEdges[i].first;
std::pair<double, double> rangePoint0, rangePoint1;
SimplexId vertexId0 = -1, vertexId1 = -1;
triangulation_->getEdgeVertex(edgeId, 0, vertexId0);
triangulation_->getEdgeVertex(edgeId, 1, vertexId1);
rangePoint0.first = ((dataTypeU *) uField_)[vertexId0];
rangePoint0.second = ((dataTypeV *) vField_)[vertexId0];
rangePoint1.first = ((dataTypeU *) uField_)[vertexId1];
rangePoint1.second = ((dataTypeV *) vField_)[vertexId1];
if(originalData_.edgeTypes_[edgeId] == 1){
std::vector<SimplexId> edgeSeeds(triangulation_->getEdgeStarNumber(edgeId), -1);
for(SimplexId j = 0; j < (SimplexId) edgeSeeds.size(); j++){
triangulation_->getEdgeStar(edgeId, j, edgeSeeds[j]);
}
fiberSurface_.computeContour<dataTypeU, dataTypeV>(
rangePoint0, rangePoint1,
edgeSeeds,
edge2polygonEdgeId[edgeId]);
}
else{
#ifdef TTK_ENABLE_FIBER_SURFACE_WITH_RANGE_OCTREE
if(withRangeDrivenOctree_){
fiberSurface_.computeSurfaceWithOctree<dataTypeU, dataTypeV>(
rangePoint0, rangePoint1, edge2polygonEdgeId[edgeId]);
}
else{
fiberSurface_.computeSurface<dataTypeU, dataTypeV>(
rangePoint0, rangePoint1, edge2polygonEdgeId[edgeId]);
}
#else
fiberSurface_.computeSurface<dataTypeU, dataTypeV>(
rangePoint0, rangePoint1, edge2polygonEdgeId[edgeId]);
#endif
}
}
// #ifdef TTK_ENABLE_OPENMP
// #pragma omp parallel for num_threads(threadNumber_)
// #endif
// for(int i = 0; i < (int) originalData_.sheet2List_.size(); i++){
//
// int sheet1Id = originalData_.sheet2List_[i].sheet1Id_;
//
// std::vector<bool> inList(tetNumber_, false);
// std::vector<int> seedList;
// std::vector<std::pair<std::pair<double, double>, std::pair<double,
// double> > > edgeList;
// std::vector<int> jacobiEdgeIdList;
//
// for(int j = 0;
// j < (int) originalData_.sheet1List_[sheet1Id].edgeList_.size(); j++){
// int edgeId = originalData_.sheet1List_[sheet1Id].edgeList_[j];
//
// if(originalData_.edgeTypes_[edgeId] == 1){
// for(int k = 0; k < (int) (*edgeStars_)[edgeId].size(); k++){
// int tetId = (*edgeStars_)[edgeId][k];
// if(!inList[tetId]){
// seedList.push_back(tetId);
// }
// }
// }
//
// if((originalData_.edgeTypes_[edgeId] != 1)
// &&(originalData_.edgeTypes_[edgeId] != -1)){
// edgeList.resize(edgeList.size() + 1);
//
// edgeList.back().first.first =
// ((dataTypeU *) uField_)[(*edgeList_)[edgeId].first];
// edgeList.back().first.second =
// ((dataTypeV *) vField_)[(*edgeList_)[edgeId].first];
//
// edgeList.back().second.first =
// ((dataTypeU *) uField_)[(*edgeList_)[edgeId].second];
// edgeList.back().second.second =
// ((dataTypeV *) vField_)[(*edgeList_)[edgeId].second];
//
// jacobiEdgeIdList.push_back(edge2polygonEdgeId[edgeId]);
// }
// }
//
// if(seedList.size()){
// fiberSurface_.computeContour<dataTypeU, dataTypeV>(
// edgeList, seedList,
// &jacobiEdgeIdList);
// }
// }
{
std::stringstream msg;
msg << "[ReebSpace] Fiber surfaces computed in "
<< t.getElapsedTime()
<< " s. overall (" << threadNumber_ << " thread(s))." << std::endl;
dMsg(std::cout, msg.str(), timeMsg);
}
return 0;
}
template <class dataTypeU, class dataTypeV>
inline int ttk::ReebSpace::compute2sheetChambers(){
#ifndef TTK_ENABLE_KAMIKAZE
if(!triangulation_)
return -1;
#endif
{
std::stringstream msg;
msg << "[ReebSpace] Computing chambers' pre-images." << std::endl;
msg << "[ReebSpace] This will take a LONG time." << std::endl;
dMsg(std::cout, msg.str(), timeMsg);
}
Timer t;
// at this point, they have exactly the same size
originalData_.sheet2List_.resize(threadNumber_);
for(SimplexId i = 0; i < (SimplexId) originalData_.sheet2List_.size(); i++){
originalData_.sheet2List_[i].sheet1Id_ = i;
originalData_.sheet2List_[i].pruned_ = false;
originalData_.sheet2List_[i].vertexList_.resize(1);
originalData_.sheet2List_[i].triangleList_.resize(1);
}
fiberSurface_.setWrapper(wrapper_);
fiberSurface_.setGlobalVertexList(&fiberSurfaceVertexList_);
fiberSurface_.setInputField(uField_, vField_);
fiberSurface_.setupTriangulation(triangulation_);
fiberSurface_.setTetNumber(tetNumber_);
fiberSurface_.setPolygonEdgeNumber(threadNumber_);
for(SimplexId i = 0; i < (SimplexId) originalData_.sheet2List_.size(); i++){
fiberSurface_.setTriangleList(i,
&(originalData_.sheet2List_[i].triangleList_[0]));
fiberSurface_.setVertexList(i,
&(originalData_.sheet2List_[i].vertexList_[0]));
}
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for num_threads(threadNumber_)
#endif
for(SimplexId i = 0; i < (SimplexId) edgeNumber_; i++){
#ifdef TTK_ENABLE_OPENMP
ThreadId threadId = omp_get_thread_num();
#else
ThreadId threadId = 0;
#endif
SimplexId edgeId = i;
std::pair<double, double> rangePoint0, rangePoint1;
SimplexId vertexId0 = -1, vertexId1 = -1;
triangulation_->getEdgeVertex(edgeId, 0, vertexId0);
triangulation_->getEdgeVertex(edgeId, 1, vertexId1);
rangePoint0.first = ((dataTypeU *) uField_)[vertexId0];
rangePoint0.second = ((dataTypeV *) vField_)[vertexId0];
rangePoint1.first = ((dataTypeU *) uField_)[vertexId1];
rangePoint1.second = ((dataTypeV *) vField_)[vertexId1];
fiberSurface_.computeSurface<dataTypeU, dataTypeV>(
rangePoint0, rangePoint1, threadId);
// clear the memory now.. otherwise we'll swap and never end
originalData_.sheet2List_[threadId].triangleList_[0].clear();
originalData_.sheet2List_[threadId].vertexList_[0].clear();
}
{
std::stringstream msg;
msg << "[ReebSpace] Chambers pre-image boundaries computed in "
<< t.getElapsedTime()
<< " s. overall (" << threadNumber_ << " thread(s))." << std::endl;
dMsg(std::cout, msg.str(), timeMsg);
}
return 0;
}
template <class dataTypeU, class dataTypeV>
inline int ttk::ReebSpace::computeGeometricalMeasures(Sheet3 &sheet){
sheet.domainVolume_ = 0;
sheet.rangeArea_ = 0;
sheet.hyperVolume_ = 0;
for(SimplexId i = 0; i < (SimplexId) sheet.tetList_.size(); i++){
SimplexId tetId = sheet.tetList_[i];
std::vector<std::pair<double, double> > domainBox, rangeBox;
std::vector<std::vector<float> > domainPoints(4), rangePoints(4);
for(int j = 0; j < 4; j++){
domainPoints[j].resize(3);
rangePoints[j].resize(2);
SimplexId vertexId = -1;
triangulation_->getCellVertex(tetId, j, vertexId);
triangulation_->getVertexPoint(vertexId,
domainPoints[j][0], domainPoints[j][1], domainPoints[j][2]);
rangePoints[j][0] = ((dataTypeU *) uField_)[vertexId];
rangePoints[j][1] = ((dataTypeV *) vField_)[vertexId];
}
Geometry::getBoundingBox(domainPoints, domainBox);
Geometry::getBoundingBox(rangePoints, rangeBox);
sheet.domainVolume_ +=
(domainBox[0].second - domainBox[0].first)
*(domainBox[1].second - domainBox[1].first)
*(domainBox[2].second - domainBox[2].first);
sheet.rangeArea_ +=
(rangeBox[0].second - rangeBox[0].first)
*(rangeBox[1].second - rangeBox[1].first);
}
if(sheet.domainVolume_){
sheet.hyperVolume_ = sheet.rangeArea_/sheet.domainVolume_;
}
else{
sheet.hyperVolume_ = 0;
}
return 0;
}
template <class dataTypeU, class dataTypeV>
inline int ttk::ReebSpace::perturbate(
const dataTypeU &uEpsilon, const dataTypeV &vEpsilon) const{
JacobiSet<dataTypeU, dataTypeV> jacobiSet;
jacobiSet.setWrapper(wrapper_);
jacobiSet.setInputField(uField_, vField_);
jacobiSet.setVertexNumber(vertexNumber_);
jacobiSet.perturbate(uEpsilon, vEpsilon);
return 0;
}
template <class dataTypeU, class dataTypeV>
inline int ttk::ReebSpace::simplify(const double &simplificationThreshold,
const SimplificationCriterion &criterion){
if((totalArea_ == -1)||(totalVolume_ == -1)||(totalHyperVolume_ == -1)){
Timer t;
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for num_threads(threadNumber_)
#endif
for(SimplexId i = 0; i < (SimplexId) originalData_.sheet3List_.size(); i++){
computeGeometricalMeasures<dataTypeU, dataTypeV>(
originalData_.sheet3List_[i]);
}
for(SimplexId i = 0; i < (SimplexId) originalData_.sheet3List_.size(); i++){
totalArea_ += originalData_.sheet3List_[i].rangeArea_;
totalVolume_ += originalData_.sheet3List_[i].domainVolume_;
totalHyperVolume_ += originalData_.sheet3List_[i].hyperVolume_;
}
{
std::stringstream msg;
msg << "[ReebSpace] Geometrical measures computed in "
<< t.getElapsedTime() << " s. (" << threadNumber_
<< " thread(s))" << std::endl;
dMsg(std::cout, msg.str(), timeMsg);
}
}
if(!hasConnectedSheets_){
connectSheets();
prepareSimplification();
}
{
std::stringstream msg;
msg << "[ReebSpace] Simplifying with criterion ";
switch(criterion){
case domainVolume:
msg << "'Domain Volume'";
break;
case rangeArea:
msg << "'Range Area'";
break;
case hyperVolume:
msg << "'HyperVolume'";
break;
}
msg << " at threshold " << simplificationThreshold << "." << std::endl;
dMsg(std::cout, msg.str(), timeMsg);
}
if(!((criterion == currentData_.simplificationCriterion_)
&&(simplificationThreshold > currentData_.simplificationThreshold_))){
prepareSimplification();
}
simplifySheets(simplificationThreshold, criterion);
return 0;
}
#endif // _REEBSPACE_H
|
q_rhashmap_mk_hash.tmpl.c | #include "q_rhashmap_common.h"
#include "_q_rhashmap_mk_hash___K__.h"
//------------------------------------------------------
int
q_rhashmap_mk_hash___K__(
__KEYTYPE__ *keys, // input [nkeys]
uint32_t nkeys, // input
uint64_t hmap_hashkey, // input
uint32_t *hashes // output
)
{
int status = 0;
int chunk_size = 1024;
#pragma omp parallel for schedule(static, chunk_size)
for ( uint32_t i = 0; i < nkeys; i++ ) {
hashes[i] = murmurhash3(&(keys[i]), sizeof(__KEYTYPE__), hmap_hashkey);
}
return status;
}
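/* Usage sketch (illustrative only): after the template generator substitutes
 * __K__ and __KEYTYPE__ (e.g. __K__ -> I4 and __KEYTYPE__ -> int32_t -- the
 * actual tags are defined by the build scripts, not in this file), the
 * generated function hashes a plain array of keys in parallel:
 *
 *   int32_t  keys[4] = { 7, 42, -3, 1000 };
 *   uint32_t hashes[4];
 *   uint64_t seed    = 0x9e3779b97f4a7c15ULL;  // arbitrary hash key
 *   int rc = q_rhashmap_mk_hash_I4(keys, 4, seed, hashes);
 *   // rc == 0 on success; hashes[i] holds the 32-bit murmur hash of keys[i]
 */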
|
Example_threadprivate.1.c | /*
* @@name: threadprivate.1c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
*/
int counter = 0;
#pragma omp threadprivate(counter)
int increment_counter()
{
counter++;
return(counter);
}
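/* Illustrative driver (not part of the original example, which is declared
 * non-linkable): each thread keeps its own threadprivate copy of "counter",
 * initially 0, so two calls on the same thread always return 2 regardless of
 * the team size.
 *
 *   #include <stdio.h>
 *   #include <omp.h>
 *   int main(void)
 *   {
 *     #pragma omp parallel
 *     {
 *       increment_counter();
 *       int c = increment_counter();
 *       printf("thread %d: counter = %d\n", omp_get_thread_num(), c); // 2
 *     }
 *     return 0;
 *   }
 */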
|
GB_unop__bnot_uint8_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__bnot_uint8_uint8)
// op(A') function: GB (_unop_tran__bnot_uint8_uint8)
// C type: uint8_t
// A type: uint8_t
// cast: uint8_t cij = aij
// unaryop: cij = ~(aij)
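// example (illustrative): aij = 0x0F yields cij = ~aij = 0xF0 on uint8_t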
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ~(x) ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = aij ; \
Cx [pC] = ~(z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BNOT || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__bnot_uint8_uint8)
(
uint8_t *Cx, // Cx and Ax may be aliased
const uint8_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t aij = Ax [p] ;
uint8_t z = aij ;
Cx [p] = ~(z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint8_t aij = Ax [p] ;
uint8_t z = aij ;
Cx [p] = ~(z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__bnot_uint8_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
lu.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 3.0 structured OpenMP C versions - LU
This benchmark is an OpenMP C version of the NPB LU code.
The OpenMP C versions are derived by RWCP from the serial Fortran versions
in "NPB 2.3-serial" developed by NAS. The 3.0 translation was performed by UVSQ.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: S. Weeratunga
V. Venkatakrishnan
E. Barszcz
M. Yarrow
OpenMP C version: S. Satoh
3.0 structure translation: M. Popov
--------------------------------------------------------------------*/
#include "../common/npb-C.h"
/* global variables */
#include "applu.h"
#if defined(_OPENMP)
/* for thread synchronization */
static boolean flag[ISIZ1/2*2+1];
#endif /* _OPENMP */
/* function declarations */
static void blts (int nx, int ny, int nz, int k,
double omega,
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double ldz[ISIZ1][ISIZ2][5][5],
double ldy[ISIZ1][ISIZ2][5][5],
double ldx[ISIZ1][ISIZ2][5][5],
double d[ISIZ1][ISIZ2][5][5],
int ist, int iend, int jst, int jend,
int nx0, int ny0 );
static void buts(int nx, int ny, int nz, int k,
double omega,
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double tv[ISIZ1][ISIZ2][5],
double d[ISIZ1][ISIZ2][5][5],
double udx[ISIZ1][ISIZ2][5][5],
double udy[ISIZ1][ISIZ2][5][5],
double udz[ISIZ1][ISIZ2][5][5],
int ist, int iend, int jst, int jend,
int nx0, int ny0 );
static void domain(void);
static void erhs(void);
static void error(void);
static void exact( int i, int j, int k, double u000ijk[5] );
static void jacld(int k);
static void jacu(int k);
static void l2norm (int nx0, int ny0, int nz0,
int ist, int iend,
int jst, int jend,
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double sum[5]);
static void pintgr(void);
static void read_input(void);
static void rhs(void);
static void setbv(void);
static void setcoeff(void);
static void setiv(void);
static void ssor(void);
static void verify(double xcr[5], double xce[5], double xci,
char *class, boolean *verified);
/*--------------------------------------------------------------------
program applu
--------------------------------------------------------------------*/
int main(int argc, char **argv) {
/*--------------------------------------------------------------------
c
c driver for the performance evaluation of the solver for
c five coupled parabolic/elliptic partial differential equations.
c
--------------------------------------------------------------------*/
char class;
boolean verified;
double mflops;
int nthreads = 1;
/*--------------------------------------------------------------------
c read input data
--------------------------------------------------------------------*/
read_input();
/*--------------------------------------------------------------------
c set up domain sizes
--------------------------------------------------------------------*/
domain();
/*--------------------------------------------------------------------
c set up coefficients
--------------------------------------------------------------------*/
setcoeff();
/*--------------------------------------------------------------------
c set the boundary values for dependent variables
--------------------------------------------------------------------*/
setbv();
/*--------------------------------------------------------------------
c set the initial values for dependent variables
--------------------------------------------------------------------*/
setiv();
/*--------------------------------------------------------------------
c compute the forcing term based on prescribed exact solution
--------------------------------------------------------------------*/
erhs();
#pragma omp parallel
{
#if defined(_OPENMP)
#pragma omp master
nthreads = omp_get_num_threads();
#endif /* _OPENMP */
}
/*--------------------------------------------------------------------
c perform the SSOR iterations
--------------------------------------------------------------------*/
ssor();
/*--------------------------------------------------------------------
c compute the solution error
--------------------------------------------------------------------*/
error();
/*--------------------------------------------------------------------
c compute the surface integral
--------------------------------------------------------------------*/
pintgr();
/*--------------------------------------------------------------------
c verification test
--------------------------------------------------------------------*/
verify ( rsdnm, errnm, frc, &class, &verified );
mflops = (double)itmax*(1984.77*(double)nx0
*(double)ny0
*(double)nz0
-10923.3*pow2((double)( nx0+ny0+nz0 )/3.0)
+27770.9* (double)( nx0+ny0+nz0 )/3.0
-144010.0)
/ (maxtime*1000000.0);
c_print_results("LU", class, nx0,
ny0, nz0, itmax, nthreads,
maxtime, mflops, " floating point", verified,
NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6,
"(none)");
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void blts (int nx, int ny, int nz, int k,
double omega,
/*--------------------------------------------------------------------
c To improve cache performance, second two dimensions padded by 1
c for even number sizes only. Only needed in v.
--------------------------------------------------------------------*/
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double ldz[ISIZ1][ISIZ2][5][5],
double ldy[ISIZ1][ISIZ2][5][5],
double ldx[ISIZ1][ISIZ2][5][5],
double d[ISIZ1][ISIZ2][5][5],
int ist, int iend, int jst, int jend,
int nx0, int ny0 ) {
/*--------------------------------------------------------------------
c
c compute the regular-sparse, block lower triangular solution:
c
c v <-- ( L-inv ) * v
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, m;
double tmp, tmp1;
double tmat[5][5];
#pragma omp for
for (i = ist; i <= iend; i++) {
#pragma omp parallel for private(j) firstprivate(m ,k , i )
for (j = jst; j <= jend; j++) {
#pragma omp parallel for private(m) firstprivate(j ,k ,i )
for (m = 0; m < 5; m++) {
v[i][j][k][m] = v[i][j][k][m]
- omega * ( ldz[i][j][m][0] * v[i][j][k-1][0]
+ ldz[i][j][m][1] * v[i][j][k-1][1]
+ ldz[i][j][m][2] * v[i][j][k-1][2]
+ ldz[i][j][m][3] * v[i][j][k-1][3]
+ ldz[i][j][m][4] * v[i][j][k-1][4] );
}
}
}
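/*--------------------------------------------------------------------
c   descriptive note: when compiled with OpenMP, the flag[] array in the
c   loop below provides point-to-point synchronization between consecutive
c   i iterations: an iteration spins until flag[i-1] indicates the previous
c   row is ready (and until its own flag[i] slot has been consumed), and on
c   completion it clears flag[i-1] and raises flag[i].
--------------------------------------------------------------------*/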
for (i = ist; i <= iend; i++) {
#if defined(_OPENMP)
if (i != ist) {
while (flag[i-1] == 0) {
;
}
}
if (i != iend) {
while (flag[i] == 1) {
;
}
}
#endif /* _OPENMP */
for (j = jst; j <= jend; j++) {
for (m = 0; m < 5; m++) {
v[i][j][k][m] = v[i][j][k][m]
- omega * ( ldy[i][j][m][0] * v[i][j-1][k][0]
+ ldx[i][j][m][0] * v[i-1][j][k][0]
+ ldy[i][j][m][1] * v[i][j-1][k][1]
+ ldx[i][j][m][1] * v[i-1][j][k][1]
+ ldy[i][j][m][2] * v[i][j-1][k][2]
+ ldx[i][j][m][2] * v[i-1][j][k][2]
+ ldy[i][j][m][3] * v[i][j-1][k][3]
+ ldx[i][j][m][3] * v[i-1][j][k][3]
+ ldy[i][j][m][4] * v[i][j-1][k][4]
+ ldx[i][j][m][4] * v[i-1][j][k][4] );
}
/*--------------------------------------------------------------------
c diagonal block inversion
c
c forward elimination
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
tmat[m][0] = d[i][j][m][0];
tmat[m][1] = d[i][j][m][1];
tmat[m][2] = d[i][j][m][2];
tmat[m][3] = d[i][j][m][3];
tmat[m][4] = d[i][j][m][4];
}
tmp1 = 1.0 / tmat[0][0];
tmp = tmp1 * tmat[1][0];
tmat[1][1] = tmat[1][1]
- tmp * tmat[0][1];
tmat[1][2] = tmat[1][2]
- tmp * tmat[0][2];
tmat[1][3] = tmat[1][3]
- tmp * tmat[0][3];
tmat[1][4] = tmat[1][4]
- tmp * tmat[0][4];
v[i][j][k][1] = v[i][j][k][1]
- v[i][j][k][0] * tmp;
tmp = tmp1 * tmat[2][0];
tmat[2][1] = tmat[2][1]
- tmp * tmat[0][1];
tmat[2][2] = tmat[2][2]
- tmp * tmat[0][2];
tmat[2][3] = tmat[2][3]
- tmp * tmat[0][3];
tmat[2][4] = tmat[2][4]
- tmp * tmat[0][4];
v[i][j][k][2] = v[i][j][k][2]
- v[i][j][k][0] * tmp;
tmp = tmp1 * tmat[3][0];
tmat[3][1] = tmat[3][1]
- tmp * tmat[0][1];
tmat[3][2] = tmat[3][2]
- tmp * tmat[0][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[0][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[0][4];
v[i][j][k][3] = v[i][j][k][3]
- v[i][j][k][0] * tmp;
tmp = tmp1 * tmat[4][0];
tmat[4][1] = tmat[4][1]
- tmp * tmat[0][1];
tmat[4][2] = tmat[4][2]
- tmp * tmat[0][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[0][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[0][4];
v[i][j][k][4] = v[i][j][k][4]
- v[i][j][k][0] * tmp;
tmp1 = 1.0 / tmat[ 1][1];
tmp = tmp1 * tmat[ 2][1];
tmat[2][2] = tmat[2][2]
- tmp * tmat[1][2];
tmat[2][3] = tmat[2][3]
- tmp * tmat[1][3];
tmat[2][4] = tmat[2][4]
- tmp * tmat[1][4];
v[i][j][k][2] = v[i][j][k][2]
- v[i][j][k][1] * tmp;
tmp = tmp1 * tmat[3][1];
tmat[3][2] = tmat[3][2]
- tmp * tmat[1][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[1][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[1][4];
v[i][j][k][3] = v[i][j][k][3]
- v[i][j][k][1] * tmp;
tmp = tmp1 * tmat[4][1];
tmat[4][2] = tmat[4][2]
- tmp * tmat[1][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[1][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[1][4];
v[i][j][k][4] = v[i][j][k][4]
- v[i][j][k][1] * tmp;
tmp1 = 1.0 / tmat[2][2];
tmp = tmp1 * tmat[3][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[2][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[2][4];
v[i][j][k][3] = v[i][j][k][3]
- v[i][j][k][2] * tmp;
tmp = tmp1 * tmat[4][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[2][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[2][4];
v[i][j][k][4] = v[i][j][k][4]
- v[i][j][k][2] * tmp;
tmp1 = 1.0 / tmat[3][3];
tmp = tmp1 * tmat[4][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[3][4];
v[i][j][k][4] = v[i][j][k][4]
- v[i][j][k][3] * tmp;
/*--------------------------------------------------------------------
c back substitution
--------------------------------------------------------------------*/
v[i][j][k][4] = v[i][j][k][4]
/ tmat[4][4];
v[i][j][k][3] = v[i][j][k][3]
- tmat[3][4] * v[i][j][k][4];
v[i][j][k][3] = v[i][j][k][3]
/ tmat[3][3];
v[i][j][k][2] = v[i][j][k][2]
- tmat[2][3] * v[i][j][k][3]
- tmat[2][4] * v[i][j][k][4];
v[i][j][k][2] = v[i][j][k][2]
/ tmat[2][2];
v[i][j][k][1] = v[i][j][k][1]
- tmat[1][2] * v[i][j][k][2]
- tmat[1][3] * v[i][j][k][3]
- tmat[1][4] * v[i][j][k][4];
v[i][j][k][1] = v[i][j][k][1]
/ tmat[1][1];
v[i][j][k][0] = v[i][j][k][0]
- tmat[0][1] * v[i][j][k][1]
- tmat[0][2] * v[i][j][k][2]
- tmat[0][3] * v[i][j][k][3]
- tmat[0][4] * v[i][j][k][4];
v[i][j][k][0] = v[i][j][k][0]
/ tmat[0][0];
}
#if defined(_OPENMP)
if (i != ist) flag[i-1] = 0;
if (i != iend) flag[i] = 1;
#endif /* _OPENMP */
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void buts(int nx, int ny, int nz, int k,
double omega,
/*--------------------------------------------------------------------
c To improve cache performance, second two dimensions padded by 1
c for even number sizes only. Only needed in v.
--------------------------------------------------------------------*/
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double tv[ISIZ1][ISIZ2][5],
double d[ISIZ1][ISIZ2][5][5],
double udx[ISIZ1][ISIZ2][5][5],
double udy[ISIZ1][ISIZ2][5][5],
double udz[ISIZ1][ISIZ2][5][5],
int ist, int iend, int jst, int jend,
int nx0, int ny0 ) {
/*--------------------------------------------------------------------
c
c compute the regular-sparse, block upper triangular solution:
c
c v <-- ( U-inv ) * v
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, m;
double tmp, tmp1;
double tmat[5][5];
#pragma omp parallel for
for (i = iend; i >= ist; i--) {
#pragma omp parallel for private(j) firstprivate(k ,omega ,i)
for (j = jend; j >= jst; j--) {
#pragma omp parallel for private(m) firstprivate(k, j, omega, i)
for (m = 0; m < 5; m++) {
tv[i][j][m] =
omega * ( udz[i][j][m][0] * v[i][j][k+1][0]
+ udz[i][j][m][1] * v[i][j][k+1][1]
+ udz[i][j][m][2] * v[i][j][k+1][2]
+ udz[i][j][m][3] * v[i][j][k+1][3]
+ udz[i][j][m][4] * v[i][j][k+1][4] );
}
}
}
for (i = iend; i >= ist; i--) {
#if defined(_OPENMP)
if (i != iend) {
while (flag[i+1] == 0) {
;
}
}
if (i != ist) {
while (flag[i] == 1) {
;
}
}
#endif /* _OPENMP */
for (j = jend; j >= jst; j--) {
for (m = 0; m < 5; m++) {
tv[i][j][m] = tv[i][j][m]
+ omega * ( udy[i][j][m][0] * v[i][j+1][k][0]
+ udx[i][j][m][0] * v[i+1][j][k][0]
+ udy[i][j][m][1] * v[i][j+1][k][1]
+ udx[i][j][m][1] * v[i+1][j][k][1]
+ udy[i][j][m][2] * v[i][j+1][k][2]
+ udx[i][j][m][2] * v[i+1][j][k][2]
+ udy[i][j][m][3] * v[i][j+1][k][3]
+ udx[i][j][m][3] * v[i+1][j][k][3]
+ udy[i][j][m][4] * v[i][j+1][k][4]
+ udx[i][j][m][4] * v[i+1][j][k][4] );
}
/*--------------------------------------------------------------------
c diagonal block inversion
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
tmat[m][0] = d[i][j][m][0];
tmat[m][1] = d[i][j][m][1];
tmat[m][2] = d[i][j][m][2];
tmat[m][3] = d[i][j][m][3];
tmat[m][4] = d[i][j][m][4];
}
tmp1 = 1.0 / tmat[0][0];
tmp = tmp1 * tmat[1][0];
tmat[1][1] = tmat[1][1]
- tmp * tmat[0][1];
tmat[1][2] = tmat[1][2]
- tmp * tmat[0][2];
tmat[1][3] = tmat[1][3]
- tmp * tmat[0][3];
tmat[1][4] = tmat[1][4]
- tmp * tmat[0][4];
tv[i][j][1] = tv[i][j][1]
- tv[i][j][0] * tmp;
tmp = tmp1 * tmat[2][0];
tmat[2][1] = tmat[2][1]
- tmp * tmat[0][1];
tmat[2][2] = tmat[2][2]
- tmp * tmat[0][2];
tmat[2][3] = tmat[2][3]
- tmp * tmat[0][3];
tmat[2][4] = tmat[2][4]
- tmp * tmat[0][4];
tv[i][j][2] = tv[i][j][2]
- tv[i][j][0] * tmp;
tmp = tmp1 * tmat[3][0];
tmat[3][1] = tmat[3][1]
- tmp * tmat[0][1];
tmat[3][2] = tmat[3][2]
- tmp * tmat[0][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[0][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[0][4];
tv[i][j][3] = tv[i][j][3]
- tv[i][j][0] * tmp;
tmp = tmp1 * tmat[4][0];
tmat[4][1] = tmat[4][1]
- tmp * tmat[0][1];
tmat[4][2] = tmat[4][2]
- tmp * tmat[0][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[0][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[0][4];
tv[i][j][4] = tv[i][j][4]
- tv[i][j][0] * tmp;
tmp1 = 1.0 / tmat[1][1];
tmp = tmp1 * tmat[2][1];
tmat[2][2] = tmat[2][2]
- tmp * tmat[1][2];
tmat[2][3] = tmat[2][3]
- tmp * tmat[1][3];
tmat[2][4] = tmat[2][4]
- tmp * tmat[1][4];
tv[i][j][2] = tv[i][j][2]
- tv[i][j][1] * tmp;
tmp = tmp1 * tmat[3][1];
tmat[3][2] = tmat[3][2]
- tmp * tmat[1][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[1][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[1][4];
tv[i][j][3] = tv[i][j][3]
- tv[i][j][1] * tmp;
tmp = tmp1 * tmat[4][1];
tmat[4][2] = tmat[4][2]
- tmp * tmat[1][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[1][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[1][4];
tv[i][j][4] = tv[i][j][4]
- tv[i][j][1] * tmp;
tmp1 = 1.0 / tmat[2][2];
tmp = tmp1 * tmat[3][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[2][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[2][4];
tv[i][j][3] = tv[i][j][3]
- tv[i][j][2] * tmp;
tmp = tmp1 * tmat[4][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[2][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[2][4];
tv[i][j][4] = tv[i][j][4]
- tv[i][j][2] * tmp;
tmp1 = 1.0 / tmat[3][3];
tmp = tmp1 * tmat[4][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[3][4];
tv[i][j][4] = tv[i][j][4]
- tv[i][j][3] * tmp;
/*--------------------------------------------------------------------
c back substitution
--------------------------------------------------------------------*/
tv[i][j][4] = tv[i][j][4]
/ tmat[4][4];
tv[i][j][3] = tv[i][j][3]
- tmat[3][4] * tv[i][j][4];
tv[i][j][3] = tv[i][j][3]
/ tmat[3][3];
tv[i][j][2] = tv[i][j][2]
- tmat[2][3] * tv[i][j][3]
- tmat[2][4] * tv[i][j][4];
tv[i][j][2] = tv[i][j][2]
/ tmat[2][2];
tv[i][j][1] = tv[i][j][1]
- tmat[1][2] * tv[i][j][2]
- tmat[1][3] * tv[i][j][3]
- tmat[1][4] * tv[i][j][4];
tv[i][j][1] = tv[i][j][1]
/ tmat[1][1];
tv[i][j][0] = tv[i][j][0]
- tmat[0][1] * tv[i][j][1]
- tmat[0][2] * tv[i][j][2]
- tmat[0][3] * tv[i][j][3]
- tmat[0][4] * tv[i][j][4];
tv[i][j][0] = tv[i][j][0]
/ tmat[0][0];
v[i][j][k][0] = v[i][j][k][0] - tv[i][j][0];
v[i][j][k][1] = v[i][j][k][1] - tv[i][j][1];
v[i][j][k][2] = v[i][j][k][2] - tv[i][j][2];
v[i][j][k][3] = v[i][j][k][3] - tv[i][j][3];
v[i][j][k][4] = v[i][j][k][4] - tv[i][j][4];
}
#if defined(_OPENMP)
if (i != iend) flag[i+1] = 0;
if (i != ist) flag[i] = 1;
#endif /* _OPENMP */
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void domain(void) {
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
nx = nx0;
ny = ny0;
nz = nz0;
/*--------------------------------------------------------------------
c check the sub-domain size
--------------------------------------------------------------------*/
if ( nx < 4 || ny < 4 || nz < 4 ) {
printf(" SUBDOMAIN SIZE IS TOO SMALL - \n"
" ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n"
" SO THAT NX, NY AND NZ ARE GREATER THAN OR EQUAL\n"
" TO 4 THEY ARE CURRENTLY%3d%3d%3d\n", nx, ny, nz);
exit(1);
}
if ( nx > ISIZ1 || ny > ISIZ2 || nz > ISIZ3 ) {
printf(" SUBDOMAIN SIZE IS TOO LARGE - \n"
" ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n"
" SO THAT NX, NY AND NZ ARE LESS THAN OR EQUAL TO \n"
" ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY. THEY ARE\n"
" CURRENTLY%4d%4d%4d\n", nx, ny, nz);
exit(1);
}
/*--------------------------------------------------------------------
c set up the start and end in i and j extents for all processors
--------------------------------------------------------------------*/
ist = 1;
iend = nx - 2;
jst = 1;
jend = ny - 2;
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void erhs(void) {
{
/*--------------------------------------------------------------------
c
c compute the right hand side based on exact solution
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k, m;
int iglob, jglob;
int L1, L2;
int ist1, iend1;
int jst1, jend1;
double dsspm;
double xi, eta, zeta;
double q;
double u21, u31, u41;
double tmp;
double u21i, u31i, u41i, u51i;
double u21j, u31j, u41j, u51j;
double u21k, u31k, u41k, u51k;
double u21im1, u31im1, u41im1, u51im1;
double u21jm1, u31jm1, u41jm1, u51jm1;
double u21km1, u31km1, u41km1, u51km1;
dsspm = dssp;
#pragma omp parallel for
for (i = 0; i < nx; i++) {
#pragma omp parallel for private(j) firstprivate(nx ,m ,k ,nz ,ny ,i )
for (j = 0; j < ny; j++) {
#pragma omp parallel for private(k) firstprivate(nx ,m ,j ,nz ,ny ,i )
for (k = 0; k < nz; k++) {
#pragma omp parallel for private(m) firstprivate(nx ,k ,j ,nz ,ny ,i )
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = 0.0;
}
}
}
}
#pragma omp parallel for
for (i = 0; i < nx; i++) {
iglob = i;
xi = ( (double)(iglob) ) / ( nx0 - 1 );
#pragma omp parallel for private(j, zeta) firstprivate(nx ,m ,k ,xi ,eta ,nx0 ,ny0 ,nz ,ny ,i )
for (j = 0; j < ny; j++) {
jglob = j;
eta = ( (double)(jglob) ) / ( ny0 - 1 );
#pragma omp parallel for private(k, zeta) firstprivate(nx ,m ,j ,xi ,eta ,nx0 ,ny0 ,nz ,ny ,i )
for (k = 0; k < nz; k++) {
zeta = ( (double)(k) ) / ( nz - 1 );
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = ce[m][0]
+ ce[m][1] * xi
+ ce[m][2] * eta
+ ce[m][3] * zeta
+ ce[m][4] * xi * xi
+ ce[m][5] * eta * eta
+ ce[m][6] * zeta * zeta
+ ce[m][7] * xi * xi * xi
+ ce[m][8] * eta * eta * eta
+ ce[m][9] * zeta * zeta * zeta
+ ce[m][10] * xi * xi * xi * xi
+ ce[m][11] * eta * eta * eta * eta
+ ce[m][12] * zeta * zeta * zeta * zeta;
}
}
}
}
/*--------------------------------------------------------------------
c xi-direction flux differences
--------------------------------------------------------------------*/
L1 = 0;
L2 = nx-1;
#pragma omp parallel for
for (i = L1; i <= L2; i++) {
#pragma omp parallel for private(j) firstprivate(L2 ,nx ,k ,u21 ,q ,nz ,jst ,jend ,i )
for (j = jst; j <= jend; j++) {
#pragma omp parallel for private(k) firstprivate(L2 ,nx ,j ,u21 ,q ,nz ,jst ,jend ,i )
for (k = 1; k < nz - 1; k++) {
flux[i][j][k][0] = rsd[i][j][k][1];
u21 = rsd[i][j][k][1] / rsd[i][j][k][0];
q = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]
+ rsd[i][j][k][2] * rsd[i][j][k][2]
+ rsd[i][j][k][3] * rsd[i][j][k][3] )
/ rsd[i][j][k][0];
flux[i][j][k][1] = rsd[i][j][k][1] * u21 + C2 *
( rsd[i][j][k][4] - q );
flux[i][j][k][2] = rsd[i][j][k][2] * u21;
flux[i][j][k][3] = rsd[i][j][k][3] * u21;
flux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u21;
}
}
}
#pragma omp parallel for
for (j = jst; j <= jend; j++) {
#pragma omp parallel for firstprivate(jend ,m ,i ,jst ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j )
for (k = 1; k <= nz - 2; k++) {
#pragma omp parallel for firstprivate(jend ,m ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j )
for (i = ist; i <= iend; i++) {
#pragma omp parallel for firstprivate(jend ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j )
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );
}
}
#pragma omp parallel for firstprivate(jend ,m ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j )
for (i = ist; i <= L2; i++) {
tmp = 1.0 / rsd[i][j][k][0];
u21i = tmp * rsd[i][j][k][1];
u31i = tmp * rsd[i][j][k][2];
u41i = tmp * rsd[i][j][k][3];
u51i = tmp * rsd[i][j][k][4];
tmp = 1.0 / rsd[i-1][j][k][0];
u21im1 = tmp * rsd[i-1][j][k][1];
u31im1 = tmp * rsd[i-1][j][k][2];
u41im1 = tmp * rsd[i-1][j][k][3];
u51im1 = tmp * rsd[i-1][j][k][4];
flux[i][j][k][1] = (4.0/3.0) * tx3 *
( u21i - u21im1 );
flux[i][j][k][2] = tx3 * ( u31i - u31im1 );
flux[i][j][k][3] = tx3 * ( u41i - u41im1 );
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* tx3 * ( ( u21i * u21i + u31i * u31i + u41i * u41i )
- ( u21im1*u21im1 + u31im1*u31im1 + u41im1*u41im1 ) )
+ (1.0/6.0)
* tx3 * ( u21i*u21i - u21im1*u21im1 )
+ C1 * C5 * tx3 * ( u51i - u51im1 );
}
#pragma omp parallel for firstprivate(jend ,m ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j )
for (i = ist; i <= iend; i++) {
frct[i][j][k][0] = frct[i][j][k][0]
+ dx1 * tx1 * ( rsd[i-1][j][k][0]
- 2.0 * rsd[i][j][k][0]
+ rsd[i+1][j][k][0] );
frct[i][j][k][1] = frct[i][j][k][1]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] )
+ dx2 * tx1 * ( rsd[i-1][j][k][1]
- 2.0 * rsd[i][j][k][1]
+ rsd[i+1][j][k][1] );
frct[i][j][k][2] = frct[i][j][k][2]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] )
+ dx3 * tx1 * ( rsd[i-1][j][k][2]
- 2.0 * rsd[i][j][k][2]
+ rsd[i+1][j][k][2] );
frct[i][j][k][3] = frct[i][j][k][3]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][3] - flux[i][j][k][3] )
+ dx4 * tx1 * ( rsd[i-1][j][k][3]
- 2.0 * rsd[i][j][k][3]
+ rsd[i+1][j][k][3] );
frct[i][j][k][4] = frct[i][j][k][4]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] )
+ dx5 * tx1 * ( rsd[i-1][j][k][4]
- 2.0 * rsd[i][j][k][4]
+ rsd[i+1][j][k][4] );
}
/*--------------------------------------------------------------------
c Fourth-order dissipation
--------------------------------------------------------------------*/
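/*--------------------------------------------------------------------
c  Note: the interior loop below applies the classical (1,-4,6,-4,1)
c  fourth-difference stencil scaled by dsspm; the first two and last
c  two interior points use truncated one-sided variants so the
c  stencil never reaches outside the grid.
--------------------------------------------------------------------*/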
for (m = 0; m < 5; m++) {
frct[1][j][k][m] = frct[1][j][k][m]
- dsspm * ( + 5.0 * rsd[1][j][k][m]
- 4.0 * rsd[2][j][k][m]
+ rsd[3][j][k][m] );
frct[2][j][k][m] = frct[2][j][k][m]
- dsspm * ( - 4.0 * rsd[1][j][k][m]
+ 6.0 * rsd[2][j][k][m]
- 4.0 * rsd[3][j][k][m]
+ rsd[4][j][k][m] );
}
ist1 = 3;
iend1 = nx - 4;
for (i = ist1; i <=iend1; i++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- dsspm * ( rsd[i-2][j][k][m]
- 4.0 * rsd[i-1][j][k][m]
+ 6.0 * rsd[i][j][k][m]
- 4.0 * rsd[i+1][j][k][m]
+ rsd[i+2][j][k][m] );
}
}
for (m = 0; m < 5; m++) {
frct[nx-3][j][k][m] = frct[nx-3][j][k][m]
- dsspm * ( rsd[nx-5][j][k][m]
- 4.0 * rsd[nx-4][j][k][m]
+ 6.0 * rsd[nx-3][j][k][m]
- 4.0 * rsd[nx-2][j][k][m] );
frct[nx-2][j][k][m] = frct[nx-2][j][k][m]
- dsspm * ( rsd[nx-4][j][k][m]
- 4.0 * rsd[nx-3][j][k][m]
+ 5.0 * rsd[nx-2][j][k][m] );
}
}
}
/*--------------------------------------------------------------------
c eta-direction flux differences
--------------------------------------------------------------------*/
L1 = 0;
L2 = ny-1;
#pragma omp parallel for
for (i = ist; i <= iend; i++) {
#pragma omp parallel for firstprivate(iend ,ist ,k ,ny ,u31 ,q ,nz ,L2 ,i )
for (j = L1; j <= L2; j++) {
#pragma omp parallel for firstprivate(iend ,ist ,j ,ny ,u31 ,q ,nz ,L2 ,i )
for (k = 1; k <= nz - 2; k++) {
flux[i][j][k][0] = rsd[i][j][k][2];
u31 = rsd[i][j][k][2] / rsd[i][j][k][0];
q = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]
+ rsd[i][j][k][2] * rsd[i][j][k][2]
+ rsd[i][j][k][3] * rsd[i][j][k][3] )
/ rsd[i][j][k][0];
flux[i][j][k][1] = rsd[i][j][k][1] * u31;
flux[i][j][k][2] = rsd[i][j][k][2] * u31 + C2 *
( rsd[i][j][k][4] - q );
flux[i][j][k][3] = rsd[i][j][k][3] * u31;
flux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u31;
}
}
}
#pragma omp parallel for
for (i = ist; i <= iend; i++) {
#pragma omp parallel for firstprivate(iend ,m ,j ,ist ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i )
for (k = 1; k <= nz - 2; k++) {
#pragma omp parallel for firstprivate(iend ,m ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i )
for (j = jst; j <= jend; j++) {
#pragma omp parallel for firstprivate(iend ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i )
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );
}
}
#pragma omp parallel for firstprivate(iend ,m ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i )
for (j = jst; j <= L2; j++) {
tmp = 1.0 / rsd[i][j][k][0];
u21j = tmp * rsd[i][j][k][1];
u31j = tmp * rsd[i][j][k][2];
u41j = tmp * rsd[i][j][k][3];
u51j = tmp * rsd[i][j][k][4];
tmp = 1.0 / rsd[i][j-1][k][0];
u21jm1 = tmp * rsd[i][j-1][k][1];
u31jm1 = tmp * rsd[i][j-1][k][2];
u41jm1 = tmp * rsd[i][j-1][k][3];
u51jm1 = tmp * rsd[i][j-1][k][4];
flux[i][j][k][1] = ty3 * ( u21j - u21jm1 );
flux[i][j][k][2] = (4.0/3.0) * ty3 *
( u31j - u31jm1 );
flux[i][j][k][3] = ty3 * ( u41j - u41jm1 );
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* ty3 * ( ( u21j *u21j + u31j *u31j + u41j *u41j )
- ( u21jm1*u21jm1 + u31jm1*u31jm1 + u41jm1*u41jm1 ) )
+ (1.0/6.0)
* ty3 * ( u31j*u31j - u31jm1*u31jm1 )
+ C1 * C5 * ty3 * ( u51j - u51jm1 );
}
#pragma omp parallel for firstprivate(iend ,m ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i )
for (j = jst; j <= jend; j++) {
frct[i][j][k][0] = frct[i][j][k][0]
+ dy1 * ty1 * ( rsd[i][j-1][k][0]
- 2.0 * rsd[i][j][k][0]
+ rsd[i][j+1][k][0] );
frct[i][j][k][1] = frct[i][j][k][1]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] )
+ dy2 * ty1 * ( rsd[i][j-1][k][1]
- 2.0 * rsd[i][j][k][1]
+ rsd[i][j+1][k][1] );
frct[i][j][k][2] = frct[i][j][k][2]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] )
+ dy3 * ty1 * ( rsd[i][j-1][k][2]
- 2.0 * rsd[i][j][k][2]
+ rsd[i][j+1][k][2] );
frct[i][j][k][3] = frct[i][j][k][3]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] )
+ dy4 * ty1 * ( rsd[i][j-1][k][3]
- 2.0 * rsd[i][j][k][3]
+ rsd[i][j+1][k][3] );
frct[i][j][k][4] = frct[i][j][k][4]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] )
+ dy5 * ty1 * ( rsd[i][j-1][k][4]
- 2.0 * rsd[i][j][k][4]
+ rsd[i][j+1][k][4] );
}
/*--------------------------------------------------------------------
c fourth-order dissipation
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
frct[i][1][k][m] = frct[i][1][k][m]
- dsspm * ( + 5.0 * rsd[i][1][k][m]
- 4.0 * rsd[i][2][k][m]
+ rsd[i][3][k][m] );
frct[i][2][k][m] = frct[i][2][k][m]
- dsspm * ( - 4.0 * rsd[i][1][k][m]
+ 6.0 * rsd[i][2][k][m]
- 4.0 * rsd[i][3][k][m]
+ rsd[i][4][k][m] );
}
jst1 = 3;
jend1 = ny - 4;
for (j = jst1; j <= jend1; j++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- dsspm * ( rsd[i][j-2][k][m]
- 4.0 * rsd[i][j-1][k][m]
+ 6.0 * rsd[i][j][k][m]
- 4.0 * rsd[i][j+1][k][m]
+ rsd[i][j+2][k][m] );
}
}
for (m = 0; m < 5; m++) {
frct[i][ny-3][k][m] = frct[i][ny-3][k][m]
- dsspm * ( rsd[i][ny-5][k][m]
- 4.0 * rsd[i][ny-4][k][m]
+ 6.0 * rsd[i][ny-3][k][m]
- 4.0 * rsd[i][ny-2][k][m] );
frct[i][ny-2][k][m] = frct[i][ny-2][k][m]
- dsspm * ( rsd[i][ny-4][k][m]
- 4.0 * rsd[i][ny-3][k][m]
+ 5.0 * rsd[i][ny-2][k][m] );
}
}
}
/*--------------------------------------------------------------------
c zeta-direction flux differences
--------------------------------------------------------------------*/
#pragma omp parallel for
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
#pragma omp parallel for firstprivate(nz ,ist ,jst ,u41 ,q ,j ,i )
for (k = 0; k <= nz-1; k++) {
flux[i][j][k][0] = rsd[i][j][k][3];
u41 = rsd[i][j][k][3] / rsd[i][j][k][0];
q = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]
+ rsd[i][j][k][2] * rsd[i][j][k][2]
+ rsd[i][j][k][3] * rsd[i][j][k][3] )
/ rsd[i][j][k][0];
flux[i][j][k][1] = rsd[i][j][k][1] * u41;
flux[i][j][k][2] = rsd[i][j][k][2] * u41;
flux[i][j][k][3] = rsd[i][j][k][3] * u41 + C2 *
( rsd[i][j][k][4] - q );
flux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u41;
}
#pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,tz2 ,j ,i )
for (k = 1; k <= nz - 2; k++) {
#pragma omp parallel for firstprivate(nz ,ist ,jst ,tz2 ,k ,j ,i )
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] );
}
}
#pragma omp parallel for firstprivate(nz ,ist ,jst ,u21k ,u31k ,u41k ,u51k ,tmp ,u21km1 ,u31km1 ,u41km1 ,u51km1 ,tz3 ,j ,i )
for (k = 1; k <= nz-1; k++) {
tmp = 1.0 / rsd[i][j][k][0];
u21k = tmp * rsd[i][j][k][1];
u31k = tmp * rsd[i][j][k][2];
u41k = tmp * rsd[i][j][k][3];
u51k = tmp * rsd[i][j][k][4];
tmp = 1.0 / rsd[i][j][k-1][0];
u21km1 = tmp * rsd[i][j][k-1][1];
u31km1 = tmp * rsd[i][j][k-1][2];
u41km1 = tmp * rsd[i][j][k-1][3];
u51km1 = tmp * rsd[i][j][k-1][4];
flux[i][j][k][1] = tz3 * ( u21k - u21km1 );
flux[i][j][k][2] = tz3 * ( u31k - u31km1 );
flux[i][j][k][3] = (4.0/3.0) * tz3 * ( u41k
- u41km1 );
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* tz3 * ( ( u21k *u21k + u31k *u31k + u41k *u41k )
- ( u21km1*u21km1 + u31km1*u31km1 + u41km1*u41km1 ) )
+ (1.0/6.0)
* tz3 * ( u41k*u41k - u41km1*u41km1 )
+ C1 * C5 * tz3 * ( u51k - u51km1 );
}
#pragma omp parallel for firstprivate(nz ,ist ,jst ,tz1 ,dz1 ,dz2 ,tz3 ,dz3 ,dz4 ,dz5 ,j ,i )
for (k = 1; k <= nz - 2; k++) {
frct[i][j][k][0] = frct[i][j][k][0]
+ dz1 * tz1 * ( rsd[i][j][k+1][0]
- 2.0 * rsd[i][j][k][0]
+ rsd[i][j][k-1][0] );
frct[i][j][k][1] = frct[i][j][k][1]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][1] - flux[i][j][k][1] )
+ dz2 * tz1 * ( rsd[i][j][k+1][1]
- 2.0 * rsd[i][j][k][1]
+ rsd[i][j][k-1][1] );
frct[i][j][k][2] = frct[i][j][k][2]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][2] - flux[i][j][k][2] )
+ dz3 * tz1 * ( rsd[i][j][k+1][2]
- 2.0 * rsd[i][j][k][2]
+ rsd[i][j][k-1][2] );
frct[i][j][k][3] = frct[i][j][k][3]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][3] - flux[i][j][k][3] )
+ dz4 * tz1 * ( rsd[i][j][k+1][3]
- 2.0 * rsd[i][j][k][3]
+ rsd[i][j][k-1][3] );
frct[i][j][k][4] = frct[i][j][k][4]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][4] - flux[i][j][k][4] )
+ dz5 * tz1 * ( rsd[i][j][k+1][4]
- 2.0 * rsd[i][j][k][4]
+ rsd[i][j][k-1][4] );
}
/*--------------------------------------------------------------------
c fourth-order dissipation
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
frct[i][j][1][m] = frct[i][j][1][m]
- dsspm * ( + 5.0 * rsd[i][j][1][m]
- 4.0 * rsd[i][j][2][m]
+ rsd[i][j][3][m] );
frct[i][j][2][m] = frct[i][j][2][m]
- dsspm * (- 4.0 * rsd[i][j][1][m]
+ 6.0 * rsd[i][j][2][m]
- 4.0 * rsd[i][j][3][m]
+ rsd[i][j][4][m] );
}
#pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,dssp ,j ,i )
for (k = 3; k <= nz - 4; k++) {
#pragma omp parallel for firstprivate(nz ,ist ,jst ,dssp ,k ,j ,i )
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- dsspm * ( rsd[i][j][k-2][m]
- 4.0 * rsd[i][j][k-1][m]
+ 6.0 * rsd[i][j][k][m]
- 4.0 * rsd[i][j][k+1][m]
+ rsd[i][j][k+2][m] );
}
}
for (m = 0; m < 5; m++) {
frct[i][j][nz-3][m] = frct[i][j][nz-3][m]
- dsspm * ( rsd[i][j][nz-5][m]
- 4.0 * rsd[i][j][nz-4][m]
+ 6.0 * rsd[i][j][nz-3][m]
- 4.0 * rsd[i][j][nz-2][m] );
frct[i][j][nz-2][m] = frct[i][j][nz-2][m]
- dsspm * ( rsd[i][j][nz-4][m]
- 4.0 * rsd[i][j][nz-3][m]
+ 5.0 * rsd[i][j][nz-2][m] );
}
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void error(void) {
/*--------------------------------------------------------------------
c
c compute the solution error
c
--------------------------------------------------------------------*/
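/*--------------------------------------------------------------------
c  Note: errnm[m] below is the RMS error of solution component m over
c  the interior points, i.e. sqrt( sum (u_exact - u)^2 /
c  ((nx0-2)*(ny0-2)*(nz0-2)) ).
--------------------------------------------------------------------*/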
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k, m;
int iglob, jglob;
double tmp;
double u000ijk[5];
#pragma omp parallel for
for (m = 0; m < 5; m++) {
errnm[m] = 0.0;
}
for (i = ist; i <= iend; i++) {
iglob = i;
for (j = jst; j <= jend; j++) {
jglob = j;
for (k = 1; k <= nz-2; k++) {
exact( iglob, jglob, k, u000ijk );
#pragma omp parallel for firstprivate(ist ,tmp ,jst ,k ,j ,i )
for (m = 0; m < 5; m++) {
tmp = ( u000ijk[m] - u[i][j][k][m] );
errnm[m] = errnm[m] + tmp *tmp;
}
}
}
}
#pragma omp parallel for firstprivate(nz0 ,ny0 ,nx0 )
for (m = 0; m < 5; m++) {
errnm[m] = sqrt ( errnm[m] / ( (nx0-2)*(ny0-2)*(nz0-2) ) );
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void exact( int i, int j, int k, double u000ijk[5] ) {
/*--------------------------------------------------------------------
c
c compute the exact solution at (i,j,k)
c
--------------------------------------------------------------------*/
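/*--------------------------------------------------------------------
c  Note: the exact solution is a quartic polynomial in each of the
c  normalized coordinates xi, eta and zeta separately (no cross
c  terms); ce[m][0..12] supplies the thirteen coefficients for
c  component m.
--------------------------------------------------------------------*/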
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int m;
double xi, eta, zeta;
xi = ((double)i) / (nx0 - 1);
eta = ((double)j) / (ny0 - 1);
zeta = ((double)k) / (nz - 1);
#pragma omp parallel for firstprivate(zeta ,eta ,xi ,u000ijk )
for (m = 0; m < 5; m++) {
u000ijk[m] = ce[m][0]
+ ce[m][1] * xi
+ ce[m][2] * eta
+ ce[m][3] * zeta
+ ce[m][4] * xi * xi
+ ce[m][5] * eta * eta
+ ce[m][6] * zeta * zeta
+ ce[m][7] * xi * xi * xi
+ ce[m][8] * eta * eta * eta
+ ce[m][9] * zeta * zeta * zeta
+ ce[m][10] * xi * xi * xi * xi
+ ce[m][11] * eta * eta * eta * eta
+ ce[m][12] * zeta * zeta * zeta * zeta;
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void jacld(int k) {
/*--------------------------------------------------------------------
c compute the lower triangular part of the jacobian matrix
--------------------------------------------------------------------*/
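/*--------------------------------------------------------------------
c  Note: for the lower-triangular sweep, d holds the 5x5 block on the
c  diagonal while a, b and c hold the blocks coupling point (i,j,k)
c  to (i,j,k-1), (i,j-1,k) and (i-1,j,k) respectively, as the index
c  offsets in the assignments below indicate.
--------------------------------------------------------------------*/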
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j;
double r43;
double c1345;
double c34;
double tmp1, tmp2, tmp3;
r43 = ( 4.0 / 3.0 );
c1345 = C1 * C3 * C4 * C5;
c34 = C3 * C4;
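/*--------------------------------------------------------------------
c  Note: r43 = 4/3, c34 = C3*C4 and c1345 = C1*C3*C4*C5 are the
c  viscous coefficient groupings reused throughout the blocks below;
c  pow2(a) is assumed here to be the squaring macro (a)*(a) from the
c  benchmark headers.
--------------------------------------------------------------------*/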
#pragma omp for
for (i = ist; i <= iend; i++) {
#pragma omp parallel for firstprivate(iend ,ist ,tmp1 ,tmp2 ,tmp3 ,k ,dz1 ,tz1 ,dy1 ,ty1 ,dx1 ,tx1 ,dt ,dz2 ,dy2 ,dx2 ,dz3 ,dy3 ,dx3 ,dz4 ,dy4 ,dx4 ,dz5 ,dy5 ,dx5 ,tz2 ,ty2 ,tx2 ,jst ,jend ,i )
for (j = jst; j <= jend; j++) {
/*--------------------------------------------------------------------
c form the block diagonal
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
d[i][j][0][0] = 1.0
+ dt * 2.0 * ( tx1 * dx1
+ ty1 * dy1
+ tz1 * dz1 );
d[i][j][0][1] = 0.0;
d[i][j][0][2] = 0.0;
d[i][j][0][3] = 0.0;
d[i][j][0][4] = 0.0;
d[i][j][1][0] = dt * 2.0
* ( tx1 * ( - r43 * c34 * tmp2 * u[i][j][k][1] )
+ ty1 * ( - c34 * tmp2 * u[i][j][k][1] )
+ tz1 * ( - c34 * tmp2 * u[i][j][k][1] ) );
d[i][j][1][1] = 1.0
+ dt * 2.0
* ( tx1 * r43 * c34 * tmp1
+ ty1 * c34 * tmp1
+ tz1 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx2
+ ty1 * dy2
+ tz1 * dz2 );
d[i][j][1][2] = 0.0;
d[i][j][1][3] = 0.0;
d[i][j][1][4] = 0.0;
d[i][j][2][0] = dt * 2.0
* ( tx1 * ( - c34 * tmp2 * u[i][j][k][2] )
+ ty1 * ( - r43 * c34 * tmp2 * u[i][j][k][2] )
+ tz1 * ( - c34 * tmp2 * u[i][j][k][2] ) );
d[i][j][2][1] = 0.0;
d[i][j][2][2] = 1.0
+ dt * 2.0
* ( tx1 * c34 * tmp1
+ ty1 * r43 * c34 * tmp1
+ tz1 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx3
+ ty1 * dy3
+ tz1 * dz3 );
d[i][j][2][3] = 0.0;
d[i][j][2][4] = 0.0;
d[i][j][3][0] = dt * 2.0
* ( tx1 * ( - c34 * tmp2 * u[i][j][k][3] )
+ ty1 * ( - c34 * tmp2 * u[i][j][k][3] )
+ tz1 * ( - r43 * c34 * tmp2 * u[i][j][k][3] ) );
d[i][j][3][1] = 0.0;
d[i][j][3][2] = 0.0;
d[i][j][3][3] = 1.0
+ dt * 2.0
* ( tx1 * c34 * tmp1
+ ty1 * c34 * tmp1
+ tz1 * r43 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx4
+ ty1 * dy4
+ tz1 * dz4 );
d[i][j][3][4] = 0.0;
d[i][j][4][0] = dt * 2.0
* ( tx1 * ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] )
+ ty1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] )
+ tz1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] ) );
d[i][j][4][1] = dt * 2.0
* ( tx1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][1]
+ ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1]
+ tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1] );
d[i][j][4][2] = dt * 2.0
* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2]
+ ty1 * ( r43*c34 -c1345 ) * tmp2 * u[i][j][k][2]
+ tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2] );
d[i][j][4][3] = dt * 2.0
* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]
+ ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]
+ tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][3] );
d[i][j][4][4] = 1.0
+ dt * 2.0 * ( tx1 * c1345 * tmp1
+ ty1 * c1345 * tmp1
+ tz1 * c1345 * tmp1 )
+ dt * 2.0 * ( tx1 * dx5
+ ty1 * dy5
+ tz1 * dz5 );
/*--------------------------------------------------------------------
c form the first block sub-diagonal
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i][j][k-1][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
a[i][j][0][0] = - dt * tz1 * dz1;
a[i][j][0][1] = 0.0;
a[i][j][0][2] = 0.0;
a[i][j][0][3] = - dt * tz2;
a[i][j][0][4] = 0.0;
a[i][j][1][0] = - dt * tz2
* ( - ( u[i][j][k-1][1]*u[i][j][k-1][3] ) * tmp2 )
- dt * tz1 * ( - c34 * tmp2 * u[i][j][k-1][1] );
a[i][j][1][1] = - dt * tz2 * ( u[i][j][k-1][3] * tmp1 )
- dt * tz1 * c34 * tmp1
- dt * tz1 * dz2 ;
a[i][j][1][2] = 0.0;
a[i][j][1][3] = - dt * tz2 * ( u[i][j][k-1][1] * tmp1 );
a[i][j][1][4] = 0.0;
a[i][j][2][0] = - dt * tz2
* ( - ( u[i][j][k-1][2]*u[i][j][k-1][3] ) * tmp2 )
- dt * tz1 * ( - c34 * tmp2 * u[i][j][k-1][2] );
a[i][j][2][1] = 0.0;
a[i][j][2][2] = - dt * tz2 * ( u[i][j][k-1][3] * tmp1 )
- dt * tz1 * ( c34 * tmp1 )
- dt * tz1 * dz3;
a[i][j][2][3] = - dt * tz2 * ( u[i][j][k-1][2] * tmp1 );
a[i][j][2][4] = 0.0;
a[i][j][3][0] = - dt * tz2
* ( - ( u[i][j][k-1][3] * tmp1 ) *( u[i][j][k-1][3] * tmp1 )
+ 0.50 * C2
* ( ( u[i][j][k-1][1] * u[i][j][k-1][1]
+ u[i][j][k-1][2] * u[i][j][k-1][2]
+ u[i][j][k-1][3] * u[i][j][k-1][3] ) * tmp2 ) )
- dt * tz1 * ( - r43 * c34 * tmp2 * u[i][j][k-1][3] );
a[i][j][3][1] = - dt * tz2
* ( - C2 * ( u[i][j][k-1][1] * tmp1 ) );
a[i][j][3][2] = - dt * tz2
* ( - C2 * ( u[i][j][k-1][2] * tmp1 ) );
a[i][j][3][3] = - dt * tz2 * ( 2.0 - C2 )
* ( u[i][j][k-1][3] * tmp1 )
- dt * tz1 * ( r43 * c34 * tmp1 )
- dt * tz1 * dz4;
a[i][j][3][4] = - dt * tz2 * C2;
a[i][j][4][0] = - dt * tz2
* ( ( C2 * ( u[i][j][k-1][1] * u[i][j][k-1][1]
+ u[i][j][k-1][2] * u[i][j][k-1][2]
+ u[i][j][k-1][3] * u[i][j][k-1][3] ) * tmp2
- C1 * ( u[i][j][k-1][4] * tmp1 ) )
* ( u[i][j][k-1][3] * tmp1 ) )
- dt * tz1
* ( - ( c34 - c1345 ) * tmp3 * (u[i][j][k-1][1]*u[i][j][k-1][1])
- ( c34 - c1345 ) * tmp3 * (u[i][j][k-1][2]*u[i][j][k-1][2])
- ( r43*c34 - c1345 )* tmp3 * (u[i][j][k-1][3]*u[i][j][k-1][3])
- c1345 * tmp2 * u[i][j][k-1][4] );
a[i][j][4][1] = - dt * tz2
* ( - C2 * ( u[i][j][k-1][1]*u[i][j][k-1][3] ) * tmp2 )
- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k-1][1];
a[i][j][4][2] = - dt * tz2
* ( - C2 * ( u[i][j][k-1][2]*u[i][j][k-1][3] ) * tmp2 )
- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k-1][2];
a[i][j][4][3] = - dt * tz2
* ( C1 * ( u[i][j][k-1][4] * tmp1 )
- 0.50 * C2
* ( ( u[i][j][k-1][1]*u[i][j][k-1][1]
+ u[i][j][k-1][2]*u[i][j][k-1][2]
+ 3.0*u[i][j][k-1][3]*u[i][j][k-1][3] ) * tmp2 ) )
- dt * tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k-1][3];
a[i][j][4][4] = - dt * tz2
* ( C1 * ( u[i][j][k-1][3] * tmp1 ) )
- dt * tz1 * c1345 * tmp1
- dt * tz1 * dz5;
/*--------------------------------------------------------------------
c form the second block sub-diagonal
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i][j-1][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
b[i][j][0][0] = - dt * ty1 * dy1;
b[i][j][0][1] = 0.0;
b[i][j][0][2] = - dt * ty2;
b[i][j][0][3] = 0.0;
b[i][j][0][4] = 0.0;
b[i][j][1][0] = - dt * ty2
* ( - ( u[i][j-1][k][1]*u[i][j-1][k][2] ) * tmp2 )
- dt * ty1 * ( - c34 * tmp2 * u[i][j-1][k][1] );
b[i][j][1][1] = - dt * ty2 * ( u[i][j-1][k][2] * tmp1 )
- dt * ty1 * ( c34 * tmp1 )
- dt * ty1 * dy2;
b[i][j][1][2] = - dt * ty2 * ( u[i][j-1][k][1] * tmp1 );
b[i][j][1][3] = 0.0;
b[i][j][1][4] = 0.0;
b[i][j][2][0] = - dt * ty2
* ( - ( u[i][j-1][k][2] * tmp1 ) *( u[i][j-1][k][2] * tmp1 )
+ 0.50 * C2 * ( ( u[i][j-1][k][1] * u[i][j-1][k][1]
+ u[i][j-1][k][2] * u[i][j-1][k][2]
+ u[i][j-1][k][3] * u[i][j-1][k][3] )
* tmp2 ) )
- dt * ty1 * ( - r43 * c34 * tmp2 * u[i][j-1][k][2] );
b[i][j][2][1] = - dt * ty2
* ( - C2 * ( u[i][j-1][k][1] * tmp1 ) );
b[i][j][2][2] = - dt * ty2 * ( ( 2.0 - C2 )
* ( u[i][j-1][k][2] * tmp1 ) )
- dt * ty1 * ( r43 * c34 * tmp1 )
- dt * ty1 * dy3;
b[i][j][2][3] = - dt * ty2
* ( - C2 * ( u[i][j-1][k][3] * tmp1 ) );
b[i][j][2][4] = - dt * ty2 * C2;
b[i][j][3][0] = - dt * ty2
* ( - ( u[i][j-1][k][2]*u[i][j-1][k][3] ) * tmp2 )
- dt * ty1 * ( - c34 * tmp2 * u[i][j-1][k][3] );
b[i][j][3][1] = 0.0;
b[i][j][3][2] = - dt * ty2 * ( u[i][j-1][k][3] * tmp1 );
b[i][j][3][3] = - dt * ty2 * ( u[i][j-1][k][2] * tmp1 )
- dt * ty1 * ( c34 * tmp1 )
- dt * ty1 * dy4;
b[i][j][3][4] = 0.0;
b[i][j][4][0] = - dt * ty2
* ( ( C2 * ( u[i][j-1][k][1] * u[i][j-1][k][1]
+ u[i][j-1][k][2] * u[i][j-1][k][2]
+ u[i][j-1][k][3] * u[i][j-1][k][3] ) * tmp2
- C1 * ( u[i][j-1][k][4] * tmp1 ) )
* ( u[i][j-1][k][2] * tmp1 ) )
- dt * ty1
* ( - ( c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][1]))
- ( r43*c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][2]))
- ( c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][3]))
- c1345*tmp2*u[i][j-1][k][4] );
b[i][j][4][1] = - dt * ty2
* ( - C2 * ( u[i][j-1][k][1]*u[i][j-1][k][2] ) * tmp2 )
- dt * ty1
* ( c34 - c1345 ) * tmp2 * u[i][j-1][k][1];
b[i][j][4][2] = - dt * ty2
* ( C1 * ( u[i][j-1][k][4] * tmp1 )
- 0.50 * C2
* ( ( u[i][j-1][k][1]*u[i][j-1][k][1]
+ 3.0 * u[i][j-1][k][2]*u[i][j-1][k][2]
+ u[i][j-1][k][3]*u[i][j-1][k][3] ) * tmp2 ) )
- dt * ty1
* ( r43*c34 - c1345 ) * tmp2 * u[i][j-1][k][2];
b[i][j][4][3] = - dt * ty2
* ( - C2 * ( u[i][j-1][k][2]*u[i][j-1][k][3] ) * tmp2 )
- dt * ty1 * ( c34 - c1345 ) * tmp2 * u[i][j-1][k][3];
b[i][j][4][4] = - dt * ty2
* ( C1 * ( u[i][j-1][k][2] * tmp1 ) )
- dt * ty1 * c1345 * tmp1
- dt * ty1 * dy5;
/*--------------------------------------------------------------------
c form the third block sub-diagonal
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i-1][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
c[i][j][0][0] = - dt * tx1 * dx1;
c[i][j][0][1] = - dt * tx2;
c[i][j][0][2] = 0.0;
c[i][j][0][3] = 0.0;
c[i][j][0][4] = 0.0;
c[i][j][1][0] = - dt * tx2
* ( - ( u[i-1][j][k][1] * tmp1 ) *( u[i-1][j][k][1] * tmp1 )
+ C2 * 0.50 * ( u[i-1][j][k][1] * u[i-1][j][k][1]
+ u[i-1][j][k][2] * u[i-1][j][k][2]
+ u[i-1][j][k][3] * u[i-1][j][k][3] ) * tmp2 )
- dt * tx1 * ( - r43 * c34 * tmp2 * u[i-1][j][k][1] );
c[i][j][1][1] = - dt * tx2
* ( ( 2.0 - C2 ) * ( u[i-1][j][k][1] * tmp1 ) )
- dt * tx1 * ( r43 * c34 * tmp1 )
- dt * tx1 * dx2;
c[i][j][1][2] = - dt * tx2
* ( - C2 * ( u[i-1][j][k][2] * tmp1 ) );
c[i][j][1][3] = - dt * tx2
* ( - C2 * ( u[i-1][j][k][3] * tmp1 ) );
c[i][j][1][4] = - dt * tx2 * C2;
c[i][j][2][0] = - dt * tx2
* ( - ( u[i-1][j][k][1] * u[i-1][j][k][2] ) * tmp2 )
- dt * tx1 * ( - c34 * tmp2 * u[i-1][j][k][2] );
c[i][j][2][1] = - dt * tx2 * ( u[i-1][j][k][2] * tmp1 );
c[i][j][2][2] = - dt * tx2 * ( u[i-1][j][k][1] * tmp1 )
- dt * tx1 * ( c34 * tmp1 )
- dt * tx1 * dx3;
c[i][j][2][3] = 0.0;
c[i][j][2][4] = 0.0;
c[i][j][3][0] = - dt * tx2
* ( - ( u[i-1][j][k][1]*u[i-1][j][k][3] ) * tmp2 )
- dt * tx1 * ( - c34 * tmp2 * u[i-1][j][k][3] );
c[i][j][3][1] = - dt * tx2 * ( u[i-1][j][k][3] * tmp1 );
c[i][j][3][2] = 0.0;
c[i][j][3][3] = - dt * tx2 * ( u[i-1][j][k][1] * tmp1 )
- dt * tx1 * ( c34 * tmp1 )
- dt * tx1 * dx4;
c[i][j][3][4] = 0.0;
c[i][j][4][0] = - dt * tx2
* ( ( C2 * ( u[i-1][j][k][1] * u[i-1][j][k][1]
+ u[i-1][j][k][2] * u[i-1][j][k][2]
+ u[i-1][j][k][3] * u[i-1][j][k][3] ) * tmp2
- C1 * ( u[i-1][j][k][4] * tmp1 ) )
* ( u[i-1][j][k][1] * tmp1 ) )
- dt * tx1
* ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][3]) )
- c1345 * tmp2 * u[i-1][j][k][4] );
c[i][j][4][1] = - dt * tx2
* ( C1 * ( u[i-1][j][k][4] * tmp1 )
- 0.50 * C2
* ( ( 3.0*u[i-1][j][k][1]*u[i-1][j][k][1]
+ u[i-1][j][k][2]*u[i-1][j][k][2]
+ u[i-1][j][k][3]*u[i-1][j][k][3] ) * tmp2 ) )
- dt * tx1
* ( r43*c34 - c1345 ) * tmp2 * u[i-1][j][k][1];
c[i][j][4][2] = - dt * tx2
* ( - C2 * ( u[i-1][j][k][2]*u[i-1][j][k][1] ) * tmp2 )
- dt * tx1
* ( c34 - c1345 ) * tmp2 * u[i-1][j][k][2];
c[i][j][4][3] = - dt * tx2
* ( - C2 * ( u[i-1][j][k][3]*u[i-1][j][k][1] ) * tmp2 )
- dt * tx1
* ( c34 - c1345 ) * tmp2 * u[i-1][j][k][3];
c[i][j][4][4] = - dt * tx2
* ( C1 * ( u[i-1][j][k][1] * tmp1 ) )
- dt * tx1 * c1345 * tmp1
- dt * tx1 * dx5;
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void jacu(int k) {
/*--------------------------------------------------------------------
c compute the upper triangular part of the jacobian matrix
--------------------------------------------------------------------*/
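/*--------------------------------------------------------------------
c  Note: mirror of jacld for the upper-triangular sweep; here a, b
c  and c couple (i,j,k) to (i+1,j,k), (i,j+1,k) and (i,j,k+1), and
c  under OpenMP the i and j loops are traversed in reverse order.
--------------------------------------------------------------------*/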
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j;
double r43;
double c1345;
double c34;
double tmp1, tmp2, tmp3;
r43 = ( 4.0 / 3.0 );
c1345 = C1 * C3 * C4 * C5;
c34 = C3 * C4;
#if defined(_OPENMP)
#pragma omp parallel for firstprivate(ist ,iend ,tmp1 ,tmp2 ,tmp3 ,k ,dz1 ,tz1 ,dy1 ,ty1 ,dx1 ,tx1 ,dt ,dz2 ,dy2 ,dx2 ,dz3 ,dy3 ,dx3 ,dz4 ,dy4 ,dx4 ,dz5 ,dy5 ,dx5 ,tx2 ,ty2 ,tz2 ,jst ,jend )
for (i = iend; i >= ist; i--) {
#pragma omp parallel for firstprivate(ist ,iend ,tmp1 ,tmp2 ,tmp3 ,k ,dz1 ,tz1 ,dy1 ,ty1 ,dx1 ,tx1 ,dt ,dz2 ,dy2 ,dx2 ,dz3 ,dy3 ,dx3 ,dz4 ,dy4 ,dx4 ,dz5 ,dy5 ,dx5 ,tx2 ,ty2 ,tz2 ,jst ,jend ,i )
for (j = jend; j >= jst; j--) {
#else
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
#endif
/*--------------------------------------------------------------------
c form the block diagonal
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
d[i][j][0][0] = 1.0
+ dt * 2.0 * ( tx1 * dx1
+ ty1 * dy1
+ tz1 * dz1 );
d[i][j][0][1] = 0.0;
d[i][j][0][2] = 0.0;
d[i][j][0][3] = 0.0;
d[i][j][0][4] = 0.0;
d[i][j][1][0] = dt * 2.0
* ( tx1 * ( - r43 * c34 * tmp2 * u[i][j][k][1] )
+ ty1 * ( - c34 * tmp2 * u[i][j][k][1] )
+ tz1 * ( - c34 * tmp2 * u[i][j][k][1] ) );
d[i][j][1][1] = 1.0
+ dt * 2.0
* ( tx1 * r43 * c34 * tmp1
+ ty1 * c34 * tmp1
+ tz1 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx2
+ ty1 * dy2
+ tz1 * dz2 );
d[i][j][1][2] = 0.0;
d[i][j][1][3] = 0.0;
d[i][j][1][4] = 0.0;
d[i][j][2][0] = dt * 2.0
* ( tx1 * ( - c34 * tmp2 * u[i][j][k][2] )
+ ty1 * ( - r43 * c34 * tmp2 * u[i][j][k][2] )
+ tz1 * ( - c34 * tmp2 * u[i][j][k][2] ) );
d[i][j][2][1] = 0.0;
d[i][j][2][2] = 1.0
+ dt * 2.0
* ( tx1 * c34 * tmp1
+ ty1 * r43 * c34 * tmp1
+ tz1 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx3
+ ty1 * dy3
+ tz1 * dz3 );
d[i][j][2][3] = 0.0;
d[i][j][2][4] = 0.0;
d[i][j][3][0] = dt * 2.0
* ( tx1 * ( - c34 * tmp2 * u[i][j][k][3] )
+ ty1 * ( - c34 * tmp2 * u[i][j][k][3] )
+ tz1 * ( - r43 * c34 * tmp2 * u[i][j][k][3] ) );
d[i][j][3][1] = 0.0;
d[i][j][3][2] = 0.0;
d[i][j][3][3] = 1.0
+ dt * 2.0
* ( tx1 * c34 * tmp1
+ ty1 * c34 * tmp1
+ tz1 * r43 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx4
+ ty1 * dy4
+ tz1 * dz4 );
d[i][j][3][4] = 0.0;
d[i][j][4][0] = dt * 2.0
* ( tx1 * ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] )
+ ty1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] )
+ tz1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] ) );
d[i][j][4][1] = dt * 2.0
* ( tx1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][1]
+ ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1]
+ tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1] );
d[i][j][4][2] = dt * 2.0
* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2]
+ ty1 * ( r43*c34 -c1345 ) * tmp2 * u[i][j][k][2]
+ tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2] );
d[i][j][4][3] = dt * 2.0
* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]
+ ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]
+ tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][3] );
d[i][j][4][4] = 1.0
+ dt * 2.0 * ( tx1 * c1345 * tmp1
+ ty1 * c1345 * tmp1
+ tz1 * c1345 * tmp1 )
+ dt * 2.0 * ( tx1 * dx5
+ ty1 * dy5
+ tz1 * dz5 );
/*--------------------------------------------------------------------
c form the first block sub-diagonal
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i+1][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
a[i][j][0][0] = - dt * tx1 * dx1;
a[i][j][0][1] = dt * tx2;
a[i][j][0][2] = 0.0;
a[i][j][0][3] = 0.0;
a[i][j][0][4] = 0.0;
a[i][j][1][0] = dt * tx2
* ( - ( u[i+1][j][k][1] * tmp1 ) *( u[i+1][j][k][1] * tmp1 )
+ C2 * 0.50 * ( u[i+1][j][k][1] * u[i+1][j][k][1]
+ u[i+1][j][k][2] * u[i+1][j][k][2]
+ u[i+1][j][k][3] * u[i+1][j][k][3] ) * tmp2 )
- dt * tx1 * ( - r43 * c34 * tmp2 * u[i+1][j][k][1] );
a[i][j][1][1] = dt * tx2
* ( ( 2.0 - C2 ) * ( u[i+1][j][k][1] * tmp1 ) )
- dt * tx1 * ( r43 * c34 * tmp1 )
- dt * tx1 * dx2;
a[i][j][1][2] = dt * tx2
* ( - C2 * ( u[i+1][j][k][2] * tmp1 ) );
a[i][j][1][3] = dt * tx2
* ( - C2 * ( u[i+1][j][k][3] * tmp1 ) );
a[i][j][1][4] = dt * tx2 * C2 ;
a[i][j][2][0] = dt * tx2
* ( - ( u[i+1][j][k][1] * u[i+1][j][k][2] ) * tmp2 )
- dt * tx1 * ( - c34 * tmp2 * u[i+1][j][k][2] );
a[i][j][2][1] = dt * tx2 * ( u[i+1][j][k][2] * tmp1 );
a[i][j][2][2] = dt * tx2 * ( u[i+1][j][k][1] * tmp1 )
- dt * tx1 * ( c34 * tmp1 )
- dt * tx1 * dx3;
a[i][j][2][3] = 0.0;
a[i][j][2][4] = 0.0;
a[i][j][3][0] = dt * tx2
* ( - ( u[i+1][j][k][1]*u[i+1][j][k][3] ) * tmp2 )
- dt * tx1 * ( - c34 * tmp2 * u[i+1][j][k][3] );
a[i][j][3][1] = dt * tx2 * ( u[i+1][j][k][3] * tmp1 );
a[i][j][3][2] = 0.0;
a[i][j][3][3] = dt * tx2 * ( u[i+1][j][k][1] * tmp1 )
- dt * tx1 * ( c34 * tmp1 )
- dt * tx1 * dx4;
a[i][j][3][4] = 0.0;
a[i][j][4][0] = dt * tx2
* ( ( C2 * ( u[i+1][j][k][1] * u[i+1][j][k][1]
+ u[i+1][j][k][2] * u[i+1][j][k][2]
+ u[i+1][j][k][3] * u[i+1][j][k][3] ) * tmp2
- C1 * ( u[i+1][j][k][4] * tmp1 ) )
* ( u[i+1][j][k][1] * tmp1 ) )
- dt * tx1
* ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][3]) )
- c1345 * tmp2 * u[i+1][j][k][4] );
a[i][j][4][1] = dt * tx2
* ( C1 * ( u[i+1][j][k][4] * tmp1 )
- 0.50 * C2
* ( ( 3.0*u[i+1][j][k][1]*u[i+1][j][k][1]
+ u[i+1][j][k][2]*u[i+1][j][k][2]
+ u[i+1][j][k][3]*u[i+1][j][k][3] ) * tmp2 ) )
- dt * tx1
* ( r43*c34 - c1345 ) * tmp2 * u[i+1][j][k][1];
a[i][j][4][2] = dt * tx2
* ( - C2 * ( u[i+1][j][k][2]*u[i+1][j][k][1] ) * tmp2 )
- dt * tx1
* ( c34 - c1345 ) * tmp2 * u[i+1][j][k][2];
a[i][j][4][3] = dt * tx2
* ( - C2 * ( u[i+1][j][k][3]*u[i+1][j][k][1] ) * tmp2 )
- dt * tx1
* ( c34 - c1345 ) * tmp2 * u[i+1][j][k][3];
a[i][j][4][4] = dt * tx2
* ( C1 * ( u[i+1][j][k][1] * tmp1 ) )
- dt * tx1 * c1345 * tmp1
- dt * tx1 * dx5;
/*--------------------------------------------------------------------
c form the second block sub-diagonal
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i][j+1][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
b[i][j][0][0] = - dt * ty1 * dy1;
b[i][j][0][1] = 0.0;
b[i][j][0][2] = dt * ty2;
b[i][j][0][3] = 0.0;
b[i][j][0][4] = 0.0;
b[i][j][1][0] = dt * ty2
* ( - ( u[i][j+1][k][1]*u[i][j+1][k][2] ) * tmp2 )
- dt * ty1 * ( - c34 * tmp2 * u[i][j+1][k][1] );
b[i][j][1][1] = dt * ty2 * ( u[i][j+1][k][2] * tmp1 )
- dt * ty1 * ( c34 * tmp1 )
- dt * ty1 * dy2;
b[i][j][1][2] = dt * ty2 * ( u[i][j+1][k][1] * tmp1 );
b[i][j][1][3] = 0.0;
b[i][j][1][4] = 0.0;
b[i][j][2][0] = dt * ty2
* ( - ( u[i][j+1][k][2] * tmp1 ) *( u[i][j+1][k][2] * tmp1 )
+ 0.50 * C2 * ( ( u[i][j+1][k][1] * u[i][j+1][k][1]
+ u[i][j+1][k][2] * u[i][j+1][k][2]
+ u[i][j+1][k][3] * u[i][j+1][k][3] )
* tmp2 ) )
- dt * ty1 * ( - r43 * c34 * tmp2 * u[i][j+1][k][2] );
b[i][j][2][1] = dt * ty2
* ( - C2 * ( u[i][j+1][k][1] * tmp1 ) );
b[i][j][2][2] = dt * ty2 * ( ( 2.0 - C2 )
* ( u[i][j+1][k][2] * tmp1 ) )
- dt * ty1 * ( r43 * c34 * tmp1 )
- dt * ty1 * dy3;
b[i][j][2][3] = dt * ty2
* ( - C2 * ( u[i][j+1][k][3] * tmp1 ) );
b[i][j][2][4] = dt * ty2 * C2;
b[i][j][3][0] = dt * ty2
* ( - ( u[i][j+1][k][2]*u[i][j+1][k][3] ) * tmp2 )
- dt * ty1 * ( - c34 * tmp2 * u[i][j+1][k][3] );
b[i][j][3][1] = 0.0;
b[i][j][3][2] = dt * ty2 * ( u[i][j+1][k][3] * tmp1 );
b[i][j][3][3] = dt * ty2 * ( u[i][j+1][k][2] * tmp1 )
- dt * ty1 * ( c34 * tmp1 )
- dt * ty1 * dy4;
b[i][j][3][4] = 0.0;
b[i][j][4][0] = dt * ty2
* ( ( C2 * ( u[i][j+1][k][1] * u[i][j+1][k][1]
+ u[i][j+1][k][2] * u[i][j+1][k][2]
+ u[i][j+1][k][3] * u[i][j+1][k][3] ) * tmp2
- C1 * ( u[i][j+1][k][4] * tmp1 ) )
* ( u[i][j+1][k][2] * tmp1 ) )
- dt * ty1
* ( - ( c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][1]) )
- ( r43*c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][2]) )
- ( c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][3]) )
- c1345*tmp2*u[i][j+1][k][4] );
b[i][j][4][1] = dt * ty2
* ( - C2 * ( u[i][j+1][k][1]*u[i][j+1][k][2] ) * tmp2 )
- dt * ty1
* ( c34 - c1345 ) * tmp2 * u[i][j+1][k][1];
b[i][j][4][2] = dt * ty2
* ( C1 * ( u[i][j+1][k][4] * tmp1 )
- 0.50 * C2
* ( ( u[i][j+1][k][1]*u[i][j+1][k][1]
+ 3.0 * u[i][j+1][k][2]*u[i][j+1][k][2]
+ u[i][j+1][k][3]*u[i][j+1][k][3] ) * tmp2 ) )
- dt * ty1
* ( r43*c34 - c1345 ) * tmp2 * u[i][j+1][k][2];
b[i][j][4][3] = dt * ty2
* ( - C2 * ( u[i][j+1][k][2]*u[i][j+1][k][3] ) * tmp2 )
- dt * ty1 * ( c34 - c1345 ) * tmp2 * u[i][j+1][k][3];
b[i][j][4][4] = dt * ty2
* ( C1 * ( u[i][j+1][k][2] * tmp1 ) )
- dt * ty1 * c1345 * tmp1
- dt * ty1 * dy5;
/*--------------------------------------------------------------------
c form the third block sub-diagonal
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i][j][k+1][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
c[i][j][0][0] = - dt * tz1 * dz1;
c[i][j][0][1] = 0.0;
c[i][j][0][2] = 0.0;
c[i][j][0][3] = dt * tz2;
c[i][j][0][4] = 0.0;
c[i][j][1][0] = dt * tz2
* ( - ( u[i][j][k+1][1]*u[i][j][k+1][3] ) * tmp2 )
- dt * tz1 * ( - c34 * tmp2 * u[i][j][k+1][1] );
c[i][j][1][1] = dt * tz2 * ( u[i][j][k+1][3] * tmp1 )
- dt * tz1 * c34 * tmp1
- dt * tz1 * dz2 ;
c[i][j][1][2] = 0.0;
c[i][j][1][3] = dt * tz2 * ( u[i][j][k+1][1] * tmp1 );
c[i][j][1][4] = 0.0;
c[i][j][2][0] = dt * tz2
* ( - ( u[i][j][k+1][2]*u[i][j][k+1][3] ) * tmp2 )
- dt * tz1 * ( - c34 * tmp2 * u[i][j][k+1][2] );
c[i][j][2][1] = 0.0;
c[i][j][2][2] = dt * tz2 * ( u[i][j][k+1][3] * tmp1 )
- dt * tz1 * ( c34 * tmp1 )
- dt * tz1 * dz3;
c[i][j][2][3] = dt * tz2 * ( u[i][j][k+1][2] * tmp1 );
c[i][j][2][4] = 0.0;
c[i][j][3][0] = dt * tz2
* ( - ( u[i][j][k+1][3] * tmp1 ) *( u[i][j][k+1][3] * tmp1 )
+ 0.50 * C2
* ( ( u[i][j][k+1][1] * u[i][j][k+1][1]
+ u[i][j][k+1][2] * u[i][j][k+1][2]
+ u[i][j][k+1][3] * u[i][j][k+1][3] ) * tmp2 ) )
- dt * tz1 * ( - r43 * c34 * tmp2 * u[i][j][k+1][3] );
c[i][j][3][1] = dt * tz2
* ( - C2 * ( u[i][j][k+1][1] * tmp1 ) );
c[i][j][3][2] = dt * tz2
* ( - C2 * ( u[i][j][k+1][2] * tmp1 ) );
c[i][j][3][3] = dt * tz2 * ( 2.0 - C2 )
* ( u[i][j][k+1][3] * tmp1 )
- dt * tz1 * ( r43 * c34 * tmp1 )
- dt * tz1 * dz4;
c[i][j][3][4] = dt * tz2 * C2;
c[i][j][4][0] = dt * tz2
* ( ( C2 * ( u[i][j][k+1][1] * u[i][j][k+1][1]
+ u[i][j][k+1][2] * u[i][j][k+1][2]
+ u[i][j][k+1][3] * u[i][j][k+1][3] ) * tmp2
- C1 * ( u[i][j][k+1][4] * tmp1 ) )
* ( u[i][j][k+1][3] * tmp1 ) )
- dt * tz1
* ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k+1][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k+1][2]) )
- ( r43*c34 - c1345 )* tmp3 * ( pow2(u[i][j][k+1][3]) )
- c1345 * tmp2 * u[i][j][k+1][4] );
c[i][j][4][1] = dt * tz2
* ( - C2 * ( u[i][j][k+1][1]*u[i][j][k+1][3] ) * tmp2 )
- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k+1][1];
c[i][j][4][2] = dt * tz2
* ( - C2 * ( u[i][j][k+1][2]*u[i][j][k+1][3] ) * tmp2 )
- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k+1][2];
c[i][j][4][3] = dt * tz2
* ( C1 * ( u[i][j][k+1][4] * tmp1 )
- 0.50 * C2
* ( ( u[i][j][k+1][1]*u[i][j][k+1][1]
+ u[i][j][k+1][2]*u[i][j][k+1][2]
+ 3.0*u[i][j][k+1][3]*u[i][j][k+1][3] ) * tmp2 ) )
- dt * tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k+1][3];
c[i][j][4][4] = dt * tz2
* ( C1 * ( u[i][j][k+1][3] * tmp1 ) )
- dt * tz1 * c1345 * tmp1
- dt * tz1 * dz5;
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void l2norm (int nx0, int ny0, int nz0,
int ist, int iend,
int jst, int jend,
/*--------------------------------------------------------------------
c To improve cache performance, second two dimensions padded by 1
c for even number sizes only. Only needed in v.
--------------------------------------------------------------------*/
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double sum[5]) {
{
/*--------------------------------------------------------------------
c to compute the l2-norm of vector v.
--------------------------------------------------------------------*/
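/*--------------------------------------------------------------------
c  Note: sum[m] = sqrt( (sum over interior points of v[i][j][k][m]^2)
c  / ((nx0-2)*(ny0-2)*(nz0-2)) ), i.e. an RMS norm rather than a
c  plain Euclidean norm.
--------------------------------------------------------------------*/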
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k, m;
double sum0=0.0, sum1=0.0, sum2=0.0, sum3=0.0, sum4=0.0;
#pragma omp parallel for
for (m = 0; m < 5; m++) {
sum[m] = 0.0;
}
#pragma omp parallel for
for (i = ist; i <= iend; i++) {
#pragma omp parallel for firstprivate(iend ,ist ,k ,v ,nz0 ,jst ,jend ,i ) reduction(+:sum4) reduction(+:sum3) reduction(+:sum2) reduction(+:sum1) reduction(+:sum0)
for (j = jst; j <= jend; j++) {
#pragma omp parallel for firstprivate(iend ,ist ,j ,v ,nz0 ,jst ,jend ,i ) reduction(+:sum4) reduction(+:sum3) reduction(+:sum2) reduction(+:sum1) reduction(+:sum0)
for (k = 1; k <= nz0-2; k++) {
sum0 = sum0 + v[i][j][k][0] * v[i][j][k][0];
sum1 = sum1 + v[i][j][k][1] * v[i][j][k][1];
sum2 = sum2 + v[i][j][k][2] * v[i][j][k][2];
sum3 = sum3 + v[i][j][k][3] * v[i][j][k][3];
sum4 = sum4 + v[i][j][k][4] * v[i][j][k][4];
}
}
}
{
sum[0] += sum0;
sum[1] += sum1;
sum[2] += sum2;
sum[3] += sum3;
sum[4] += sum4;
}
#pragma omp parallel for firstprivate(nz0 ,ny0 ,nx0 ,sum )
for (m = 0; m < 5; m++) {
sum[m] = sqrt ( sum[m] / ( (nx0-2)*(ny0-2)*(nz0-2) ) );
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void pintgr(void) {
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k;
int ibeg, ifin, ifin1;
int jbeg, jfin, jfin1;
int iglob, iglob1, iglob2;
int jglob, jglob1, jglob2;
double phi1[ISIZ2+2][ISIZ3+2]; /* phi1(0:isiz2+1,0:isiz3+1) */
double phi2[ISIZ2+2][ISIZ3+2]; /* phi2(0:isiz2+1,0:isiz3+1) */
double frc1, frc2, frc3;
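/*--------------------------------------------------------------------
c  Note: phi1 and phi2 sample a pressure-like quantity on opposite
c  boundary planes; frc1, frc2 and frc3 are trapezoidal-rule corner
c  sums over the k-, j- and i-boundary planes respectively, combined
c  at the end as frc = 0.25*(frc1 + frc2 + frc3).  The bounds ii1,
c  ii2, ji1, ji2, ki1 and ki2 are set elsewhere in the benchmark.
--------------------------------------------------------------------*/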
/*--------------------------------------------------------------------
c set up the sub-domains for integration on each processor
--------------------------------------------------------------------*/
ibeg = nx;
ifin = 0;
iglob1 = -1;
iglob2 = nx-1;
if (iglob1 >= ii1 && iglob2 < ii2+nx) ibeg = 0;
if (iglob1 >= ii1-nx && iglob2 <= ii2) ifin = nx;
if (ii1 >= iglob1 && ii1 <= iglob2) ibeg = ii1;
if (ii2 >= iglob1 && ii2 <= iglob2) ifin = ii2;
jbeg = ny;
jfin = -1;
jglob1 = 0;
jglob2 = ny-1;
if (jglob1 >= ji1 && jglob2 < ji2+ny) jbeg = 0;
if (jglob1 > ji1-ny && jglob2 <= ji2) jfin = ny;
if (ji1 >= jglob1 && ji1 <= jglob2) jbeg = ji1;
if (ji2 >= jglob1 && ji2 <= jglob2) jfin = ji2;
ifin1 = ifin;
jfin1 = jfin;
if (ifin1 == ii2) ifin1 = ifin -1;
if (jfin1 == ji2) jfin1 = jfin -1;
/*--------------------------------------------------------------------
c initialize
--------------------------------------------------------------------*/
#pragma omp parallel for
for (i = 0; i <= ISIZ2+1; i++) {
#pragma omp parallel for firstprivate(i )
for (k = 0; k <= ISIZ3+1; k++) {
phi1[i][k] = 0.0;
phi2[i][k] = 0.0;
}
}
#pragma omp parallel for firstprivate(ibeg ,ifin ,j ,jbeg ,ki1 ,ki2 ,jfin)
for (i = ibeg; i <= ifin; i++) {
iglob = i;
#pragma omp parallel for firstprivate(ibeg ,ifin ,jbeg ,ki1 ,ki2 ,jfin ,i )
for (j = jbeg; j <= jfin; j++) {
jglob = j;
k = ki1;
phi1[i][j] = C2*( u[i][j][k][4]
- 0.50 * ( pow2(u[i][j][k][1])
+ pow2(u[i][j][k][2])
+ pow2(u[i][j][k][3]) )
/ u[i][j][k][0] );
k = ki2;
phi2[i][j] = C2*( u[i][j][k][4]
- 0.50 * ( pow2(u[i][j][k][1])
+ pow2(u[i][j][k][2])
+ pow2(u[i][j][k][3]) )
/ u[i][j][k][0] );
}
}
frc1 = 0.0;
#pragma omp parallel for firstprivate(ibeg ,ifin1 ,j ,jbeg ,jfin1 ) reduction(+:frc1)
for (i = ibeg; i <= ifin1; i++) {
#pragma omp parallel for firstprivate(ibeg ,ifin1 ,jbeg ,jfin1 ,i ) reduction(+:frc1)
for (j = jbeg; j <= jfin1; j++) {
frc1 = frc1 + ( phi1[i][j]
+ phi1[i+1][j]
+ phi1[i][j+1]
+ phi1[i+1][j+1]
+ phi2[i][j]
+ phi2[i+1][j]
+ phi2[i][j+1]
+ phi2[i+1][j+1] );
}
}
frc1 = dxi * deta * frc1;
/*--------------------------------------------------------------------
c initialize
--------------------------------------------------------------------*/
#pragma omp parallel for
for (i = 0; i <= ISIZ2+1; i++) {
#pragma omp parallel for
for (k = 0; k <= ISIZ3+1; k++) {
phi1[i][k] = 0.0;
phi2[i][k] = 0.0;
}
}
jglob = jbeg;
if (jglob == ji1) {
#pragma omp parallel for firstprivate(ibeg ,ifin ,k ,jbeg ,ki1 ,ki2)
for (i = ibeg; i <= ifin; i++) {
iglob = i;
#pragma omp parallel for firstprivate(ibeg ,ifin ,jbeg ,ki1 ,ki2 ,i )
for (k = ki1; k <= ki2; k++) {
phi1[i][k] = C2*( u[i][jbeg][k][4]
- 0.50 * ( pow2(u[i][jbeg][k][1])
+ pow2(u[i][jbeg][k][2])
+ pow2(u[i][jbeg][k][3]) )
/ u[i][jbeg][k][0] );
}
}
}
jglob = jfin;
if (jglob == ji2) {
#pragma omp parallel for firstprivate(ibeg ,ifin ,k ,jfin ,ki1 ,ki2)
for (i = ibeg; i <= ifin; i++) {
iglob = i;
#pragma omp parallel for firstprivate(ibeg ,ifin ,jfin ,ki1 ,ki2 ,i )
for (k = ki1; k <= ki2; k++) {
phi2[i][k] = C2*( u[i][jfin][k][4]
- 0.50 * ( pow2(u[i][jfin][k][1])
+ pow2(u[i][jfin][k][2])
+ pow2(u[i][jfin][k][3]) )
/ u[i][jfin][k][0] );
}
}
}
frc2 = 0.0;
#pragma omp parallel for firstprivate(ibeg ,ifin1 ,k ,ki1 ,ki2) reduction(+:frc2)
for (i = ibeg; i <= ifin1; i++) {
#pragma omp parallel for firstprivate(ibeg ,ifin1 ,ki1 ,ki2 ,i ) reduction(+:frc2)
for (k = ki1; k <= ki2-1; k++) {
frc2 = frc2 + ( phi1[i][k]
+ phi1[i+1][k]
+ phi1[i][k+1]
+ phi1[i+1][k+1]
+ phi2[i][k]
+ phi2[i+1][k]
+ phi2[i][k+1]
+ phi2[i+1][k+1] );
}
}
frc2 = dxi * dzeta * frc2;
/*--------------------------------------------------------------------
c initialize
--------------------------------------------------------------------*/
#pragma omp parallel for
for (i = 0; i <= ISIZ2+1; i++) {
#pragma omp parallel for firstprivate(i )
for (k = 0; k <= ISIZ3+1; k++) {
phi1[i][k] = 0.0;
phi2[i][k] = 0.0;
}
}
iglob = ibeg;
if (iglob == ii1) {
for (j = jbeg; j <= jfin; j++) {
jglob = j;
for (k = ki1; k <= ki2; k++) {
phi1[j][k] = C2*( u[ibeg][j][k][4]
- 0.50 * ( pow2(u[ibeg][j][k][1])
+ pow2(u[ibeg][j][k][2])
+ pow2(u[ibeg][j][k][3]) )
/ u[ibeg][j][k][0] );
}
}
}
iglob = ifin;
if (iglob == ii2) {
for (j = jbeg; j <= jfin; j++) {
jglob = j;
for (k = ki1; k <= ki2; k++) {
phi2[j][k] = C2*( u[ifin][j][k][4]
- 0.50 * ( pow2(u[ifin][j][k][1])
+ pow2(u[ifin][j][k][2])
+ pow2(u[ifin][j][k][3]) )
/ u[ifin][j][k][0] );
}
}
}
frc3 = 0.0;
for (j = jbeg; j <= jfin1; j++) {
for (k = ki1; k <= ki2-1; k++) {
frc3 = frc3 + ( phi1[j][k]
+ phi1[j+1][k]
+ phi1[j][k+1]
+ phi1[j+1][k+1]
+ phi2[j][k]
+ phi2[j+1][k]
+ phi2[j][k+1]
+ phi2[j+1][k+1] );
}
}
frc3 = deta * dzeta * frc3;
frc = 0.25 * ( frc1 + frc2 + frc3 );
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void read_input(void) {
FILE *fp;
/*--------------------------------------------------------------------
c if input file does not exist, it uses defaults
c ipr = 1 for detailed progress output
c inorm = how often the norm is printed (once every inorm iterations)
c itmax = number of pseudo time steps
c dt = time step
c   omega = over-relaxation factor for SSOR
c tolrsd = steady state residual tolerance levels
c nx, ny, nz = number of grid points in x, y, z directions
--------------------------------------------------------------------*/
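/*--------------------------------------------------------------------
c  Illustrative file layout only (header text and values below are
c  placeholders, not the shipped defaults): each value group in
c  inputlu.data is preceded by two lines that the paired fgetc()
c  loops below skip, e.g.
c
c     ipr  inorm
c     -----------
c     1    50
c     itmax
c     -----------
c     250
c     ...  dt, omega, tolrsd[0..4] and nx ny nz follow the same
c     two-header-lines-then-data pattern
--------------------------------------------------------------------*/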
printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version"
" - LU Benchmark\n\n");
fp = fopen("inputlu.data", "r");
if (fp != NULL) {
printf(" Reading from input file inputlu.data\n");
while(fgetc(fp) != '\n'); while(fgetc(fp) != '\n');
fscanf(fp, "%d%d", &ipr, &inorm);
while(fgetc(fp) != '\n');
while(fgetc(fp) != '\n'); while(fgetc(fp) != '\n');
fscanf(fp, "%d", &itmax);
while(fgetc(fp) != '\n');
while(fgetc(fp) != '\n'); while(fgetc(fp) != '\n');
fscanf(fp, "%lf", &dt);
while(fgetc(fp) != '\n');
while(fgetc(fp) != '\n'); while(fgetc(fp) != '\n');
fscanf(fp, "%lf", &omega);
while(fgetc(fp) != '\n');
while(fgetc(fp) != '\n'); while(fgetc(fp) != '\n');
fscanf(fp, "%lf%lf%lf%lf%lf",
&tolrsd[0], &tolrsd[1], &tolrsd[2], &tolrsd[3], &tolrsd[4]);
while(fgetc(fp) != '\n');
while(fgetc(fp) != '\n'); while(fgetc(fp) != '\n');
fscanf(fp, "%d%d%d", &nx0, &ny0, &nz0);
while(fgetc(fp) != '\n');
fclose(fp);
} else {
ipr = IPR_DEFAULT;
inorm = INORM_DEFAULT;
itmax = ITMAX_DEFAULT;
dt = DT_DEFAULT;
omega = OMEGA_DEFAULT;
tolrsd[0] = TOLRSD1_DEF;
tolrsd[1] = TOLRSD2_DEF;
tolrsd[2] = TOLRSD3_DEF;
tolrsd[3] = TOLRSD4_DEF;
tolrsd[4] = TOLRSD5_DEF;
nx0 = ISIZ1;
ny0 = ISIZ2;
nz0 = ISIZ3;
}
/*--------------------------------------------------------------------
c check problem size
--------------------------------------------------------------------*/
if ( nx0 < 5 || ny0 < 5 || nz0 < 5 ) {
printf(" PROBLEM SIZE IS TOO SMALL - \n"
" SET EACH OF NX, NY AND NZ AT LEAST EQUAL TO 5\n");
exit(1);
}
if ( nx0 > ISIZ1 || ny0 > ISIZ2 || nz0 > ISIZ3 ) {
printf(" PROBLEM SIZE IS TOO LARGE - \n"
" NX, NY AND NZ SHOULD BE EQUAL TO \n"
" ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY\n");
exit(1);
}
printf(" Size: %3dx%3dx%3d\n", nx0, ny0, nz0);
printf(" Iterations: %3d\n", itmax);
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void rhs(void) {
{
/*--------------------------------------------------------------------
c compute the right hand sides
--------------------------------------------------------------------*/
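/*--------------------------------------------------------------------
c  Note: rhs mirrors erhs but differences the current solution u
c  instead of the exact solution: rsd starts from -frct and then
c  accumulates the xi-, eta- and zeta-direction flux differences plus
c  fourth-order dissipation, giving the steady-state residual used by
c  the SSOR iteration.
--------------------------------------------------------------------*/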
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k, m;
int L1, L2;
int ist1, iend1;
int jst1, jend1;
double q;
double u21, u31, u41;
double tmp;
double u21i, u31i, u41i, u51i;
double u21j, u31j, u41j, u51j;
double u21k, u31k, u41k, u51k;
double u21im1, u31im1, u41im1, u51im1;
double u21jm1, u31jm1, u41jm1, u51jm1;
double u21km1, u31km1, u41km1, u51km1;
#pragma omp parallel for
for (i = 0; i <= nx-1; i++) {
#pragma omp parallel for firstprivate(i )
for (j = 0; j <= ny-1; j++) {
#pragma omp parallel for firstprivate(j ,i )
for (k = 0; k <= nz-1; k++) {
#pragma omp parallel for firstprivate(j ,k ,i )
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = - frct[i][j][k][m];
}
}
}
}
/*--------------------------------------------------------------------
c xi-direction flux differences
--------------------------------------------------------------------*/
L1 = 0;
L2 = nx-1;
#pragma omp parallel for
for (i = L1; i <= L2; i++) {
#pragma omp parallel for firstprivate(i )
for (j = jst; j <= jend; j++) {
#pragma omp parallel for firstprivate(j ,i )
for (k = 1; k <= nz - 2; k++) {
flux[i][j][k][0] = u[i][j][k][1];
u21 = u[i][j][k][1] / u[i][j][k][0];
q = 0.50 * ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
/ u[i][j][k][0];
flux[i][j][k][1] = u[i][j][k][1] * u21 + C2 *
( u[i][j][k][4] - q );
flux[i][j][k][2] = u[i][j][k][2] * u21;
flux[i][j][k][3] = u[i][j][k][3] * u21;
flux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u21;
}
}
}
#pragma omp parallel for
for (j = jst; j <= jend; j++) {
#pragma omp parallel for firstprivate(j)
for (k = 1; k <= nz - 2; k++) {
#pragma omp parallel for firstprivate(j ,k)
for (i = ist; i <= iend; i++) {
#pragma omp parallel for firstprivate(j,i,k)
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = rsd[i][j][k][m]
- tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );
}
}
L2 = nx-1;
#pragma omp parallel for firstprivate(j ,k)
for (i = ist; i <= L2; i++) {
tmp = 1.0 / u[i][j][k][0];
u21i = tmp * u[i][j][k][1];
u31i = tmp * u[i][j][k][2];
u41i = tmp * u[i][j][k][3];
u51i = tmp * u[i][j][k][4];
tmp = 1.0 / u[i-1][j][k][0];
u21im1 = tmp * u[i-1][j][k][1];
u31im1 = tmp * u[i-1][j][k][2];
u41im1 = tmp * u[i-1][j][k][3];
u51im1 = tmp * u[i-1][j][k][4];
flux[i][j][k][1] = (4.0/3.0) * tx3 * (u21i-u21im1);
flux[i][j][k][2] = tx3 * ( u31i - u31im1 );
flux[i][j][k][3] = tx3 * ( u41i - u41im1 );
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* tx3 * ( ( pow2(u21i) + pow2(u31i) + pow2(u41i) )
- ( pow2(u21im1) + pow2(u31im1) + pow2(u41im1) ) )
+ (1.0/6.0)
* tx3 * ( pow2(u21i) - pow2(u21im1) )
+ C1 * C5 * tx3 * ( u51i - u51im1 );
}
#pragma omp parallel for firstprivate(j ,k)
for (i = ist; i <= iend; i++) {
rsd[i][j][k][0] = rsd[i][j][k][0]
+ dx1 * tx1 * ( u[i-1][j][k][0]
- 2.0 * u[i][j][k][0]
+ u[i+1][j][k][0] );
rsd[i][j][k][1] = rsd[i][j][k][1]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] )
+ dx2 * tx1 * ( u[i-1][j][k][1]
- 2.0 * u[i][j][k][1]
+ u[i+1][j][k][1] );
rsd[i][j][k][2] = rsd[i][j][k][2]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] )
+ dx3 * tx1 * ( u[i-1][j][k][2]
- 2.0 * u[i][j][k][2]
+ u[i+1][j][k][2] );
rsd[i][j][k][3] = rsd[i][j][k][3]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][3] - flux[i][j][k][3] )
+ dx4 * tx1 * ( u[i-1][j][k][3]
- 2.0 * u[i][j][k][3]
+ u[i+1][j][k][3] );
rsd[i][j][k][4] = rsd[i][j][k][4]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] )
+ dx5 * tx1 * ( u[i-1][j][k][4]
- 2.0 * u[i][j][k][4]
+ u[i+1][j][k][4] );
}
/*--------------------------------------------------------------------
c Fourth-order dissipation
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
rsd[1][j][k][m] = rsd[1][j][k][m]
- dssp * ( + 5.0 * u[1][j][k][m]
- 4.0 * u[2][j][k][m]
+ u[3][j][k][m] );
rsd[2][j][k][m] = rsd[2][j][k][m]
- dssp * ( - 4.0 * u[1][j][k][m]
+ 6.0 * u[2][j][k][m]
- 4.0 * u[3][j][k][m]
+ u[4][j][k][m] );
}
ist1 = 3;
iend1 = nx - 4;
for (i = ist1; i <= iend1; i++) {
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = rsd[i][j][k][m]
- dssp * ( u[i-2][j][k][m]
- 4.0 * u[i-1][j][k][m]
+ 6.0 * u[i][j][k][m]
- 4.0 * u[i+1][j][k][m]
+ u[i+2][j][k][m] );
}
}
for (m = 0; m < 5; m++) {
rsd[nx-3][j][k][m] = rsd[nx-3][j][k][m]
- dssp * ( u[nx-5][j][k][m]
- 4.0 * u[nx-4][j][k][m]
+ 6.0 * u[nx-3][j][k][m]
- 4.0 * u[nx-2][j][k][m] );
rsd[nx-2][j][k][m] = rsd[nx-2][j][k][m]
- dssp * ( u[nx-4][j][k][m]
- 4.0 * u[nx-3][j][k][m]
+ 5.0 * u[nx-2][j][k][m] );
}
}
}
/*--------------------------------------------------------------------
c eta-direction flux differences
--------------------------------------------------------------------*/
L1 = 0;
L2 = ny-1;
#pragma omp parallel for
for (i = ist; i <= iend; i++) {
#pragma omp parallel for firstprivate(i)
for (j = L1; j <= L2; j++) {
#pragma omp parallel for firstprivate(j,i)
for (k = 1; k <= nz - 2; k++) {
flux[i][j][k][0] = u[i][j][k][2];
u31 = u[i][j][k][2] / u[i][j][k][0];
q = 0.50 * ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
/ u[i][j][k][0];
flux[i][j][k][1] = u[i][j][k][1] * u31;
flux[i][j][k][2] = u[i][j][k][2] * u31 + C2 * (u[i][j][k][4]-q);
flux[i][j][k][3] = u[i][j][k][3] * u31;
flux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u31;
}
}
}
#pragma omp parallel for
for (i = ist; i <= iend; i++) {
#pragma omp parallel for firstprivate(i)
for (k = 1; k <= nz - 2; k++) {
#pragma omp parallel for firstprivate(i ,k)
for (j = jst; j <= jend; j++) {
#pragma omp parallel for firstprivate(j ,i ,k)
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = rsd[i][j][k][m]
- ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );
}
}
L2 = ny-1;
#pragma omp parallel for firstprivate(i ,k)
for (j = jst; j <= L2; j++) {
tmp = 1.0 / u[i][j][k][0];
u21j = tmp * u[i][j][k][1];
u31j = tmp * u[i][j][k][2];
u41j = tmp * u[i][j][k][3];
u51j = tmp * u[i][j][k][4];
tmp = 1.0 / u[i][j-1][k][0];
u21jm1 = tmp * u[i][j-1][k][1];
u31jm1 = tmp * u[i][j-1][k][2];
u41jm1 = tmp * u[i][j-1][k][3];
u51jm1 = tmp * u[i][j-1][k][4];
flux[i][j][k][1] = ty3 * ( u21j - u21jm1 );
flux[i][j][k][2] = (4.0/3.0) * ty3 * (u31j-u31jm1);
flux[i][j][k][3] = ty3 * ( u41j - u41jm1 );
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* ty3 * ( ( pow2(u21j) + pow2(u31j) + pow2(u41j) )
- ( pow2(u21jm1) + pow2(u31jm1) + pow2(u41jm1) ) )
+ (1.0/6.0)
* ty3 * ( pow2(u31j) - pow2(u31jm1) )
+ C1 * C5 * ty3 * ( u51j - u51jm1 );
}
#pragma omp parallel for firstprivate(i ,k)
for (j = jst; j <= jend; j++) {
rsd[i][j][k][0] = rsd[i][j][k][0]
+ dy1 * ty1 * ( u[i][j-1][k][0]
- 2.0 * u[i][j][k][0]
+ u[i][j+1][k][0] );
rsd[i][j][k][1] = rsd[i][j][k][1]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] )
+ dy2 * ty1 * ( u[i][j-1][k][1]
- 2.0 * u[i][j][k][1]
+ u[i][j+1][k][1] );
rsd[i][j][k][2] = rsd[i][j][k][2]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] )
+ dy3 * ty1 * ( u[i][j-1][k][2]
- 2.0 * u[i][j][k][2]
+ u[i][j+1][k][2] );
rsd[i][j][k][3] = rsd[i][j][k][3]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] )
+ dy4 * ty1 * ( u[i][j-1][k][3]
- 2.0 * u[i][j][k][3]
+ u[i][j+1][k][3] );
rsd[i][j][k][4] = rsd[i][j][k][4]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] )
+ dy5 * ty1 * ( u[i][j-1][k][4]
- 2.0 * u[i][j][k][4]
+ u[i][j+1][k][4] );
}
/*--------------------------------------------------------------------
c fourth-order dissipation
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
rsd[i][1][k][m] = rsd[i][1][k][m]
- dssp * ( + 5.0 * u[i][1][k][m]
- 4.0 * u[i][2][k][m]
+ u[i][3][k][m] );
rsd[i][2][k][m] = rsd[i][2][k][m]
- dssp * ( - 4.0 * u[i][1][k][m]
+ 6.0 * u[i][2][k][m]
- 4.0 * u[i][3][k][m]
+ u[i][4][k][m] );
}
jst1 = 3;
jend1 = ny - 4;
for (j = jst1; j <= jend1; j++) {
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = rsd[i][j][k][m]
- dssp * ( u[i][j-2][k][m]
- 4.0 * u[i][j-1][k][m]
+ 6.0 * u[i][j][k][m]
- 4.0 * u[i][j+1][k][m]
+ u[i][j+2][k][m] );
}
}
for (m = 0; m < 5; m++) {
rsd[i][ny-3][k][m] = rsd[i][ny-3][k][m]
- dssp * ( u[i][ny-5][k][m]
- 4.0 * u[i][ny-4][k][m]
+ 6.0 * u[i][ny-3][k][m]
- 4.0 * u[i][ny-2][k][m] );
rsd[i][ny-2][k][m] = rsd[i][ny-2][k][m]
- dssp * ( u[i][ny-4][k][m]
- 4.0 * u[i][ny-3][k][m]
+ 5.0 * u[i][ny-2][k][m] );
}
}
}
/*--------------------------------------------------------------------
c zeta-direction flux differences
--------------------------------------------------------------------*/
#pragma omp parallel for
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
#pragma omp parallel for firstprivate(j ,i)
for (k = 0; k <= nz-1; k++) {
flux[i][j][k][0] = u[i][j][k][3];
u41 = u[i][j][k][3] / u[i][j][k][0];
q = 0.50 * ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
/ u[i][j][k][0];
flux[i][j][k][1] = u[i][j][k][1] * u41;
flux[i][j][k][2] = u[i][j][k][2] * u41;
flux[i][j][k][3] = u[i][j][k][3] * u41 + C2 * (u[i][j][k][4]-q);
flux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u41;
}
#pragma omp parallel for firstprivate(j ,i)
for (k = 1; k <= nz - 2; k++) {
#pragma omp parallel for firstprivate(k ,j ,i)
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = rsd[i][j][k][m]
- tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] );
}
}
#pragma omp parallel for firstprivate(j ,i)
for (k = 1; k <= nz-1; k++) {
tmp = 1.0 / u[i][j][k][0];
u21k = tmp * u[i][j][k][1];
u31k = tmp * u[i][j][k][2];
u41k = tmp * u[i][j][k][3];
u51k = tmp * u[i][j][k][4];
tmp = 1.0 / u[i][j][k-1][0];
u21km1 = tmp * u[i][j][k-1][1];
u31km1 = tmp * u[i][j][k-1][2];
u41km1 = tmp * u[i][j][k-1][3];
u51km1 = tmp * u[i][j][k-1][4];
flux[i][j][k][1] = tz3 * ( u21k - u21km1 );
flux[i][j][k][2] = tz3 * ( u31k - u31km1 );
flux[i][j][k][3] = (4.0/3.0) * tz3 * (u41k-u41km1);
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* tz3 * ( ( pow2(u21k) + pow2(u31k) + pow2(u41k) )
- ( pow2(u21km1) + pow2(u31km1) + pow2(u41km1) ) )
+ (1.0/6.0)
* tz3 * ( pow2(u41k) - pow2(u41km1) )
+ C1 * C5 * tz3 * ( u51k - u51km1 );
}
#pragma omp parallel for firstprivate(j ,i)
for (k = 1; k <= nz - 2; k++) {
rsd[i][j][k][0] = rsd[i][j][k][0]
+ dz1 * tz1 * ( u[i][j][k-1][0]
- 2.0 * u[i][j][k][0]
+ u[i][j][k+1][0] );
rsd[i][j][k][1] = rsd[i][j][k][1]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][1] - flux[i][j][k][1] )
+ dz2 * tz1 * ( u[i][j][k-1][1]
- 2.0 * u[i][j][k][1]
+ u[i][j][k+1][1] );
rsd[i][j][k][2] = rsd[i][j][k][2]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][2] - flux[i][j][k][2] )
+ dz3 * tz1 * ( u[i][j][k-1][2]
- 2.0 * u[i][j][k][2]
+ u[i][j][k+1][2] );
rsd[i][j][k][3] = rsd[i][j][k][3]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][3] - flux[i][j][k][3] )
+ dz4 * tz1 * ( u[i][j][k-1][3]
- 2.0 * u[i][j][k][3]
+ u[i][j][k+1][3] );
rsd[i][j][k][4] = rsd[i][j][k][4]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][4] - flux[i][j][k][4] )
+ dz5 * tz1 * ( u[i][j][k-1][4]
- 2.0 * u[i][j][k][4]
+ u[i][j][k+1][4] );
}
/*--------------------------------------------------------------------
c fourth-order dissipation
--------------------------------------------------------------------*/
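/*--------------------------------------------------------------------
c as in the eta-direction, one-sided stencils replace the five-point
c stencil at the near-boundary points k = 1, 2 and k = nz-3, nz-2
--------------------------------------------------------------------*/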
for (m = 0; m < 5; m++) {
rsd[i][j][1][m] = rsd[i][j][1][m]
- dssp * ( + 5.0 * u[i][j][1][m]
- 4.0 * u[i][j][2][m]
+ u[i][j][3][m] );
rsd[i][j][2][m] = rsd[i][j][2][m]
- dssp * ( - 4.0 * u[i][j][1][m]
+ 6.0 * u[i][j][2][m]
- 4.0 * u[i][j][3][m]
+ u[i][j][4][m] );
}
#pragma omp parallel for firstprivate(j ,i)
for (k = 3; k <= nz - 4; k++) {
#pragma omp parallel for firstprivate(k ,j ,i)
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = rsd[i][j][k][m]
- dssp * ( u[i][j][k-2][m]
- 4.0 * u[i][j][k-1][m]
+ 6.0 * u[i][j][k][m]
- 4.0 * u[i][j][k+1][m]
+ u[i][j][k+2][m] );
}
}
for (m = 0; m < 5; m++) {
rsd[i][j][nz-3][m] = rsd[i][j][nz-3][m]
- dssp * ( u[i][j][nz-5][m]
- 4.0 * u[i][j][nz-4][m]
+ 6.0 * u[i][j][nz-3][m]
- 4.0 * u[i][j][nz-2][m] );
rsd[i][j][nz-2][m] = rsd[i][j][nz-2][m]
- dssp * ( u[i][j][nz-4][m]
- 4.0 * u[i][j][nz-3][m]
+ 5.0 * u[i][j][nz-2][m] );
}
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void setbv(void) {
{
/*--------------------------------------------------------------------
c set the boundary values of dependent variables
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k;
int iglob, jglob;
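/*--------------------------------------------------------------------
c exact() evaluates the analytic solution and stores all five state
c components at the requested grid point
--------------------------------------------------------------------*/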
/*--------------------------------------------------------------------
c set the dependent variable values along the top and bottom faces
--------------------------------------------------------------------*/
#pragma omp parallel for
for (i = 0; i < nx; i++) {
iglob = i;
for (j = 0; j < ny; j++) {
jglob = j;
exact( iglob, jglob, 0, &u[i][j][0][0] );
exact( iglob, jglob, nz-1, &u[i][j][nz-1][0] );
}
}
/*--------------------------------------------------------------------
c set the dependent variable values along north and south faces
--------------------------------------------------------------------*/
#pragma omp parallel for
for (i = 0; i < nx; i++) {
iglob = i;
for (k = 0; k < nz; k++) {
exact( iglob, 0, k, &u[i][0][k][0] );
}
}
#pragma omp parallel for
for (i = 0; i < nx; i++) {
iglob = i;
for (k = 0; k < nz; k++) {
exact( iglob, ny0-1, k, &u[i][ny-1][k][0] );
}
}
/*--------------------------------------------------------------------
c set the dependent variable values along east and west faces
--------------------------------------------------------------------*/
#pragma omp parallel for
for (j = 0; j < ny; j++) {
jglob = j;
for (k = 0; k < nz; k++) {
exact( 0, jglob, k, &u[0][j][k][0] );
}
}
#pragma omp parallel for
for (j = 0; j < ny; j++) {
jglob = j;
for (k = 0; k < nz; k++) {
exact( nx0-1, jglob, k, &u[nx-1][j][k][0] );
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void setcoeff(void) {
/*--------------------------------------------------------------------
c set up coefficients
--------------------------------------------------------------------*/
dxi = 1.0 / ( nx0 - 1 );
deta = 1.0 / ( ny0 - 1 );
dzeta = 1.0 / ( nz0 - 1 );
tx1 = 1.0 / ( dxi * dxi );
tx2 = 1.0 / ( 2.0 * dxi );
tx3 = 1.0 / dxi;
ty1 = 1.0 / ( deta * deta );
ty2 = 1.0 / ( 2.0 * deta );
ty3 = 1.0 / deta;
tz1 = 1.0 / ( dzeta * dzeta );
tz2 = 1.0 / ( 2.0 * dzeta );
tz3 = 1.0 / dzeta;
ii1 = 1;
ii2 = nx0 - 2;
ji1 = 1;
ji2 = ny0 - 3;
ki1 = 2;
ki2 = nz0 - 2;
/*--------------------------------------------------------------------
c diffusion coefficients
--------------------------------------------------------------------*/
dx1 = 0.75;
dx2 = dx1;
dx3 = dx1;
dx4 = dx1;
dx5 = dx1;
dy1 = 0.75;
dy2 = dy1;
dy3 = dy1;
dy4 = dy1;
dy5 = dy1;
dz1 = 1.00;
dz2 = dz1;
dz3 = dz1;
dz4 = dz1;
dz5 = dz1;
/*--------------------------------------------------------------------
c fourth difference dissipation
--------------------------------------------------------------------*/
dssp = ( max (dx1, max(dy1, dz1) ) ) / 4.0;
/*--------------------------------------------------------------------
c coefficients of the exact solution to the first pde
--------------------------------------------------------------------*/
ce[0][0] = 2.0;
ce[0][1] = 0.0;
ce[0][2] = 0.0;
ce[0][3] = 4.0;
ce[0][4] = 5.0;
ce[0][5] = 3.0;
ce[0][6] = 5.0e-01;
ce[0][7] = 2.0e-02;
ce[0][8] = 1.0e-02;
ce[0][9] = 3.0e-02;
ce[0][10] = 5.0e-01;
ce[0][11] = 4.0e-01;
ce[0][12] = 3.0e-01;
/*--------------------------------------------------------------------
c coefficients of the exact solution to the second pde
--------------------------------------------------------------------*/
ce[1][0] = 1.0;
ce[1][1] = 0.0;
ce[1][2] = 0.0;
ce[1][3] = 0.0;
ce[1][4] = 1.0;
ce[1][5] = 2.0;
ce[1][6] = 3.0;
ce[1][7] = 1.0e-02;
ce[1][8] = 3.0e-02;
ce[1][9] = 2.0e-02;
ce[1][10] = 4.0e-01;
ce[1][11] = 3.0e-01;
ce[1][12] = 5.0e-01;
/*--------------------------------------------------------------------
c coefficients of the exact solution to the third pde
--------------------------------------------------------------------*/
ce[2][0] = 2.0;
ce[2][1] = 2.0;
ce[2][2] = 0.0;
ce[2][3] = 0.0;
ce[2][4] = 0.0;
ce[2][5] = 2.0;
ce[2][6] = 3.0;
ce[2][7] = 4.0e-02;
ce[2][8] = 3.0e-02;
ce[2][9] = 5.0e-02;
ce[2][10] = 3.0e-01;
ce[2][11] = 5.0e-01;
ce[2][12] = 4.0e-01;
/*--------------------------------------------------------------------
c coefficients of the exact solution to the fourth pde
--------------------------------------------------------------------*/
ce[3][0] = 2.0;
ce[3][1] = 2.0;
ce[3][2] = 0.0;
ce[3][3] = 0.0;
ce[3][4] = 0.0;
ce[3][5] = 2.0;
ce[3][6] = 3.0;
ce[3][7] = 3.0e-02;
ce[3][8] = 5.0e-02;
ce[3][9] = 4.0e-02;
ce[3][10] = 2.0e-01;
ce[3][11] = 1.0e-01;
ce[3][12] = 3.0e-01;
/*--------------------------------------------------------------------
c coefficients of the exact solution to the fifth pde
--------------------------------------------------------------------*/
ce[4][0] = 5.0;
ce[4][1] = 4.0;
ce[4][2] = 3.0;
ce[4][3] = 2.0;
ce[4][4] = 1.0e-01;
ce[4][5] = 4.0e-01;
ce[4][6] = 3.0e-01;
ce[4][7] = 5.0e-02;
ce[4][8] = 4.0e-02;
ce[4][9] = 3.0e-02;
ce[4][10] = 1.0e-01;
ce[4][11] = 3.0e-01;
ce[4][12] = 2.0e-01;
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void setiv(void) {
{
/*--------------------------------------------------------------------
c
c set the initial values of independent variables based on tri-linear
c interpolation of boundary values in the computational space.
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k, m;
int iglob, jglob;
double xi, eta, zeta;
double pxi, peta, pzeta;
double ue_1jk[5],ue_nx0jk[5],ue_i1k[5],
ue_iny0k[5],ue_ij1[5],ue_ijnz[5];
#pragma omp parallel for
for (j = 0; j < ny; j++) {
jglob = j;
for (k = 1; k < nz - 1; k++) {
zeta = ((double)k) / (nz-1);
if (jglob != 0 && jglob != ny0-1) {
eta = ( (double) (jglob) ) / (ny0-1);
for (i = 0; i < nx; i++) {
iglob = i;
if(iglob != 0 && iglob != nx0-1) {
xi = ( (double) (iglob) ) / (nx0-1);
exact (0,jglob,k,ue_1jk);
exact (nx0-1,jglob,k,ue_nx0jk);
exact (iglob,0,k,ue_i1k);
exact (iglob,ny0-1,k,ue_iny0k);
exact (iglob,jglob,0,ue_ij1);
exact (iglob,jglob,nz-1,ue_ijnz);
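/*--------------------------------------------------------------------
c pxi, peta, pzeta are one-dimensional linear interpolants of the
c opposing face values; the loop below combines them as
c u = 1 - (1-pxi)*(1-peta)*(1-pzeta)
--------------------------------------------------------------------*/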
#pragma omp parallel for firstprivate(pxi ,peta ,pzeta ,xi ,eta ,zeta ,i ,k ,j )
for (m = 0; m < 5; m++) {
pxi = ( 1.0 - xi ) * ue_1jk[m]
+ xi * ue_nx0jk[m];
peta = ( 1.0 - eta ) * ue_i1k[m]
+ eta * ue_iny0k[m];
pzeta = ( 1.0 - zeta ) * ue_ij1[m]
+ zeta * ue_ijnz[m];
u[i][j][k][m] = pxi + peta + pzeta
- pxi * peta - peta * pzeta - pzeta * pxi
+ pxi * peta * pzeta;
}
}
}
}
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void ssor(void) {
/*--------------------------------------------------------------------
c to perform pseudo-time stepping SSOR iterations
c for five nonlinear pde s.
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k, m;
int istep;
double tmp;
double delunm[5], tv[ISIZ1][ISIZ2][5];
/*--------------------------------------------------------------------
c begin pseudo-time stepping iterations
--------------------------------------------------------------------*/
tmp = 1.0 / ( omega * ( 2.0 - omega ) ) ;
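/*--------------------------------------------------------------------
c tmp = 1/(omega*(2-omega)) scales the correction rsd produced by the
c lower and upper triangular sweeps before it is added to u
--------------------------------------------------------------------*/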
/*--------------------------------------------------------------------
c initialize a,b,c,d to zero (guarantees that page tables have been
c formed, if applicable on given architecture, before timestepping).
--------------------------------------------------------------------*/
{
#pragma omp parallel for firstprivate(j ,k ,m )
for (i = 0; i < ISIZ1; i++) {
#pragma omp parallel for firstprivate(i)
for (j = 0; j < ISIZ2; j++) {
#pragma omp parallel for firstprivate(j ,m ,i )
for (k = 0; k < 5; k++) {
#pragma omp parallel for firstprivate(j ,k ,i )
for (m = 0; m < 5; m++) {
a[i][j][k][m] = 0.0;
b[i][j][k][m] = 0.0;
c[i][j][k][m] = 0.0;
d[i][j][k][m] = 0.0;
}
}
}
}
}
/*--------------------------------------------------------------------
c compute the steady-state residuals
--------------------------------------------------------------------*/
rhs();
/*--------------------------------------------------------------------
c compute the L2 norms of newton iteration residuals
--------------------------------------------------------------------*/
l2norm( nx0, ny0, nz0,
ist, iend, jst, jend,
rsd, rsdnm );
timer_clear(1);
timer_start(1);
/*--------------------------------------------------------------------
c the timestep loop
--------------------------------------------------------------------*/
for (istep = 1; istep <= itmax; istep++) {
if (istep%20 == 0 || istep == itmax || istep == 1) {
printf(" Time step %4d\n", istep);
}
{
/*--------------------------------------------------------------------
c perform SSOR iteration
--------------------------------------------------------------------*/
#pragma omp parallel for firstprivate(iend ,ist ,j ,k ,m ,dt ,nz ,jst ,jend ,istep )
for (i = ist; i <= iend; i++) {
#pragma omp parallel for firstprivate(iend ,ist ,k ,m ,dt ,nz ,jst ,jend ,i ,istep )
for (j = jst; j <= jend; j++) {
#pragma omp parallel for firstprivate(i, j)
for (k = 1; k <= nz - 2; k++) {
#pragma omp parallel for firstprivate(iend ,ist ,j ,k ,dt ,nz ,jst ,jend ,i ,istep )
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = dt * rsd[i][j][k][m];
}
}
}
}
for (k = 1; k <= nz - 2; k++) {
/*--------------------------------------------------------------------
c form the lower triangular part of the jacobian matrix
--------------------------------------------------------------------*/
jacld(k);
/*--------------------------------------------------------------------
c perform the lower triangular solution
--------------------------------------------------------------------*/
blts(nx, ny, nz, k,
omega,
rsd,
a, b, c, d,
ist, iend, jst, jend,
nx0, ny0 );
}
for (k = nz - 2; k >= 1; k--) {
/*--------------------------------------------------------------------
c form the strictly upper triangular part of the jacobian matrix
--------------------------------------------------------------------*/
jacu(k);
/*--------------------------------------------------------------------
c perform the upper triangular solution
--------------------------------------------------------------------*/
buts(nx, ny, nz, k,
omega,
rsd, tv,
d, a, b, c,
ist, iend, jst, jend,
nx0, ny0 );
}
/*--------------------------------------------------------------------
c update the variables
--------------------------------------------------------------------*/
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
for (k = 1; k <= nz-2; k++) {
for (m = 0; m < 5; m++) {
u[i][j][k][m] = u[i][j][k][m]
+ tmp * rsd[i][j][k][m];
}
}
}
}
} /* end parallel */
/*--------------------------------------------------------------------
c compute the L2 norms of newton iteration corrections
--------------------------------------------------------------------*/
if ( istep % inorm == 0 ) {
l2norm( nx0, ny0, nz0,
ist, iend, jst, jend,
rsd, delunm );
}
/*--------------------------------------------------------------------
c compute the steady-state residuals
--------------------------------------------------------------------*/
rhs();
/*--------------------------------------------------------------------
c compute the L2 norms of newton iteration residuals
--------------------------------------------------------------------*/
if ( ( istep % inorm == 0 ) ||
( istep == itmax ) ) {
l2norm( nx0, ny0, nz0,
ist, iend, jst, jend,
rsd, rsdnm );
}
/*--------------------------------------------------------------------
c check the newton-iteration residuals against the tolerance levels
--------------------------------------------------------------------*/
if ( ( rsdnm[0] < tolrsd[0] ) &&
( rsdnm[1] < tolrsd[1] ) &&
( rsdnm[2] < tolrsd[2] ) &&
( rsdnm[3] < tolrsd[3] ) &&
( rsdnm[4] < tolrsd[4] ) ) {
exit(1);
}
}
timer_stop(1);
maxtime= timer_read(1);
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void verify(double xcr[5], double xce[5], double xci,
char *class, boolean *verified) {
/*--------------------------------------------------------------------
c verification routine
--------------------------------------------------------------------*/
double xcrref[5],xceref[5],xciref,
xcrdif[5],xcedif[5],xcidif,
epsilon, dtref;
int m;
/*--------------------------------------------------------------------
c tolerance level
--------------------------------------------------------------------*/
epsilon = 1.0e-08;
*class = 'U';
*verified = TRUE;
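/*--------------------------------------------------------------------
c class stays 'U' (unknown) unless the grid size and iteration count
c match one of the reference configurations below
--------------------------------------------------------------------*/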
#pragma omp parallel for
for (m = 0; m < 5; m++) {
xcrref[m] = 1.0;
xceref[m] = 1.0;
}
xciref = 1.0;
if ( nx0 == 12 && ny0 == 12 && nz0 == 12 && itmax == 50) {
*class = 'S';
dtref = 5.0e-1;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (12X12X12) grid,
c after 50 time steps, with DT = 5.0d-01
--------------------------------------------------------------------*/
xcrref[0] = 1.6196343210976702e-02;
xcrref[1] = 2.1976745164821318e-03;
xcrref[2] = 1.5179927653399185e-03;
xcrref[3] = 1.5029584435994323e-03;
xcrref[4] = 3.4264073155896461e-02;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (12X12X12) grid,
c after 50 time steps, with DT = 5.0d-01
--------------------------------------------------------------------*/
xceref[0] = 6.4223319957960924e-04;
xceref[1] = 8.4144342047347926e-05;
xceref[2] = 5.8588269616485186e-05;
xceref[3] = 5.8474222595157350e-05;
xceref[4] = 1.3103347914111294e-03;
/*--------------------------------------------------------------------
c Reference value of surface integral, for the (12X12X12) grid,
c after 50 time steps, with DT = 5.0d-01
--------------------------------------------------------------------*/
xciref = 7.8418928865937083;
} else if ( nx0 == 33 && ny0 == 33 && nz0 == 33 && itmax == 300) {
*class = 'W'; /* SPEC95fp size */
dtref = 1.5e-3;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (33x33x33) grid,
c after 300 time steps, with DT = 1.5d-3
--------------------------------------------------------------------*/
xcrref[0] = 0.1236511638192e+02;
xcrref[1] = 0.1317228477799e+01;
xcrref[2] = 0.2550120713095e+01;
xcrref[3] = 0.2326187750252e+01;
xcrref[4] = 0.2826799444189e+02;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (33X33X33) grid,
--------------------------------------------------------------------*/
xceref[0] = 0.4867877144216;
xceref[1] = 0.5064652880982e-01;
xceref[2] = 0.9281818101960e-01;
xceref[3] = 0.8570126542733e-01;
xceref[4] = 0.1084277417792e+01;
/*--------------------------------------------------------------------
c Reference value of surface integral, for the (33X33X33) grid,
c after 300 time steps, with DT = 1.5d-3
--------------------------------------------------------------------*/
xciref = 0.1161399311023e+02;
} else if ( nx0 == 64 && ny0 == 64 && nz0 == 64 && itmax == 250) {
*class = 'A';
dtref = 2.0e+0;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (64X64X64) grid,
c after 250 time steps, with DT = 2.0d+00
--------------------------------------------------------------------*/
xcrref[0] = 7.7902107606689367e+02;
xcrref[1] = 6.3402765259692870e+01;
xcrref[2] = 1.9499249727292479e+02;
xcrref[3] = 1.7845301160418537e+02;
xcrref[4] = 1.8384760349464247e+03;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (64X64X64) grid,
c after 250 time steps, with DT = 2.0d+00
--------------------------------------------------------------------*/
xceref[0] = 2.9964085685471943e+01;
xceref[1] = 2.8194576365003349;
xceref[2] = 7.3473412698774742;
xceref[3] = 6.7139225687777051;
xceref[4] = 7.0715315688392578e+01;
/*--------------------------------------------------------------------
c Reference value of surface integral, for the (64X64X64) grid,
c after 250 time steps, with DT = 2.0d+00
--------------------------------------------------------------------*/
xciref = 2.6030925604886277e+01;
} else if ( nx0 == 102 && ny0 == 102 && nz0 == 102 && itmax == 250) {
*class = 'B';
dtref = 2.0e+0;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (102X102X102) grid,
c after 250 time steps, with DT = 2.0d+00
--------------------------------------------------------------------*/
xcrref[0] = 3.5532672969982736e+03;
xcrref[1] = 2.6214750795310692e+02;
xcrref[2] = 8.8333721850952190e+02;
xcrref[3] = 7.7812774739425265e+02;
xcrref[4] = 7.3087969592545314e+03;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (102X102X102)
c grid, after 250 time steps, with DT = 2.0d+00
--------------------------------------------------------------------*/
xceref[0] = 1.1401176380212709e+02;
xceref[1] = 8.1098963655421574;
xceref[2] = 2.8480597317698308e+01;
xceref[3] = 2.5905394567832939e+01;
xceref[4] = 2.6054907504857413e+02;
/*--------------------------------------------------------------------
c Reference value of surface integral, for the (102X102X102) grid,
c after 250 time steps, with DT = 2.0d+00
--------------------------------------------------------------------*/
xciref = 4.7887162703308227e+01;
} else if ( nx0 == 162 && ny0 == 162 && nz0 == 162 && itmax == 250) {
*class = 'C';
dtref = 2.0e+0;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (162X162X162) grid,
c after 250 time steps, with DT = 2.0d+00
--------------------------------------------------------------------*/
xcrref[0] = 1.03766980323537846e+04;
xcrref[1] = 8.92212458801008552e+02;
xcrref[2] = 2.56238814582660871e+03;
xcrref[3] = 2.19194343857831427e+03;
xcrref[4] = 1.78078057261061185e+04;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (162X162X162)
c grid, after 250 time steps, with DT = 2.0d+00
--------------------------------------------------------------------*/
xceref[0] = 2.15986399716949279e+02;
xceref[1] = 1.55789559239863600e+01;
xceref[2] = 5.41318863077207766e+01;
xceref[3] = 4.82262643154045421e+01;
xceref[4] = 4.55902910043250358e+02;
/*--------------------------------------------------------------------
c Reference value of surface integral, for the (162X162X162) grid,
c after 250 time steps, with DT = 2.0d+00
--------------------------------------------------------------------*/
xciref = 6.66404553572181300e+01;
} else {
*verified = FALSE;
}
/*--------------------------------------------------------------------
c verification test for residuals if gridsize is either 12X12X12 or
c 64X64X64 or 102X102X102 or 162X162X162
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Compute the difference of solution values and the known reference values.
--------------------------------------------------------------------*/
#pragma omp parallel for
for (m = 0; m < 5; m++) {
xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]);
xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]);
}
xcidif = fabs((xci - xciref)/xciref);
/*--------------------------------------------------------------------
c Output the comparison of computed results to known cases.
--------------------------------------------------------------------*/
if (*class != 'U') {
printf("\n Verification being performed for class %1c\n", *class);
printf(" Accuracy setting for epsilon = %20.13e\n", epsilon);
if (fabs(dt-dtref) > epsilon) {
*verified = FALSE;
*class = 'U';
printf(" DT does not match the reference value of %15.8e\n", dtref);
}
} else {
printf(" Unknown class\n");
}
if (*class != 'U') {
printf(" Comparison of RMS-norms of residual\n");
} else {
printf(" RMS-norms of residual\n");
}
for (m = 0; m < 5; m++) {
if (*class == 'U') {
printf(" %2d %20.13e\n", m, xcr[m]);
} else if (xcrdif[m] > epsilon) {
*verified = FALSE;
printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n",
m,xcr[m],xcrref[m],xcrdif[m]);
} else {
printf(" %2d %20.13e%20.13e%20.13e\n",
m,xcr[m],xcrref[m],xcrdif[m]);
}
}
if (*class != 'U') {
printf(" Comparison of RMS-norms of solution error\n");
} else {
printf(" RMS-norms of solution error\n");
}
for (m = 0; m < 5; m++) {
if (*class == 'U') {
printf(" %2d %20.13e\n", m, xce[m]);
} else if (xcedif[m] > epsilon) {
*verified = FALSE;
printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n",
m,xce[m],xceref[m],xcedif[m]);
} else {
printf(" %2d %20.13e%20.13e%20.13e\n",
m,xce[m],xceref[m],xcedif[m]);
}
}
if (*class != 'U') {
printf(" Comparison of surface integral\n");
} else {
printf(" Surface integral\n");
}
if (*class == 'U') {
printf(" %20.13e\n", xci);
} else if (xcidif > epsilon) {
*verified = FALSE;
printf(" FAILURE: %20.13e%20.13e%20.13e\n",
xci, xciref, xcidif);
} else {
printf(" %20.13e%20.13e%20.13e\n",
xci, xciref, xcidif);
}
if (*class == 'U') {
printf(" No reference values provided\n");
printf(" No verification performed\n");
} else if (*verified) {
printf(" Verification Successful\n");
} else {
printf(" Verification failed\n");
}
}
|
single.c | #include <stdio.h>
#include <omp.h>
int main() {
#pragma omp parallel
{
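// Only one thread of the team executes the single block below; which thread is unspecified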
#pragma omp single
{
printf("Top Secret\n");
}
// The single construct ends with an implicit barrier, so every thread waits here until the single block has finished before continuing
printf("Don't tell it to anyone\n");
}
} |