source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
target_parallel_for_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target parallel for'}}
#pragma omp target parallel for
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target parallel for'}}
#pragma omp target parallel for foo
void test_no_clause(void) {
int i;
#pragma omp target parallel for
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp target parallel for' must be a for loop}}
#pragma omp target parallel for
++i;
}
void test_branch_protected_scope(void) {
int i = 0;
L1:
++i;
int x[24];
#pragma omp target parallel for
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
void test_invalid_clause(void) {
int i;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target parallel for' are ignored}}
#pragma omp target parallel for foo bar
for (i = 0; i < 16; ++i)
;
}
void test_non_identifiers(void) {
int i, x;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target parallel for' are ignored}}
#pragma omp target parallel for;
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target parallel for' are ignored}}
#pragma omp target parallel for private(x);
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target parallel for' are ignored}}
#pragma omp target parallel for, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo(void);
void test_collapse(void) {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp target parallel for collapse
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for collapse(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for collapse()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for collapse(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for collapse(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp target parallel for' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp target parallel for collapse 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target parallel for', but found only 1}}
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target parallel for', but found only 1}}
#pragma omp target parallel for collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target parallel for', but found only 1}}
// expected-error@+1 {{integer constant expression}}
#pragma omp target parallel for collapse(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{integer constant expression}}
#pragma omp target parallel for collapse(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target parallel for collapse(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target parallel for collapse(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target parallel for collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
// expected-note@+1 2 {{defined as firstprivate}}
#pragma omp target parallel for collapse(2) firstprivate(i)
for (i = 0; i < 16; ++i) // expected-error {{loop iteration variable in the associated loop of 'omp target parallel for' directive may not be firstprivate, predetermined as private}}
// expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for' directive into a parallel or another task region?}}
for (int j = 0; j < 16; ++j)
// expected-error@+2 2 {{reduction variable must be shared}}
// expected-error@+1 {{region cannot be closely nested inside 'target parallel for' region; perhaps you forget to enclose 'omp for' directive into a parallel region?}}
#pragma omp for reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
}
void test_private(void) {
int i;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for private(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for private(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for private(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for private()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for private(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target parallel for private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target parallel for private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
void test_lastprivate(void) {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for lastprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for lastprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for lastprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for lastprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for lastprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target parallel for lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target parallel for lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_firstprivate(void) {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for firstprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for firstprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for firstprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for firstprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for firstprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target parallel for firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target parallel for lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_loop_messages(void) {
float a[100], b[100], c[100];
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target parallel for
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target parallel for
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
}
|
GB_queue_check.c | //------------------------------------------------------------------------------
// GB_queue_check: check the status of the queue for a particular matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2018, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
#include "GB.h"
void GB_queue_check
(
GrB_Matrix A, // matrix to check
GrB_Matrix *head, // head of the queue
GrB_Matrix *prev, // prev from A
GrB_Matrix *next, // next after A
bool *enqd // true if A is in the queue
)
{
#pragma omp critical (GB_queue)
{
// get the status of the queue for this matrix
(*head) = (GrB_Matrix) (GB_Global.queue_head) ;
(*prev) = (GrB_Matrix) (A->queue_prev) ;
(*next) = (GrB_Matrix) (A->queue_next) ;
(*enqd) = A->enqueued ;
}
}
|
evolve.c | #include <tgmath.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "evolve.h"
#include "evolve_shared.h"
#include "evolve_shared_collisions.h"
#include "evolve_sf.h"
#include "evolve_cc.h"
#include "evolve_kepler.h"
#include "evolve_ok.h"
#include "evolve_bs.h"
#include "evolve_error_control.h"
#ifdef EVOLVE_OPENCL
#include "evolve_cl.h"
#endif
int verbosity=0;
struct sys debugsys;
FLOAT eps2;
FLOAT dt_param;
struct sys zerosys ={0,0,NULL,NULL};
int accel_zero_mass=1;
int opencl_device_type=0;
struct diagnostics global_diag;
struct diagnostics *diag;
static void report(struct sys s,DOUBLE etime, int inttype);
#ifndef M_SQRT2
#define M_SQRT2 1.41421356237309504880168872420969808L
#endif
void move_system(struct sys s, DOUBLE dpos[3],DOUBLE dvel[3],int dir)
{
struct particle *ipart;
for(UINT p=0;p<s.n;p++)
{
ipart=GETPART(s, p);
for(int i=0;i<3;i++)
{
COMPSUMP(ipart->pos[i],ipart->pos_e[i],dir*dpos[i])
COMPSUMV(ipart->vel[i],ipart->vel_e[i],dir*dvel[i])
}
}
}
void system_center_of_mass(struct sys s, DOUBLE *cmpos, DOUBLE *cmvel)
{
DOUBLE mass=0.,pos[3]={0.,0.,0.},vel[3]={0.,0.,0.};
struct particle *ipart;
for(UINT p=0;p<s.n;p++)
{
ipart=GETPART(s, p);
for(int i=0;i<3;i++)
{
pos[i]+=(DOUBLE) ipart->mass*ipart->pos[i];
vel[i]+=(DOUBLE) ipart->mass*ipart->vel[i];
}
mass+=(DOUBLE) ipart->mass;
}
for(int i=0;i<3;i++)
{
cmpos[i]=pos[i]/mass;
cmvel[i]=vel[i]/mass;
}
}
DOUBLE system_kinetic_energy(struct sys s)
{
UINT i;
DOUBLE e=0.;
struct particle *ipart;
for(i=0;i<s.n;i++)
{
ipart=GETPART(s, i);
e+=0.5*ipart->mass*( ipart->vel[0]*ipart->vel[0]+
ipart->vel[1]*ipart->vel[1]+
ipart->vel[2]*ipart->vel[2] );
}
return e;
}
DOUBLE system_potential_energy(struct sys s)
{
UINT i;
DOUBLE e=0.;
struct particle *ipart;
for(i=0;i<s.n;i++)
{
ipart=GETPART(s, i);
e+=ipart->mass*ipart->pot;
}
return e/2;
}
void init_code()
{
diag=&global_diag;
#ifdef EVOLVE_OPENCL
init_cl();
#endif
}
void stop_code()
{
#ifdef EVOLVE_OPENCL
close_cl();
#endif
evolve_ok_stop(); // safe to call even if ok was not used
}
void init_evolve(struct sys s,int inttype)
{
struct particle *ipart;
for(UINT i=0;i<s.n;i++)
{
ipart=GETPART(s,i);
ipart->postime=0.;
ipart->pot=0.;
#ifdef COMPENSATED_SUMMP
ipart->pos_e[0]=0.;ipart->pos_e[1]=0.;ipart->pos_e[2]=0.;
#endif
#ifdef COMPENSATED_SUMMV
ipart->vel_e[0]=0.;ipart->vel_e[1]=0.;ipart->vel_e[2]=0.;
#endif
}
if(accel_zero_mass) split_zeromass(&s); // because of potential calc
potential(s,s);
evolve_ok_stop();
if (inttype == OK) evolve_ok_init(s);
}
void zero_diagnostics(struct diagnostics* diag)
{
diag->deepsteps=0;
diag->simtime=0.;
diag->timetrack=0.;
#ifdef EVOLVE_OPENCL
diag->cpu_step=0;
diag->cl_step=0;
diag->cpu_count=0;
diag->cl_count=0;
#endif
for(int i=0;i<MAXLEVEL;i++)
{
diag->tstep[i]=0;diag->tcount[i]=0;
diag->kstep[i]=0;diag->kcount[i]=0;
diag->dstep[i]=0;diag->dcount[i]=0;
diag->cefail[i]=0;diag->cecount[i]=0;
diag->bsstep[i]=0;diag->jcount[i]=0;
diag->ntasks[i]=0;diag->taskcount[i]=0;
}
diag->taskdrift=0;
diag->taskkick=0;
}
void sum_diagnostics(struct diagnostics* total,struct diagnostics* diag)
{
int tasksum=0;
unsigned long taskcountsum=0;
total->simtime+=diag->simtime;
total->timetrack+=diag->timetrack;
total->deepsteps+=diag->deepsteps;
for(int i=0;i<MAXLEVEL;i++)
{
total->tstep[i]+=diag->tstep[i];
total->tcount[i]+=diag->tcount[i];
total->kstep[i]+=diag->kstep[i];
total->kcount[i]+=diag->kcount[i];
total->dstep[i]+=diag->dstep[i];
total->dcount[i]+=diag->dcount[i];
total->cefail[i]+=diag->cefail[i];
total->cecount[i]+=diag->cecount[i];
total->bsstep[i]+=diag->bsstep[i];
total->jcount[i]+=diag->jcount[i];
total->ntasks[i]+=diag->ntasks[i];tasksum+=diag->ntasks[i];
total->taskcount[i]+=diag->taskcount[i];taskcountsum+=diag->taskcount[i];
}
#ifdef EVOLVE_OPENCL
total->cpu_step+=diag->cpu_step;
total->cl_step+=diag->cl_step;
total->cpu_count+=diag->cpu_count;
total->cl_count+=diag->cl_count;
#endif
#ifdef _OPENMP
printf("task %d: %d %li %li %li\n",omp_get_thread_num(),tasksum,diag->taskdrift,
diag->taskkick,taskcountsum);
#endif
}
void do_evolve(struct sys s, double dt, int inttype)
{
int i,clevel;
struct particle *ipart;
if(dt==0) return;
for(UINT p=0;p<s.n;p++) GETPART(s,p)->postime=0.;
clevel=0;
if(accel_zero_mass) split_zeromass(&s);
zero_diagnostics(diag);
debugsys=s;
switch (inttype)
{
case CONSTANT:
case CONSTANT2:
evolve_constant2(clevel,s, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt);
break;
case CONSTANT4:
evolve_constant4(clevel,s, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt);
break;
case CONSTANT6:
evolve_constant6(clevel,s, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt);
break;
case CONSTANT8:
evolve_constant8(clevel,s, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt);
break;
case CONSTANT10:
evolve_constant10(clevel,s, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt);
break;
case SHARED2:
evolve_shared2(clevel,s, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt, -1.);
break;
case SHARED4:
evolve_shared4(clevel,s, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt, -1.);
break;
case SHARED6:
evolve_shared6(clevel,s, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt, -1.);
break;
case SHARED8:
evolve_shared8(clevel,s, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt, -1.);
break;
case SHARED10:
evolve_shared10(clevel,s, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt, -1.);
break;
case SHAREDBS:
evolve_bs_adaptive(clevel,s, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt, -1.);
break;
case BS_CC_KEPLER:
evolve_bs(clevel,s, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt);
break;
case PASS:
evolve_split_pass(clevel,s, zerosys,(DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt,1);
break;
case HOLD:
evolve_split_hold(clevel,s,(DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt,1);
break;
case BRIDGE:
evolve_split_bridge(clevel,s,(DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt,1);
break;
case NAIVE:
evolve_split_naive(clevel,s, zerosys,(DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt,1);
break;
case HOLD_DKD:
evolve_split_hold_dkd(clevel,s,(DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt,1);
break;
case PASS_DKD:
evolve_split_pass_dkd(clevel,s, zerosys, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt,1);
break;
case PPASS_DKD:
evolve_split_ppass_dkd(clevel,s, zerosys, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt,1);
break;
case BRIDGE_DKD:
evolve_split_bridge_dkd(clevel,s,(DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt,1);
break;
case CC:
case CC_KEPLER:
case CC_BS:
case CC_BSA:
case CCC:
case CCC_KEPLER:
case CCC_BS:
case CCC_BSA:
case CC_SHARED10:
case CCC_SHARED10:
#ifdef _OPENMP
#pragma omp parallel shared(global_diag,s,dt,clevel) copyin(dt_param)
{
diag=(struct diagnostics *) malloc(sizeof( struct diagnostics));
zero_diagnostics(diag);
#pragma omp master
printf("Total Threads # %d\n", omp_get_num_threads());
#pragma omp single
#endif
#ifdef CC2_SPLIT_SHORTCUTS
evolve_cc2_shortcut(clevel,s,(DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt, inttype, 1, -1.);
#else
evolve_cc2(clevel,s,(DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt, inttype, 1);
#endif
#ifdef _OPENMP
#pragma omp critical
sum_diagnostics(&global_diag,diag);
}
diag=&global_diag;
#endif
break;
case OK:
evolve_ok2(clevel,s, zeroforces, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt,1);
break;
case KEPLER:
evolve_kepler(clevel,s,(DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt);
break;
case FOURTH_M4:
evolve_sf_4m4(clevel,s,(DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt,1);
break;
case FOURTH_M5:
evolve_sf_4m5(clevel,s,(DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt,1);
break;
case SHARED2_COLLISIONS:
evolve_shared2_collision_detection(s, (DOUBLE) dt);
break;
case SHARED4_COLLISIONS:
evolve_shared4_collision_detection(s, (DOUBLE) dt);
break;
case SHARED6_COLLISIONS:
evolve_shared6_collision_detection(s, (DOUBLE) dt);
break;
case SHARED8_COLLISIONS:
evolve_shared8_collision_detection(s, (DOUBLE) dt);
break;
case SHARED10_COLLISIONS:
evolve_shared10_collision_detection(s, (DOUBLE) dt);
break;
case ERROR_CONTROL:
evolve_error_control(clevel,s, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt, -1.);
break;
default:
ENDRUN("unknown integrator %d\n", inttype);
break;
}
for(UINT p=0;p<s.n;p++) GETPART(s,p)->pot=0;
potential(s,s);
if(verbosity>0) report(s,(DOUBLE) dt, inttype);
}
void drift(int clevel,struct sys s, DOUBLE etime, DOUBLE dt)
{
struct particle *ipart;
for(UINT i=0;i<s.n;i++)
{
ipart=GETPART(s,i);
COMPSUMP(ipart->pos[0],ipart->pos_e[0],dt*ipart->vel[0])
COMPSUMP(ipart->pos[1],ipart->pos_e[1],dt*ipart->vel[1])
COMPSUMP(ipart->pos[2],ipart->pos_e[2],dt*ipart->vel[2])
ipart->postime=etime;
}
diag->dstep[clevel]++;
diag->dcount[clevel]+=s.n;
diag->taskdrift+=s.n;
}
static void kick_cpu(struct sys s1, struct sys s2, DOUBLE dt)
{
FLOAT dx[3],dr3,dr2,dr,acci;
FLOAT acc[3];
struct particle *ipart, *jpart;
#pragma omp parallel for if((ULONG) s1.n*(s2.n-s2.nzero)>MPWORKLIMIT && !omp_in_parallel()) default(none) \
private(dx,dr3,dr2,dr,acc,acci,ipart,jpart) \
shared(dt,s1,s2,eps2)
for(UINT i=0;i<s1.n;i++)
{
ipart=GETPART(s1,i);
acc[0]=0.;
acc[1]=0.;
acc[2]=0.;
for(UINT j=0;j<s2.n-s2.nzero;j++)
{
jpart=GETPART(s2,j);
//~ if(jpart->mass==0) continue;
// if(ipart==jpart) continue;
dx[0]=ipart->pos[0]-jpart->pos[0];
dx[1]=ipart->pos[1]-jpart->pos[1];
dx[2]=ipart->pos[2]-jpart->pos[2];
dr2=dx[0]*dx[0]+dx[1]*dx[1]+dx[2]*dx[2]+eps2;
if(dr2>0)
{
dr=sqrt(dr2);
dr3=dr*dr2;
acci=jpart->mass/dr3;
acc[0]-=dx[0]*acci;
acc[1]-=dx[1]*acci;
acc[2]-=dx[2]*acci;
}
}
COMPSUMV(ipart->vel[0],ipart->vel_e[0],dt*acc[0]);
COMPSUMV(ipart->vel[1],ipart->vel_e[1],dt*acc[1]);
COMPSUMV(ipart->vel[2],ipart->vel_e[2],dt*acc[2]);
}
}
void kick(int clevel,struct sys s1, struct sys s2, DOUBLE dt)
{
#ifdef EVOLVE_OPENCL
if((ULONG) s1.n*(s2.n-s2.nzero)>CLWORKLIMIT)
{
#pragma omp critical
kick_cl(s1,s2,dt);
diag->cl_step++;
diag->cl_count+=(ULONG) s1.n*s2.n;
} else
{
kick_cpu(s1,s2,dt);
diag->cpu_step++;
diag->cpu_count+=(ULONG) s1.n*s2.n;
}
#else
kick_cpu(s1,s2,dt);
#endif
diag->kstep[clevel]++;
diag->kcount[clevel]+=(ULONG) s1.n*s2.n;
diag->taskkick+=(ULONG) s1.n*s2.n;
}
static void potential_cpu(struct sys s1,struct sys s2)
{
FLOAT dx[3],dr2,dr;
FLOAT pot;
struct particle *ipart, *jpart;
#pragma omp parallel for if((ULONG) s1.n*(s2.n-s2.nzero)>MPWORKLIMIT && !omp_in_parallel()) default(none) \
private(dx,dr2,dr,pot, ipart,jpart) \
shared(s1,s2,eps2)
for(UINT i=0;i<s1.n;i++)
{
pot=0;
ipart=GETPART(s1,i);
for(UINT j=0;j<s2.n-s2.nzero;j++)
{
jpart=GETPART(s2,j);
if(ipart==jpart) continue;
dx[0]=ipart->pos[0]-jpart->pos[0];
dx[1]=ipart->pos[1]-jpart->pos[1];
dx[2]=ipart->pos[2]-jpart->pos[2];
dr2=dx[0]*dx[0]+dx[1]*dx[1]+dx[2]*dx[2]+eps2;
if(dr2>0)
{
dr=sqrt(dr2);
pot-=jpart->mass/dr;
}
}
ipart->pot+=pot;
}
}
void potential(struct sys s1, struct sys s2)
{
#ifdef EVOLVE_OPENCL
if((ULONG) s1.n*(s2.n-s2.nzero)>CLWORKLIMIT)
{
#pragma omp critical
potential_cl(s1,s2);
} else
{
potential_cpu(s1,s2);
}
#else
potential_cpu(s1,s2);
#endif
}
inline FLOAT timestep_ij(struct particle *i, struct particle *j,int dir) {
FLOAT timestep;
FLOAT dx[3],dr3,dr2,dr,dv[3],dv2,mu,vdotdr2,tau,dtau;
timestep=HUGE_VAL;
if(i==j) return timestep;
dx[0]=i->pos[0] - j->pos[0];
dx[1]=i->pos[1] - j->pos[1];
dx[2]=i->pos[2] - j->pos[2];
dr2=dx[0]*dx[0]+dx[1]*dx[1]+dx[2]*dx[2]+eps2;
mu=i->mass + j->mass;
if(dr2>0 && mu>0)
{
dr=sqrt(dr2);
dr3=dr*dr2;
dv[0]=i->vel[0] - j->vel[0];
dv[1]=i->vel[1] - j->vel[1];
dv[2]=i->vel[2] - j->vel[2];
vdotdr2=(dv[0]*dx[0]+dv[1]*dx[1]+dv[2]*dx[2])/dr2;
dv2=dv[0]*dv[0]+dv[1]*dv[1]+dv[2]*dv[2];
#ifdef RATIMESTEP
tau=RARVRATIO*dt_param/M_SQRT2*sqrt(dr3/mu);
dtau=3/2.*dir*tau*vdotdr2;
if(dtau>1.) dtau=1.;
tau/=(1-dtau/2);
if(tau < timestep) timestep=tau;
#endif
#ifdef RVTIMESTEP
if(dv2>0)
{
tau=dt_param*dr/sqrt(dv2);
dtau=dir*tau*vdotdr2*(1+mu/(dv2*dr));
if(dtau>1.) dtau=1.;
tau/=(1-dtau/2);
if(tau < timestep) timestep=tau;
}
#endif
}
if (timestep < 0)
{
ENDRUN("negative timestep!\n");
}
return timestep;
}
static void timestep_cpu(struct sys s1, struct sys s2,int dir)
{
UINT i,j, jmax;
FLOAT timestep,tau;
struct particle *ipart;
#pragma omp parallel for if((ULONG) (s1.n*s2.n-s1.nzero*s2.nzero)>MPWORKLIMIT && !omp_in_parallel()) default(none) \
private(i,j,tau,timestep, jmax, ipart) copyin(dt_param) \
shared(s1,s2,stdout,dir)
for(i=0;i<s1.n;i++)
{
timestep=HUGE_VAL;
ipart=GETPART(s1,i);
jmax=s2.n;if(i>=s1.n-s1.nzero) jmax=s2.n-s2.nzero;
for(j=0;j<jmax;j++)
{
tau=timestep_ij(ipart,GETPART(s2,j),dir);
if(tau < timestep) timestep=tau;
}
// if(timestep<ipart->timestep)
ipart->timestep=timestep;
}
}
void timestep(int clevel,struct sys s1, struct sys s2,int dir)
{
#ifdef EVOLVE_OPENCL
if((ULONG) (s1.n*s2.n-s1.nzero*s2.nzero)>CLWORKLIMIT)
{
#pragma omp critical
timestep_cl(s1,s2,dir);
} else
{
timestep_cpu(s1,s2,dir);
}
#else
timestep_cpu(s1,s2,dir);
#endif
diag->tstep[clevel]++;
diag->tcount[clevel]+=(ULONG) s1.n*s2.n;
}
static void report(struct sys s,DOUBLE etime, int inttype)
{
int maxlevel=0,i;
long int ktot=0,dtot=0, kstot=0,dstot=0,ttot=0,tstot=0;
UINT n,p,err=0;
struct particle *ipart;
n=s.n;
printf("** report **\n");
printf("interaction counts:\n");
for(i=0;i<MAXLEVEL;i++)
{
printf(" %4i: %10li %18li, %10li %18li\n",i, diag->kstep[i], diag->kcount[i], diag->dstep[i],diag->dcount[i]);
if(diag->kcount[i]>0) maxlevel=i;
ttot+=diag->tcount[i];
ktot+=diag->kcount[i];
dtot+=diag->dcount[i];
tstot+=diag->tstep[i];
kstot+=diag->kstep[i];
dstot+=diag->dstep[i];
}
printf("total: %18li %18li %18li\n",ktot,dtot,ttot);
if(inttype == PASS_DKD || inttype == HOLD_DKD || inttype == PPASS_DKD)
printf("equiv: %18li %18li %18li\n",(long int) diag->deepsteps*n*n,2*diag->deepsteps*n,(long int) diag->deepsteps*n*n);
else
printf("equiv: %18li %18li %18li\n",(long int) 2*diag->deepsteps*n*n,diag->deepsteps*n,(long int) diag->deepsteps*n*n);
printf("ksteps: %18li, dsteps: %18li, tsteps: %18li\n", kstot,dstot,tstot);
printf("steps: %18li, equiv: %18li, maxlevel: %i\n",
diag->deepsteps,((long) 1)<<maxlevel,maxlevel);
for(p=0;p<s.n;p++) if(GETPART(s,p)->postime != (DOUBLE) etime) err++;
printf("postime errors: %u \n",err);
printf("target time, actual time: %12.8g %12.8g %12.8g\n",
(double) etime,(double) diag->simtime,(double) ((DOUBLE) etime-diag->simtime));
printf("time track, ratio: %12.8g %12.8g\n", (double) diag->timetrack,
(double) (diag->simtime!=0? (diag->timetrack/diag->simtime) :1));
#ifdef EVOLVE_OPENCL
printf("cpu step,count: %12li,%18li\n",diag->cpu_step,diag->cpu_count);
printf("cl step,count: %12li,%18li\n",diag->cl_step,diag->cl_count);
#endif
if(inttype==SHAREDBS || inttype==CC_BS || inttype==CCC_BS || inttype==CC_BSA || inttype==CCC_BSA)
{
unsigned long totalbs=0,totalj=0;
printf("bs counts:\n");
for(i=0;i<MAXLEVEL;i++)
{
totalbs+=diag->bsstep[i];
totalj+=diag->jcount[i];
printf("%d: %18li %18li %f\n",i,diag->bsstep[i],diag->jcount[i],diag->jcount[i]/(1.*diag->bsstep[i]+1.e-20));
}
printf(" total, total j, mean j: %18li %18li %f\n",totalbs,totalj,totalj/(1.*totalbs));
}
if(inttype==KEPLER || inttype==CC_KEPLER || inttype==CCC_KEPLER || inttype==CCC_BS ||
inttype==CC_BS || inttype==CCC_BSA || inttype==CC_BSA || inttype==CC_SHARED10 || inttype==CC_SHARED10)
{
unsigned long totalcefail=0,totalcecount=0;
printf("kepler solver counts:\n");
for(i=0;i<MAXLEVEL;i++)
{
totalcefail+=diag->cefail[i];
totalcecount+=diag->cecount[i];
printf("%d: %18li %18li\n",i,diag->cefail[i],diag->cecount[i]);
}
printf(" total, total j, mean j: %18li %18li\n",totalcefail,totalcecount);
}
#ifdef _OPENMP
{
int totaltasks=0;
printf("task counts:\n");
for(i=0;i<MAXLEVEL;i++)
{
printf("%d: %18li %18li\n",i,diag->ntasks[i],diag->taskcount[i]);
totaltasks+=diag->ntasks[i];
}
printf("openmp tasks: %d\n",totaltasks);
}
#endif
fflush(stdout);
}
void join_array(UINT n1, struct particle *p1,
UINT n2, struct particle *p2,
UINT *n, struct particle **p)
{
if(n1==0 && n2==0)
{
*n=0;*p=NULL;
}
if(n1!=0 && n2==0)
{
*n=n1;*p=p1;
}
if(n1==0 && n2!=0)
{
*n=n2;*p=p2;
}
if(n1!=0 && n2!=0)
{
*n=n1+n2;
if(p1+n1==p2)
{
*p=p1;
}
else
{
if(p2+n2==p1)
{
*p=p2;
} else ENDRUN("join_array error");
}
}
}
struct sys join(struct sys s1,struct sys s2)
{
struct sys s=zerosys;
if(s1.n==0) return s2;
if(s2.n==0) return s1;
join_array(s1.n-s1.nzero, s1.part, s2.n-s2.nzero, s2.part, &s.n, &s.part);
join_array(s1.nzero, s1.zeropart, s2.nzero, s2.zeropart, &s.nzero, &s.zeropart);
s.n=s.n+s.nzero;
if(s.n-s.nzero>0 && LAST(s)-s.part + 1 != s.n-s.nzero) ENDRUN("join error 1");
if(s.nzero>0 && LASTZERO(s)-s.zeropart + 1 != s.nzero) ENDRUN("join error 2");
return s;
}
FLOAT global_timestep(struct sys s)
{
FLOAT dt,mindt=HUGE_VAL;
for(UINT i=0;i<s.n;i++)
{
dt=GETPART(s, i)->timestep;
if(dt<mindt) mindt=dt;
}
return mindt;
}
FLOAT max_global_timestep(struct sys s)
{
FLOAT dt,maxdt=0;
for(UINT i=0;i<s.n;i++)
{
dt=GETPART(s, i)->timestep;
if(dt>maxdt) maxdt=dt;
}
return maxdt;
}
void kdk(int clevel,struct sys s1,struct sys s2, DOUBLE stime, DOUBLE etime, DOUBLE dt)
{
if(s2.n>0) kick(clevel,s2, s1, dt/2);
kick(clevel,s1,join(s1,s2),dt/2);
drift(clevel,s1,etime, dt);
kick(clevel,s1,join(s1,s2),dt/2);
if(s2.n>0) kick(clevel,s2, s1, dt/2);
}
void dkd(int clevel,struct sys s1,struct sys s2, DOUBLE stime, DOUBLE etime, DOUBLE dt)
{
drift(clevel,s1,stime+dt/2, dt/2);
kick(clevel,s1,join(s1,s2),dt);
if(s2.n>0) kick(clevel,s2, s1, dt);
drift(clevel,s1,etime, dt/2);
}
void split_zeromass(struct sys *s)
{
UINT i=0;
struct particle *left, *right;
if(s->n==0) return;
if(s->part==NULL) ENDRUN("split_zeromass malformed input");
if(s->n-s->nzero==0)
{
if(s->zeropart==NULL || s->part!=s->zeropart) ENDRUN("split_zeromass malformed input");
if(LASTZERO(*s)-s->zeropart+1!=s->nzero) ENDRUN( "split_zeromass malformed input sys");
return;
}
if(s->nzero!=0 && LAST(*s)+1!=s->zeropart)
ENDRUN("split_zeromass can only work on fully contiguous sys");
left=s->part;
right=s->part+(s->n-1);
while(1)
{
if(i>=s->n) ENDRUN("split_zeromass error 1");
i++;
while(left->mass!=0 && left<right) left++;
while(right->mass==0 && left<right) right--;
if(left<right)
{SWAP( *left, *right, struct particle);}
else
break;
}
if(left->mass!=0) left++;
s->nzero=s->n-(left-s->part);
if(s->nzero<0) ENDRUN("split_zeromass find negative number of part");
if(s->nzero>0)
{
s->zeropart=left;
}
if((left-s->part)+s->nzero !=s->n) ENDRUN( "split_zeromass error 2");
for(i=0;i<(s->n-s->nzero);i++) if(GETPART(*s,i)->mass==0) ENDRUN ("split_zromass error 3");
for(i=s->n-s->nzero;i<s->n;i++) if(GETPART(*s,i)->mass!=0) ENDRUN ("split_zeromass error 4");
#ifdef CONSISTENCY_CHECKS
verify_split_zeromass(*s);
#endif
}
void verify_split_zeromass(struct sys s)
{
if(!accel_zero_mass) return;
for(UINT i=0;i<s.n-s.nzero;i++) if(GETPART(s,i)->mass==0) ENDRUN("massless particle in main part\n")
for(UINT i=s.n-s.nzero;i<s.n;i++) if(GETPART(s,i)->mass!=0) ENDRUN("massive particle in massless part\n")
}
|
loop.c | #include<stdio.h>
#include<stdlib.h>
#include<omp.h>
#define TAM 64*127*5
#define ITERACOES_TESTE 100000
int main(){
int i;
long soma;
int *vetor = calloc(TAM,sizeof(int));
if(vetor == NULL){
printf("Falha ao alocar memória");
return -1;
}
for(int contador=0; contador < ITERACOES_TESTE; contador++){
for(int i=0; i<TAM; i++){
vetor[i] ++;
}
}
/*for(int contador=0; contador < ITERACOES_TESTE; contador++){
#pragma omp parallel num_threads(2)
{//thread1 faz os pares
if(omp_get_thread_num()==0){
for(int i=0; i<TAM; i+=2){
vetor[i] ++;
}
}else if(omp_get_thread_num()==1){//thread2 faz os ímpares
for(int i=1; i<TAM; i+=2){
vetor[i] ++;
}
}
}
}*/
soma = 0;
for(int i=0; i<TAM; i++){
soma += vetor[i];
}
printf("%ld\n", soma);
free(vetor);
return 0;
}
|
psd.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
Define declaractions.
*/
#define MaxPSDChannels 56
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
/*
Enumerated declaractions.
*/
typedef enum
{
Raw = 0,
RLE = 1,
ZipWithoutPrediction = 2,
ZipWithPrediction = 3
} PSDCompressionType;
typedef enum
{
BitmapMode = 0,
GrayscaleMode = 1,
IndexedMode = 2,
RGBMode = 3,
CMYKMode = 4,
MultichannelMode = 7,
DuotoneMode = 8,
LabMode = 9
} PSDImageType;
/*
Typedef declaractions.
*/
typedef struct _ChannelInfo
{
short
type;
size_t
size;
} ChannelInfo;
typedef struct _MaskInfo
{
Image
*image;
RectangleInfo
page;
unsigned char
background,
flags;
} MaskInfo;
typedef struct _LayerInfo
{
ChannelInfo
channel_info[MaxPSDChannels];
char
blendkey[4];
Image
*image;
MaskInfo
mask;
Quantum
opacity;
RectangleInfo
page;
size_t
offset_x,
offset_y;
unsigned char
clipping,
flags,
name[257],
visible;
unsigned short
channels;
StringInfo
*info;
} LayerInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD()() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
if (length < 4)
return(MagickFalse);
if (LocaleNCompare((const char *) magick,"8BPS",4) == 0)
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
% Image *ReadPSDImage(image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
switch (image->compose)
{
case ColorBurnCompositeOp:
return(image->endian == LSBEndian ? "vidi" : "idiv");
case ColorDodgeCompositeOp:
return(image->endian == LSBEndian ? " vid" : "div ");
case ColorizeCompositeOp:
return(image->endian == LSBEndian ? "rloc" : "colr");
case DarkenCompositeOp:
return(image->endian == LSBEndian ? "krad" : "dark");
case DifferenceCompositeOp:
return(image->endian == LSBEndian ? "ffid" : "diff");
case DissolveCompositeOp:
return(image->endian == LSBEndian ? "ssid" : "diss");
case ExclusionCompositeOp:
return(image->endian == LSBEndian ? "dums" : "smud");
case HardLightCompositeOp:
return(image->endian == LSBEndian ? "tiLh" : "hLit");
case HardMixCompositeOp:
return(image->endian == LSBEndian ? "xiMh" : "hMix");
case HueCompositeOp:
return(image->endian == LSBEndian ? " euh" : "hue ");
case LightenCompositeOp:
return(image->endian == LSBEndian ? "etil" : "lite");
case LinearBurnCompositeOp:
return(image->endian == LSBEndian ? "nrbl" : "lbrn");
case LinearDodgeCompositeOp:
return(image->endian == LSBEndian ? "gddl" : "lddg");
case LinearLightCompositeOp:
return(image->endian == LSBEndian ? "tiLl" : "lLit");
case LuminizeCompositeOp:
return(image->endian == LSBEndian ? " mul" : "lum ");
case MultiplyCompositeOp:
return(image->endian == LSBEndian ? " lum" : "mul ");
case OverlayCompositeOp:
return(image->endian == LSBEndian ? "revo" : "over");
case PinLightCompositeOp:
return(image->endian == LSBEndian ? "tiLp" : "pLit");
case SaturateCompositeOp:
return(image->endian == LSBEndian ? " tas" : "sat ");
case ScreenCompositeOp:
return(image->endian == LSBEndian ? "nrcs" : "scrn");
case SoftLightCompositeOp:
return(image->endian == LSBEndian ? "tiLs" : "sLit");
case VividLightCompositeOp:
return(image->endian == LSBEndian ? "tiLv" : "vLit");
case OverCompositeOp:
default:
return(image->endian == LSBEndian ? "mron" : "norm");
}
}
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
Image *image,ExceptionInfo* exception)
{
const char
*option;
MagickBooleanType
status;
ssize_t
y;
if ((image->alpha_trait != BlendPixelTrait) ||
(image->colorspace != sRGBColorspace))
return(MagickTrue);
option=GetImageOption(image_info,"psd:alpha-unblend");
if (IsStringFalse(option) != MagickFalse)
return(MagickTrue);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
register ssize_t
i;
gamma=QuantumScale*GetPixelAlpha(image, q);
if (gamma != 0.0 && gamma != 1.0)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
if (channel != AlphaPixelChannel)
q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma);
}
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
return(status);
}
static inline CompressionType ConvertPSDCompression(
PSDCompressionType compression)
{
switch (compression)
{
case RLE:
return RLECompression;
case ZipWithPrediction:
case ZipWithoutPrediction:
return ZipCompression;
default:
return NoCompression;
}
}
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
MagickBooleanType revert,ExceptionInfo *exception)
{
MagickBooleanType
status;
ssize_t
y;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" applying layer opacity %.20g", (double) opacity);
if (opacity == OpaqueAlpha)
return(MagickTrue);
if (image->alpha_trait != BlendPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (revert == MagickFalse)
SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))*
opacity),q);
else if (opacity > 0)
SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/
(MagickRealType) opacity)),q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
return(status);
}
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
Image
*complete_mask;
MagickBooleanType
status;
PixelInfo
color;
ssize_t
y;
if (image->alpha_trait == UndefinedPixelTrait)
return(MagickTrue);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" applying opacity mask");
complete_mask=CloneImage(image,0,0,MagickTrue,exception);
if (complete_mask == (Image *) NULL)
return(MagickFalse);
complete_mask->alpha_trait=BlendPixelTrait;
GetPixelInfo(complete_mask,&color);
color.red=(MagickRealType) background;
(void) SetImageColor(complete_mask,&color,exception);
status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue,
mask->page.x-image->page.x,mask->page.y-image->page.y,exception);
if (status == MagickFalse)
{
complete_mask=DestroyImage(complete_mask);
return(status);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register Quantum
*p;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
alpha,
intensity;
alpha=(MagickRealType) GetPixelAlpha(image,q);
intensity=GetPixelIntensity(complete_mask,p);
if (revert == MagickFalse)
SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q);
else if (intensity > 0)
SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q);
q+=GetPixelChannels(image);
p+=GetPixelChannels(complete_mask);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
complete_mask=DestroyImage(complete_mask);
return(status);
}
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
ExceptionInfo *exception)
{
char
*key;
RandomInfo
*random_info;
StringInfo
*key_info;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" preserving opacity mask");
random_info=AcquireRandomInfo();
key_info=GetRandomKey(random_info,2+1);
key=(char *) GetStringInfoDatum(key_info);
key[8]=(char) layer_info->mask.background;
key[9]='\0';
layer_info->mask.image->page.x+=layer_info->page.x;
layer_info->mask.image->page.y+=layer_info->page.y;
(void) SetImageRegistry(ImageRegistryType,(const char *) key,
layer_info->mask.image,exception);
(void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
(const char *) key);
key_info=DestroyStringInfo(key_info);
random_info=DestroyRandomInfo(random_info);
}
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
const unsigned char *compact_pixels,const ssize_t depth,
const size_t number_pixels,unsigned char *pixels)
{
#define CheckNumberCompactPixels \
if (packets == 0) \
return(i); \
packets--
#define CheckNumberPixels(count) \
if (((ssize_t) i + count) > (ssize_t) number_pixels) \
return(i); \
i+=count
int
pixel;
register ssize_t
i,
j;
size_t
length;
ssize_t
packets;
packets=(ssize_t) number_compact_pixels;
for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); )
{
packets--;
length=(size_t) (*compact_pixels++);
if (length == 128)
continue;
if (length > 128)
{
length=256-length+1;
CheckNumberCompactPixels;
pixel=(*compact_pixels++);
for (j=0; j < (ssize_t) length; j++)
{
switch (depth)
{
case 1:
{
CheckNumberPixels(8);
*pixels++=(pixel >> 7) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 6) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 5) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 4) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 3) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 2) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 1) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 0) & 0x01 ? 0U : 255U;
break;
}
case 2:
{
CheckNumberPixels(4);
*pixels++=(unsigned char) ((pixel >> 6) & 0x03);
*pixels++=(unsigned char) ((pixel >> 4) & 0x03);
*pixels++=(unsigned char) ((pixel >> 2) & 0x03);
*pixels++=(unsigned char) ((pixel & 0x03) & 0x03);
break;
}
case 4:
{
CheckNumberPixels(2);
*pixels++=(unsigned char) ((pixel >> 4) & 0xff);
*pixels++=(unsigned char) ((pixel & 0x0f) & 0xff);
break;
}
default:
{
CheckNumberPixels(1);
*pixels++=(unsigned char) pixel;
break;
}
}
}
continue;
}
length++;
for (j=0; j < (ssize_t) length; j++)
{
CheckNumberCompactPixels;
switch (depth)
{
case 1:
{
CheckNumberPixels(8);
*pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U;
break;
}
case 2:
{
CheckNumberPixels(4);
*pixels++=(*compact_pixels >> 6) & 0x03;
*pixels++=(*compact_pixels >> 4) & 0x03;
*pixels++=(*compact_pixels >> 2) & 0x03;
*pixels++=(*compact_pixels & 0x03) & 0x03;
break;
}
case 4:
{
CheckNumberPixels(2);
*pixels++=(*compact_pixels >> 4) & 0xff;
*pixels++=(*compact_pixels & 0x0f) & 0xff;
break;
}
default:
{
CheckNumberPixels(1);
*pixels++=(*compact_pixels);
break;
}
}
compact_pixels++;
}
}
return(i);
}
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
const ssize_t number_layers)
{
ssize_t
i;
for (i=0; i<number_layers; i++)
{
if (layer_info[i].image != (Image *) NULL)
layer_info[i].image=DestroyImage(layer_info[i].image);
if (layer_info[i].mask.image != (Image *) NULL)
layer_info[i].mask.image=DestroyImage(layer_info[i].mask.image);
if (layer_info[i].info != (StringInfo *) NULL)
layer_info[i].info=DestroyStringInfo(layer_info[i].info);
}
return (LayerInfo *) RelinquishMagickMemory(layer_info);
}
static inline size_t GetPSDPacketSize(const Image *image)
{
if (image->storage_class == PseudoClass)
{
if (image->colors > 256)
return(2);
}
if (image->depth > 16)
return(4);
if (image->depth > 8)
return(2);
return(1);
}
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
if (psd_info->version == 1)
return((MagickSizeType) ReadBlobLong(image));
return((MagickSizeType) ReadBlobLongLong(image));
}
static inline size_t GetPSDRowSize(Image *image)
{
if (image->depth == 1)
return(((image->columns+7)/8)*GetPSDPacketSize(image));
else
return(image->columns*GetPSDPacketSize(image));
}
static const char *ModeToString(PSDImageType type)
{
switch (type)
{
case BitmapMode: return "Bitmap";
case GrayscaleMode: return "Grayscale";
case IndexedMode: return "Indexed";
case RGBMode: return "RGB";
case CMYKMode: return "CMYK";
case MultichannelMode: return "Multichannel";
case DuotoneMode: return "Duotone";
case LabMode: return "L*A*B";
default: return "unknown";
}
}
static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception)
{
ChannelType
channel_mask;
MagickBooleanType
status;
channel_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~
AlphaChannel));
status=NegateImage(image,MagickFalse,exception);
(void) SetImageChannelMask(image,channel_mask);
return(status);
}
static StringInfo *ParseImageResourceBlocks(PSDInfo *psd_info,Image *image,
const unsigned char *blocks,size_t length)
{
const unsigned char
*p;
ssize_t
offset;
StringInfo
*profile;
unsigned char
name_length;
unsigned int
count;
unsigned short
id,
short_sans;
if (length < 16)
return((StringInfo *) NULL);
profile=BlobToStringInfo((const unsigned char *) NULL,length);
SetStringInfoDatum(profile,blocks);
SetStringInfoName(profile,"8bim");
for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
{
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
break;
p+=4;
p=PushShortPixel(MSBEndian,p,&id);
p=PushCharPixel(p,&name_length);
if ((name_length % 2) == 0)
name_length++;
p+=name_length;
if (p > (blocks+length-4))
break;
p=PushLongPixel(MSBEndian,p,&count);
offset=(ssize_t) count;
if (((p+offset) < blocks) || ((p+offset) > (blocks+length)))
break;
switch (id)
{
case 0x03ed:
{
unsigned short
resolution;
/*
Resolution info.
*/
if (offset < 16)
break;
p=PushShortPixel(MSBEndian,p,&resolution);
image->resolution.x=(double) resolution;
(void) FormatImageProperty(image,"tiff:XResolution","%*g",
GetMagickPrecision(),image->resolution.x);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&resolution);
image->resolution.y=(double) resolution;
(void) FormatImageProperty(image,"tiff:YResolution","%*g",
GetMagickPrecision(),image->resolution.y);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
image->units=PixelsPerInchResolution;
break;
}
case 0x0421:
{
if ((offset > 4) && (*(p+4) == 0))
psd_info->has_merged_image=MagickFalse;
p+=offset;
break;
}
default:
{
p+=offset;
break;
}
}
if ((offset & 0x01) != 0)
p++;
}
return(profile);
}
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
if (mode == (const char *) NULL)
return(OverCompositeOp);
if (LocaleNCompare(mode,"norm",4) == 0)
return(OverCompositeOp);
if (LocaleNCompare(mode,"mul ",4) == 0)
return(MultiplyCompositeOp);
if (LocaleNCompare(mode,"diss",4) == 0)
return(DissolveCompositeOp);
if (LocaleNCompare(mode,"diff",4) == 0)
return(DifferenceCompositeOp);
if (LocaleNCompare(mode,"dark",4) == 0)
return(DarkenCompositeOp);
if (LocaleNCompare(mode,"lite",4) == 0)
return(LightenCompositeOp);
if (LocaleNCompare(mode,"hue ",4) == 0)
return(HueCompositeOp);
if (LocaleNCompare(mode,"sat ",4) == 0)
return(SaturateCompositeOp);
if (LocaleNCompare(mode,"colr",4) == 0)
return(ColorizeCompositeOp);
if (LocaleNCompare(mode,"lum ",4) == 0)
return(LuminizeCompositeOp);
if (LocaleNCompare(mode,"scrn",4) == 0)
return(ScreenCompositeOp);
if (LocaleNCompare(mode,"over",4) == 0)
return(OverlayCompositeOp);
if (LocaleNCompare(mode,"hLit",4) == 0)
return(HardLightCompositeOp);
if (LocaleNCompare(mode,"sLit",4) == 0)
return(SoftLightCompositeOp);
if (LocaleNCompare(mode,"smud",4) == 0)
return(ExclusionCompositeOp);
if (LocaleNCompare(mode,"div ",4) == 0)
return(ColorDodgeCompositeOp);
if (LocaleNCompare(mode,"idiv",4) == 0)
return(ColorBurnCompositeOp);
if (LocaleNCompare(mode,"lbrn",4) == 0)
return(LinearBurnCompositeOp);
if (LocaleNCompare(mode,"lddg",4) == 0)
return(LinearDodgeCompositeOp);
if (LocaleNCompare(mode,"lLit",4) == 0)
return(LinearLightCompositeOp);
if (LocaleNCompare(mode,"vLit",4) == 0)
return(VividLightCompositeOp);
if (LocaleNCompare(mode,"pLit",4) == 0)
return(PinLightCompositeOp);
if (LocaleNCompare(mode,"hMix",4) == 0)
return(HardMixCompositeOp);
return(OverCompositeOp);
}
static inline ssize_t ReadPSDString(Image *image,char *p,const size_t length)
{
ssize_t
count;
count=ReadBlob(image,length,(unsigned char *) p);
if ((count == (ssize_t) length) && (image->endian != MSBEndian))
{
char
*q;
q=p+length;
for(--q; p < q; ++p, --q)
{
*p = *p ^ *q,
*q = *p ^ *q,
*p = *p ^ *q;
}
}
return(count);
}
static inline void SetPSDPixel(Image *image,const size_t channels,
const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q,
ExceptionInfo *exception)
{
if (image->storage_class == PseudoClass)
{
PixelInfo
*color;
Quantum
index;
index=pixel;
if (packet_size == 1)
index=(Quantum) ScaleQuantumToChar(index);
index=(Quantum) ConstrainColormapIndex(image,(ssize_t) index,
exception);
if (type == 0)
SetPixelIndex(image,index,q);
if ((type == 0) && (channels > 1))
return;
color=image->colormap+(ssize_t) GetPixelIndex(image,q);
if (type != 0)
color->alpha=(MagickRealType) pixel;
SetPixelViaPixelInfo(image,color,q);
return;
}
switch (type)
{
case -1:
{
SetPixelAlpha(image,pixel,q);
break;
}
case -2:
case 0:
{
SetPixelRed(image,pixel,q);
break;
}
case -3:
case 1:
{
SetPixelGreen(image,pixel,q);
break;
}
case -4:
case 2:
{
SetPixelBlue(image,pixel,q);
break;
}
case 3:
{
if (image->colorspace == CMYKColorspace)
SetPixelBlack(image,pixel,q);
else
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,pixel,q);
break;
}
case 4:
{
if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
(channels > 3))
break;
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,pixel,q);
break;
}
}
}
static MagickBooleanType ReadPSDChannelPixels(Image *image,
const size_t channels,const ssize_t row,const ssize_t type,
const unsigned char *pixels,ExceptionInfo *exception)
{
Quantum
pixel;
register const unsigned char
*p;
register Quantum
*q;
register ssize_t
x;
size_t
packet_size;
p=pixels;
q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
if (q == (Quantum *) NULL)
return MagickFalse;
packet_size=GetPSDPacketSize(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
if (packet_size == 1)
pixel=ScaleCharToQuantum(*p++);
else
if (packet_size == 2)
{
unsigned short
nibble;
p=PushShortPixel(MSBEndian,p,&nibble);
pixel=ScaleShortToQuantum(nibble);
}
else
{
MagickFloatType
nibble;
p=PushFloatPixel(MSBEndian,p,&nibble);
pixel=ClampToQuantum((MagickRealType) (QuantumRange*nibble));
}
if (image->depth > 1)
{
SetPSDPixel(image,channels,type,packet_size,pixel,q,exception);
q+=GetPixelChannels(image);
}
else
{
ssize_t
bit,
number_bits;
number_bits=(ssize_t) image->columns-x;
if (number_bits > 8)
number_bits=8;
for (bit = 0; bit < (ssize_t) number_bits; bit++)
{
SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
& (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q,exception);
q+=GetPixelChannels(image);
x++;
}
if (x != (ssize_t) image->columns)
x--;
continue;
}
}
return(SyncAuthenticPixels(image,exception));
}
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
const ssize_t type,ExceptionInfo *exception)
{
MagickBooleanType
status;
size_t
row_size;
ssize_t
count,
y;
unsigned char
*pixels;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is RAW");
row_size=GetPSDRowSize(image);
pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
(void) memset(pixels,0,row_size*sizeof(*pixels));
status=MagickTrue;
for (y=0; y < (ssize_t) image->rows; y++)
{
status=MagickFalse;
count=ReadBlob(image,row_size,pixels);
if (count != (ssize_t) row_size)
break;
status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception);
if (status == MagickFalse)
break;
}
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(status);
}
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
const PSDInfo *psd_info,const size_t size)
{
MagickOffsetType
*sizes;
ssize_t
y;
sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
if(sizes != (MagickOffsetType *) NULL)
{
for (y=0; y < (ssize_t) size; y++)
{
if (psd_info->version == 1)
sizes[y]=(MagickOffsetType) ReadBlobShort(image);
else
sizes[y]=(MagickOffsetType) ReadBlobLong(image);
}
}
return sizes;
}
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
MagickBooleanType
status;
size_t
length,
row_size;
ssize_t
count,
y;
unsigned char
*compact_pixels,
*pixels;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is RLE compressed");
row_size=GetPSDRowSize(image);
pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
length=0;
for (y=0; y < (ssize_t) image->rows; y++)
if ((MagickOffsetType) length < sizes[y])
length=(size_t) sizes[y];
if (length > (row_size+2048)) /* arbitrary number */
{
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
}
compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels));
if (compact_pixels == (unsigned char *) NULL)
{
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
status=MagickTrue;
for (y=0; y < (ssize_t) image->rows; y++)
{
status=MagickFalse;
count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
if (count != (ssize_t) sizes[y])
break;
count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
(ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
if (count != (ssize_t) row_size)
break;
status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
exception);
if (status == MagickFalse)
break;
}
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
static void Unpredict8Bit(const Image *image,unsigned char *pixels,
const size_t count,const size_t row_size)
{
register unsigned char
*p;
size_t
length,
remaining;
p=pixels;
remaining=count;
while (remaining > 0)
{
length=image->columns;
while (--length)
{
*(p+1)+=*p;
p++;
}
p++;
remaining-=row_size;
}
}
static void Unpredict16Bit(const Image *image,unsigned char *pixels,
const size_t count,const size_t row_size)
{
register unsigned char
*p;
size_t
length,
remaining;
p=pixels;
remaining=count;
while (remaining > 0)
{
length=image->columns;
while (--length)
{
p[2]+=p[0]+((p[1]+p[3]) >> 8);
p[3]+=p[1];
p+=2;
}
p+=2;
remaining-=row_size;
}
}
static void Unpredict32Bit(const Image *image,unsigned char *pixels,
unsigned char *output_pixels,const size_t row_size)
{
register unsigned char
*p,
*q;
register ssize_t
y;
size_t
offset1,
offset2,
offset3,
remaining;
unsigned char
*start;
offset1=image->columns;
offset2=2*offset1;
offset3=3*offset1;
p=pixels;
q=output_pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
start=p;
remaining=row_size;
while (--remaining)
{
*(p+1)+=*p;
p++;
}
p=start;
remaining=image->columns;
while (remaining--)
{
*(q++)=*p;
*(q++)=*(p+offset1);
*(q++)=*(p+offset2);
*(q++)=*(p+offset3);
p++;
}
p=start+row_size;
}
}
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
const ssize_t type,const PSDCompressionType compression,
const size_t compact_size,ExceptionInfo *exception)
{
MagickBooleanType
status;
register unsigned char
*p;
size_t
count,
packet_size,
row_size;
register ssize_t
y;
unsigned char
*compact_pixels,
*pixels;
z_stream
stream;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is ZIP compressed");
if ((MagickSizeType) compact_size > GetBlobSize(image))
ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
image->filename);
compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
sizeof(*compact_pixels));
if (compact_pixels == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
packet_size=GetPSDPacketSize(image);
row_size=image->columns*packet_size;
count=image->rows*row_size;
pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
{
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
{
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
image->filename);
}
memset(&stream,0,sizeof(stream));
stream.data_type=Z_BINARY;
stream.next_in=(Bytef *)compact_pixels;
stream.avail_in=(uInt) compact_size;
stream.next_out=(Bytef *)pixels;
stream.avail_out=(uInt) count;
if (inflateInit(&stream) == Z_OK)
{
int
ret;
while (stream.avail_out > 0)
{
ret=inflate(&stream,Z_SYNC_FLUSH);
if ((ret != Z_OK) && (ret != Z_STREAM_END))
{
(void) inflateEnd(&stream);
compact_pixels=(unsigned char *) RelinquishMagickMemory(
compact_pixels);
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(MagickFalse);
}
if (ret == Z_STREAM_END)
break;
}
(void) inflateEnd(&stream);
}
if (compression == ZipWithPrediction)
{
if (packet_size == 1)
Unpredict8Bit(image,pixels,count,row_size);
else if (packet_size == 2)
Unpredict16Bit(image,pixels,count,row_size);
else if (packet_size == 4)
{
unsigned char
*output_pixels;
output_pixels=(unsigned char *) AcquireQuantumMemory(count,
sizeof(*output_pixels));
if (pixels == (unsigned char *) NULL)
{
compact_pixels=(unsigned char *) RelinquishMagickMemory(
compact_pixels);
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
ThrowBinaryException(ResourceLimitError,
"MemoryAllocationFailed",image->filename);
}
Unpredict32Bit(image,pixels,output_pixels,row_size);
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
pixels=output_pixels;
}
}
status=MagickTrue;
p=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
if (status == MagickFalse)
break;
p+=row_size;
}
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(status);
}
#endif
static MagickBooleanType ReadPSDChannel(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
const size_t channel,const PSDCompressionType compression,
ExceptionInfo *exception)
{
Image
*channel_image,
*mask;
MagickOffsetType
offset;
MagickBooleanType
status;
channel_image=image;
mask=(Image *) NULL;
if ((layer_info->channel_info[channel].type < -1) &&
(layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0))
{
const char
*option;
/*
Ignore mask that is not a user supplied layer mask, if the mask is
disabled or if the flags have unsupported values.
*/
option=GetImageOption(image_info,"psd:preserve-opacity-mask");
if ((layer_info->channel_info[channel].type != -2) ||
(layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
(IsStringTrue(option) == MagickFalse)))
{
(void) SeekBlob(image,(MagickOffsetType)
layer_info->channel_info[channel].size-2,SEEK_CUR);
return(MagickTrue);
}
mask=CloneImage(image,layer_info->mask.page.width,
layer_info->mask.page.height,MagickFalse,exception);
if (mask != (Image *) NULL)
{
(void) ResetImagePixels(mask,exception);
(void) SetImageType(mask,GrayscaleType,exception);
channel_image=mask;
}
}
offset=TellBlob(image);
status=MagickFalse;
switch(compression)
{
case Raw:
status=ReadPSDChannelRaw(channel_image,psd_info->channels,
(ssize_t) layer_info->channel_info[channel].type,exception);
break;
case RLE:
{
MagickOffsetType
*sizes;
sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
if (sizes == (MagickOffsetType *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ReadPSDChannelRLE(channel_image,psd_info,
(ssize_t) layer_info->channel_info[channel].type,sizes,exception);
sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
}
break;
case ZipWithPrediction:
case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
status=ReadPSDChannelZip(channel_image,layer_info->channels,
(ssize_t) layer_info->channel_info[channel].type,compression,
layer_info->channel_info[channel].size-2,exception);
#else
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
"'%s' (ZLIB)",image->filename);
#endif
break;
default:
(void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
"CompressionNotSupported","'%.20g'",(double) compression);
break;
}
(void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2,
SEEK_SET);
if (status == MagickFalse)
{
if (mask != (Image *) NULL)
(void) DestroyImage(mask);
ThrowBinaryException(CoderError,"UnableToDecompressImage",
image->filename);
}
if (mask != (Image *) NULL)
{
if (layer_info->mask.image != (Image *) NULL)
layer_info->mask.image=DestroyImage(layer_info->mask.image);
layer_info->mask.image=mask;
}
return(status);
}
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
char
message[MagickPathExtent];
MagickBooleanType
status;
PSDCompressionType
compression;
ssize_t
j;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" setting up new layer image");
if (psd_info->mode != IndexedMode)
(void) SetImageBackgroundColor(layer_info->image,exception);
layer_info->image->compose=PSDBlendModeToCompositeOperator(
layer_info->blendkey);
if (layer_info->visible == MagickFalse)
layer_info->image->compose=NoCompositeOp;
/*
Set up some hidden attributes for folks that need them.
*/
(void) FormatLocaleString(message,MagickPathExtent,"%.20g",
(double) layer_info->page.x);
(void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
(void) FormatLocaleString(message,MagickPathExtent,"%.20g",
(double) layer_info->page.y);
(void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
(void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double)
layer_info->opacity);
(void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
(void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name,
exception);
status=MagickTrue;
for (j=0; j < (ssize_t) layer_info->channels; j++)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading data for channel %.20g",(double) j);
compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
layer_info->image->compression=ConvertPSDCompression(compression);
if (layer_info->channel_info[j].type == -1)
layer_info->image->alpha_trait=BlendPixelTrait;
status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
(size_t) j,compression,exception);
if (status == MagickFalse)
break;
}
if (status != MagickFalse)
status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
MagickFalse,exception);
if ((status != MagickFalse) &&
(layer_info->image->colorspace == CMYKColorspace))
status=NegateCMYK(layer_info->image,exception);
if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
{
const char
*option;
layer_info->mask.image->page.x=layer_info->mask.page.x;
layer_info->mask.image->page.y=layer_info->mask.page.y;
/* Do not composite the mask when it is disabled */
if ((layer_info->mask.flags & 0x02) == 0x02)
layer_info->mask.image->compose=NoCompositeOp;
else
status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
exception);
option=GetImageOption(image_info,"psd:preserve-opacity-mask");
if (IsStringTrue(option) != MagickFalse)
PreservePSDOpacityMask(image,layer_info,exception);
layer_info->mask.image=DestroyImage(layer_info->mask.image);
}
return(status);
}
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
LayerInfo *layer_info)
{
int
channel_type;
register ssize_t
i;
if (layer_info->channels < psd_info->min_channels)
return(MagickFalse);
channel_type=RedChannel;
if (psd_info->min_channels >= 3)
channel_type|=(GreenChannel | BlueChannel);
if (psd_info->min_channels >= 4)
channel_type|=BlackChannel;
for (i=0; i < (ssize_t) layer_info->channels; i++)
{
short
type;
type=layer_info->channel_info[i].type;
if ((i == 0) && (psd_info->mode == IndexedMode) && (type != 0))
return(MagickFalse);
if (type == -1)
{
channel_type|=AlphaChannel;
continue;
}
if (type < -1)
continue;
if (type == 0)
channel_type&=~RedChannel;
else if (type == 1)
channel_type&=~GreenChannel;
else if (type == 2)
channel_type&=~BlueChannel;
else if (type == 3)
channel_type&=~BlackChannel;
}
if (channel_type == 0)
return(MagickTrue);
if ((channel_type == AlphaChannel) &&
(layer_info->channels >= psd_info->min_channels + 1))
return(MagickTrue);
return(MagickFalse);
}
static void AttachPSDLayers(Image *image,LayerInfo *layer_info,
ssize_t number_layers)
{
register ssize_t
i;
ssize_t
j;
for (i=0; i < number_layers; i++)
{
if (layer_info[i].image == (Image *) NULL)
{
for (j=i; j < number_layers - 1; j++)
layer_info[j] = layer_info[j+1];
number_layers--;
i--;
}
}
if (number_layers == 0)
{
layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
return;
}
for (i=0; i < number_layers; i++)
{
if (i > 0)
layer_info[i].image->previous=layer_info[i-1].image;
if (i < (number_layers-1))
layer_info[i].image->next=layer_info[i+1].image;
layer_info[i].image->page=layer_info[i].page;
}
image->next=layer_info[0].image;
layer_info[0].image->previous=image;
layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}
static inline MagickBooleanType PSDSkipImage(const PSDInfo *psd_info,
const ImageInfo *image_info,const size_t index)
{
if (psd_info->has_merged_image == MagickFalse)
return(MagickFalse);
if (image_info->number_scenes == 0)
return(MagickFalse);
if (index < image_info->scene)
return(MagickTrue);
if (index > image_info->scene+image_info->number_scenes-1)
return(MagickTrue);
return(MagickFalse);
}
static void CheckMergedImageAlpha(const PSDInfo *psd_info,Image *image)
{
/*
The number of layers cannot be used to determine if the merged image
contains an alpha channel. So we enable it when we think we should.
*/
if (((psd_info->mode == GrayscaleMode) && (psd_info->channels > 1)) ||
((psd_info->mode == RGBMode) && (psd_info->channels > 3)) ||
((psd_info->mode == CMYKMode) && (psd_info->channels > 4)))
image->alpha_trait=BlendPixelTrait;
}
static void ParseAdditionalInfo(LayerInfo *layer_info)
{
char
key[5];
size_t
remaining_length;
unsigned char
*p;
unsigned int
size;
p=GetStringInfoDatum(layer_info->info);
remaining_length=GetStringInfoLength(layer_info->info);
while (remaining_length >= 12)
{
/* skip over signature */
p+=4;
key[0]=(char) (*p++);
key[1]=(char) (*p++);
key[2]=(char) (*p++);
key[3]=(char) (*p++);
key[4]='\0';
size=(unsigned int) (*p++) << 24;
size|=(unsigned int) (*p++) << 16;
size|=(unsigned int) (*p++) << 8;
size|=(unsigned int) (*p++);
size=size & 0xffffffff;
remaining_length-=12;
if ((size_t) size > remaining_length)
break;
if (LocaleNCompare(key,"luni",sizeof(key)) == 0)
{
unsigned char
*name;
unsigned int
length;
length=(unsigned int) (*p++) << 24;
length|=(unsigned int) (*p++) << 16;
length|=(unsigned int) (*p++) << 8;
length|=(unsigned int) (*p++);
if (length * 2 > size - 4)
break;
if (sizeof(layer_info->name) <= length)
break;
name=layer_info->name;
while (length > 0)
{
/* Only ASCII strings are supported */
if (*p++ != '\0')
break;
*name++=*p++;
length--;
}
if (length == 0)
*name='\0';
break;
}
else
p+=size;
remaining_length-=(size_t) size;
}
}
static MagickSizeType GetLayerInfoSize(const PSDInfo *psd_info,Image *image)
{
char
type[4];
MagickSizeType
size;
ssize_t
count;
size=GetPSDSize(psd_info,image);
if (size != 0)
return(size);
(void) ReadBlobLong(image);
count=ReadPSDString(image,type,4);
if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
return(0);
count=ReadPSDString(image,type,4);
if ((count == 4) && ((LocaleNCompare(type,"Mt16",4) == 0) ||
(LocaleNCompare(type,"Mt32",4) == 0) ||
(LocaleNCompare(type,"Mtrn",4) == 0)))
{
size=GetPSDSize(psd_info,image);
if (size != 0)
return(0);
image->alpha_trait=BlendPixelTrait;
count=ReadPSDString(image,type,4);
if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
return(0);
count=ReadPSDString(image,type,4);
}
if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) ||
(LocaleNCompare(type,"Lr32",4) == 0)))
size=GetPSDSize(psd_info,image);
return(size);
}
static MagickBooleanType ReadPSDLayersInternal(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,
const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
char
type[4];
LayerInfo
*layer_info;
MagickSizeType
size;
MagickBooleanType
status;
register ssize_t
i;
ssize_t
count,
index,
j,
number_layers;
size=GetLayerInfoSize(psd_info,image);
if (size == 0)
{
CheckMergedImageAlpha(psd_info,image);
return(MagickTrue);
}
layer_info=(LayerInfo *) NULL;
number_layers=(ssize_t) ReadBlobSignedShort(image);
if (number_layers < 0)
{
/*
The first alpha channel in the merged result contains the
transparency data for the merged result.
*/
number_layers=MagickAbsoluteValue(number_layers);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" negative layer count corrected for");
image->alpha_trait=BlendPixelTrait;
}
/*
We only need to know if the image has an alpha channel
*/
if (skip_layers != MagickFalse)
return(MagickTrue);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image contains %.20g layers",(double) number_layers);
if (number_layers == 0)
ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
image->filename);
layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
sizeof(*layer_info));
if (layer_info == (LayerInfo *) NULL)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" allocation of LayerInfo failed");
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info));
for (i=0; i < number_layers; i++)
{
ssize_t
top,
left,
bottom,
right;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading layer #%.20g",(double) i+1);
top=(ssize_t) ReadBlobSignedLong(image);
left=(ssize_t) ReadBlobSignedLong(image);
bottom=(ssize_t) ReadBlobSignedLong(image);
right=(ssize_t) ReadBlobSignedLong(image);
if ((right < left) || (bottom < top))
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
layer_info[i].page.y=top;
layer_info[i].page.x=left;
layer_info[i].page.width=(size_t) (right-left);
layer_info[i].page.height=(size_t) (bottom-top);
layer_info[i].channels=ReadBlobShort(image);
if (layer_info[i].channels > MaxPSDChannels)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
image->filename);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
(double) layer_info[i].page.x,(double) layer_info[i].page.y,
(double) layer_info[i].page.height,(double)
layer_info[i].page.width,(double) layer_info[i].channels);
for (j=0; j < (ssize_t) layer_info[i].channels; j++)
{
layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
if ((layer_info[i].channel_info[j].type < -4) ||
(layer_info[i].channel_info[j].type > 4))
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"NoSuchImageChannel",
image->filename);
}
layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
image);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" channel[%.20g]: type=%.20g, size=%.20g",(double) j,
(double) layer_info[i].channel_info[j].type,
(double) layer_info[i].channel_info[j].size);
}
if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
count=ReadPSDString(image,type,4);
if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer type was %.4s instead of 8BIM", type);
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
count=ReadPSDString(image,layer_info[i].blendkey,4);
if (count != 4)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
layer_info[i].flags=(unsigned char) ReadBlobByte(image);
layer_info[i].visible=!(layer_info[i].flags & 0x02);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
layer_info[i].blendkey,(double) layer_info[i].opacity,
layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
layer_info[i].visible ? "true" : "false");
(void) ReadBlobByte(image); /* filler */
size=ReadBlobLong(image);
if (size != 0)
{
MagickSizeType
combined_length,
length;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer contains additional info");
length=ReadBlobLong(image);
combined_length=length+4;
if (length != 0)
{
/*
Layer mask info.
*/
layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image);
layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image);
layer_info[i].mask.page.height=(size_t)
(ReadBlobSignedLong(image)-layer_info[i].mask.page.y);
layer_info[i].mask.page.width=(size_t) (
ReadBlobSignedLong(image)-layer_info[i].mask.page.x);
layer_info[i].mask.background=(unsigned char) ReadBlobByte(
image);
layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
if (!(layer_info[i].mask.flags & 0x01))
{
layer_info[i].mask.page.y=layer_info[i].mask.page.y-
layer_info[i].page.y;
layer_info[i].mask.page.x=layer_info[i].mask.page.x-
layer_info[i].page.x;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
(double) layer_info[i].mask.page.x,(double)
layer_info[i].mask.page.y,(double)
layer_info[i].mask.page.width,(double)
layer_info[i].mask.page.height,(double) ((MagickOffsetType)
length)-18);
/*
Skip over the rest of the layer mask information.
*/
if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
length=ReadBlobLong(image);
combined_length+=length+4;
if (length != 0)
{
/*
Layer blending ranges info.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer blending ranges: length=%.20g",(double)
((MagickOffsetType) length));
if (DiscardBlobBytes(image,length) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
/*
Layer name.
*/
length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
combined_length+=length+1;
if (length > 0)
(void) ReadBlob(image,(size_t) length++,layer_info[i].name);
layer_info[i].name[length]='\0';
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer name: %s",layer_info[i].name);
if ((length % 4) != 0)
{
length=4-(length % 4);
combined_length+=length;
/* Skip over the padding of the layer name */
if (DiscardBlobBytes(image,length) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
length=(MagickSizeType) size-combined_length;
if (length > 0)
{
unsigned char
*info;
if (length > GetBlobSize(image))
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"InsufficientImageDataInFile",image->filename);
}
layer_info[i].info=AcquireStringInfo((const size_t) length);
info=GetStringInfoDatum(layer_info[i].info);
(void) ReadBlob(image,(const size_t) length,info);
ParseAdditionalInfo(&layer_info[i]);
}
}
}
for (i=0; i < number_layers; i++)
{
if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is empty");
if (layer_info[i].info != (StringInfo *) NULL)
layer_info[i].info=DestroyStringInfo(layer_info[i].info);
continue;
}
/*
Allocate layered image.
*/
layer_info[i].image=CloneImage(image,layer_info[i].page.width,
layer_info[i].page.height,MagickFalse,exception);
if (layer_info[i].image == (Image *) NULL)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" allocation of image for layer %.20g failed",(double) i);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
if (layer_info[i].info != (StringInfo *) NULL)
{
(void) SetImageProfile(layer_info[i].image,"psd:additional-info",
layer_info[i].info,exception);
layer_info[i].info=DestroyStringInfo(layer_info[i].info);
}
}
if (image_info->ping != MagickFalse)
{
AttachPSDLayers(image,layer_info,number_layers);
return(MagickTrue);
}
status=MagickTrue;
index=0;
for (i=0; i < number_layers; i++)
{
if ((layer_info[i].image == (Image *) NULL) ||
(PSDSkipImage(psd_info, image_info,++index) != MagickFalse))
{
for (j=0; j < (ssize_t) layer_info[i].channels; j++)
{
if (DiscardBlobBytes(image,(MagickSizeType)
layer_info[i].channel_info[j].size) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
continue;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading data for layer %.20g",(double) i);
status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
exception);
if (status == MagickFalse)
break;
status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
(MagickSizeType) number_layers);
if (status == MagickFalse)
break;
}
if (status != MagickFalse)
AttachPSDLayers(image,layer_info,number_layers);
else
layer_info=DestroyLayerInfo(layer_info,number_layers);
return(status);
}
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
PolicyDomain
domain;
PolicyRights
rights;
domain=CoderPolicyDomain;
rights=ReadPolicyRights;
if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse)
return(MagickTrue);
return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse,
exception));
}
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
MagickOffsetType
*sizes;
MagickBooleanType
status;
PSDCompressionType
compression;
register ssize_t
i;
if ((image_info->number_scenes != 0) && (image_info->scene != 0))
return(MagickTrue);
compression=(PSDCompressionType) ReadBlobMSBShort(image);
image->compression=ConvertPSDCompression(compression);
if (compression != Raw && compression != RLE)
{
(void) ThrowMagickException(exception,GetMagickModule(),
TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
return(MagickFalse);
}
sizes=(MagickOffsetType *) NULL;
if (compression == RLE)
{
sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
if (sizes == (MagickOffsetType *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
status=MagickTrue;
for (i=0; i < (ssize_t) psd_info->channels; i++)
{
ssize_t
type;
type=i;
if ((type == 1) && (psd_info->channels == 2))
type=-1;
if (compression == RLE)
status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows),
exception);
else
status=ReadPSDChannelRaw(image,psd_info->channels,type,exception);
if (status != MagickFalse)
status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
psd_info->channels);
if (status == MagickFalse)
break;
}
if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
status=NegateCMYK(image,exception);
if (status != MagickFalse)
status=CorrectPSDAlphaBlend(image_info,image,exception);
sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
return(status);
}
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*image;
MagickBooleanType
skip_layers;
MagickOffsetType
offset;
MagickSizeType
length;
MagickBooleanType
status;
PSDInfo
psd_info;
register ssize_t
i;
size_t
image_list_length;
ssize_t
count;
StringInfo
*profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read image header.
*/
image->endian=MSBEndian;
count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
psd_info.version=ReadBlobMSBShort(image);
if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
((psd_info.version != 1) && (psd_info.version != 2)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
(void) ReadBlob(image,6,psd_info.reserved);
psd_info.channels=ReadBlobMSBShort(image);
if (psd_info.channels < 1)
ThrowReaderException(CorruptImageError,"MissingImageChannel");
if (psd_info.channels > MaxPSDChannels)
ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
psd_info.rows=ReadBlobMSBLong(image);
psd_info.columns=ReadBlobMSBLong(image);
if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
(psd_info.columns > 30000)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.depth=ReadBlobMSBShort(image);
if ((psd_info.depth != 1) && (psd_info.depth != 8) &&
(psd_info.depth != 16) && (psd_info.depth != 32))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.mode=ReadBlobMSBShort(image);
if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
(double) psd_info.columns,(double) psd_info.rows,(double)
psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
psd_info.mode));
if (EOFBlob(image) != MagickFalse)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Initialize image.
*/
image->depth=psd_info.depth;
image->columns=psd_info.columns;
image->rows=psd_info.rows;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
status=ResetImagePixels(image,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
psd_info.min_channels=3;
if (psd_info.mode == LabMode)
(void) SetImageColorspace(image,LabColorspace,exception);
if (psd_info.mode == CMYKMode)
{
psd_info.min_channels=4;
(void) SetImageColorspace(image,CMYKColorspace,exception);
}
else
if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) ||
(psd_info.mode == DuotoneMode))
{
if (psd_info.depth != 32)
{
status=AcquireImageColormap(image,MagickMin((size_t)
(psd_info.depth < 16 ? 256 : 65536), MaxColormapSize),exception);
if (status == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image colormap allocated");
}
psd_info.min_channels=1;
(void) SetImageColorspace(image,GRAYColorspace,exception);
}
else
if (psd_info.mode == IndexedMode)
psd_info.min_channels=1;
if (psd_info.channels < psd_info.min_channels)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Read PSD raster colormap only present for indexed and duotone images.
*/
length=ReadBlobMSBLong(image);
if ((psd_info.mode == IndexedMode) && (length < 3))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (length != 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading colormap");
if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32))
{
/*
Duotone image data; the format of this data is undocumented.
32 bits per pixel; the colormap is ignored.
*/
(void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR);
}
else
{
size_t
number_colors;
/*
Read PSD raster colormap.
*/
number_colors=(size_t) length/3;
if (number_colors > 65536)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
image->alpha_trait=UndefinedPixelTrait;
}
}
if ((image->depth == 1) && (image->storage_class != PseudoClass))
ThrowReaderException(CorruptImageError, "ImproperImageHeader");
psd_info.has_merged_image=MagickTrue;
profile=(StringInfo *) NULL;
length=ReadBlobMSBLong(image);
if (length != 0)
{
unsigned char
*blocks;
/*
Image resources block.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading image resource blocks - %.20g bytes",(double)
((MagickOffsetType) length));
if (length > GetBlobSize(image))
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
sizeof(*blocks));
if (blocks == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,(size_t) length,blocks);
if ((count != (ssize_t) length) || (length < 4) ||
(LocaleNCompare((char *) blocks,"8BIM",4) != 0))
{
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
profile=ParseImageResourceBlocks(&psd_info,image,blocks,(size_t) length);
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
}
/*
Layer and mask block.
*/
length=GetPSDSize(&psd_info,image);
if (length == 8)
{
length=ReadBlobMSBLong(image);
length=ReadBlobMSBLong(image);
}
offset=TellBlob(image);
skip_layers=MagickFalse;
if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
(psd_info.has_merged_image != MagickFalse))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" read composite only");
skip_layers=MagickTrue;
}
if (length == 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image has no layers");
}
else
{
if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
exception) != MagickTrue)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Skip the rest of the layer and mask information.
*/
(void) SeekBlob(image,offset+length,SEEK_SET);
}
/*
If we are only "pinging" the image, then we're done - so return.
*/
if (EOFBlob(image) != MagickFalse)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
}
if (image_info->ping != MagickFalse)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
Read the precombined layer, present for PSD < 4 compatibility.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading the precombined layer");
image_list_length=GetImageListLength(image);
if ((psd_info.has_merged_image != MagickFalse) || (image_list_length == 1))
psd_info.has_merged_image=(MagickBooleanType) ReadPSDMergedImage(
image_info,image,&psd_info,exception);
if ((psd_info.has_merged_image == MagickFalse) && (image_list_length == 1) &&
(length != 0))
{
(void) SeekBlob(image,offset,SEEK_SET);
status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
exception);
if (status != MagickTrue)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
image_list_length=GetImageListLength(image);
}
if (psd_info.has_merged_image == MagickFalse)
{
Image
*merged;
if (image_list_length == 1)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
}
image->background_color.alpha=(MagickRealType) TransparentAlpha;
image->background_color.alpha_trait=BlendPixelTrait;
(void) SetImageBackgroundColor(image,exception);
merged=MergeImageLayers(image,FlattenLayer,exception);
if (merged == (Image *) NULL)
{
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
ReplaceImageInList(&image,merged);
}
if (profile != (StringInfo *) NULL)
{
Image
*next;
i=0;
next=image;
while (next != (Image *) NULL)
{
if (PSDSkipImage(&psd_info,image_info,i++) == MagickFalse)
(void) SetImageProfile(next,GetStringInfoName(profile),profile,
exception);
next=next->next;
}
profile=DestroyStringInfo(profile);
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
MagickInfo
*entry;
entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags|=CoderEncoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags|=CoderEncoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
(void) UnregisterMagickInfo("PSB");
(void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
const size_t offset)
{
if (psd_info->version == 1)
return(WriteBlobMSBShort(image,(unsigned short) offset));
return(WriteBlobMSBLong(image,(unsigned int) offset));
}
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
const MagickSizeType size,const MagickOffsetType offset)
{
MagickOffsetType
current_offset;
ssize_t
result;
current_offset=TellBlob(image);
(void) SeekBlob(image,offset,SEEK_SET);
if (psd_info->version == 1)
result=WriteBlobMSBShort(image,(unsigned short) size);
else
result=WriteBlobMSBLong(image,(unsigned int) size);
(void) SeekBlob(image,current_offset,SEEK_SET);
return(result);
}
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
const MagickSizeType size)
{
if (psd_info->version == 1)
return(WriteBlobLong(image,(unsigned int) size));
return(WriteBlobLongLong(image,size));
}
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
const MagickSizeType size,const MagickOffsetType offset)
{
MagickOffsetType
current_offset;
ssize_t
result;
current_offset=TellBlob(image);
(void) SeekBlob(image,offset,SEEK_SET);
result=SetPSDSize(psd_info,image,size);
(void) SeekBlob(image,current_offset,SEEK_SET);
return(result);
}
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
const unsigned char *pixels,unsigned char *compact_pixels,
ExceptionInfo *exception)
{
int
count;
register ssize_t
i,
j;
register unsigned char
*q;
unsigned char
*packbits;
/*
Compress pixels with Packbits encoding.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(pixels != (unsigned char *) NULL);
assert(compact_pixels != (unsigned char *) NULL);
packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
if (packbits == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
q=compact_pixels;
for (i=(ssize_t) length; i != 0; )
{
switch (i)
{
case 1:
{
i--;
*q++=(unsigned char) 0;
*q++=(*pixels);
break;
}
case 2:
{
i-=2;
*q++=(unsigned char) 1;
*q++=(*pixels);
*q++=pixels[1];
break;
}
case 3:
{
i-=3;
if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
{
*q++=(unsigned char) ((256-3)+1);
*q++=(*pixels);
break;
}
*q++=(unsigned char) 2;
*q++=(*pixels);
*q++=pixels[1];
*q++=pixels[2];
break;
}
default:
{
if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
{
/*
Packed run.
*/
count=3;
while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
{
count++;
if (count >= 127)
break;
}
i-=count;
*q++=(unsigned char) ((256-count)+1);
*q++=(*pixels);
pixels+=count;
break;
}
/*
Literal run.
*/
count=0;
while ((*(pixels+count) != *(pixels+count+1)) ||
(*(pixels+count+1) != *(pixels+count+2)))
{
packbits[count+1]=pixels[count];
count++;
if (((ssize_t) count >= (i-3)) || (count >= 127))
break;
}
i-=count;
*packbits=(unsigned char) (count-1);
for (j=0; j <= (ssize_t) count; j++)
*q++=packbits[j];
pixels+=count;
break;
}
}
}
*q++=(unsigned char) 128; /* EOD marker */
packbits=(unsigned char *) RelinquishMagickMemory(packbits);
return((size_t) (q-compact_pixels));
}
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
const Image *next_image,const CompressionType compression,
const ssize_t channels)
{
size_t
length;
ssize_t
i,
y;
if (compression == RLECompression)
{
length=(size_t) WriteBlobShort(image,RLE);
for (i=0; i < channels; i++)
for (y=0; y < (ssize_t) next_image->rows; y++)
length+=SetPSDOffset(psd_info,image,0);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
else if (compression == ZipCompression)
length=(size_t) WriteBlobShort(image,ZipWithoutPrediction);
#endif
else
length=(size_t) WriteBlobShort(image,Raw);
return(length);
}
static size_t WritePSDChannel(const PSDInfo *psd_info,
const ImageInfo *image_info,Image *image,Image *next_image,
const QuantumType quantum_type, unsigned char *compact_pixels,
MagickOffsetType size_offset,const MagickBooleanType separate,
const CompressionType compression,ExceptionInfo *exception)
{
MagickBooleanType
monochrome;
QuantumInfo
*quantum_info;
register const Quantum
*p;
register ssize_t
i;
size_t
count,
length;
ssize_t
y;
unsigned char
*pixels;
#ifdef MAGICKCORE_ZLIB_DELEGATE
int
flush,
level;
unsigned char
*compressed_pixels;
z_stream
stream;
compressed_pixels=(unsigned char *) NULL;
flush=Z_NO_FLUSH;
#endif
count=0;
if (separate != MagickFalse)
{
size_offset=TellBlob(image)+2;
count+=WriteCompressionStart(psd_info,image,next_image,compression,1);
}
if (next_image->depth > 8)
next_image->depth=16;
monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
MagickTrue : MagickFalse;
quantum_info=AcquireQuantumInfo(image_info,next_image);
if (quantum_info == (QuantumInfo *) NULL)
return(0);
pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
if (compression == ZipCompression)
{
compressed_pixels=(unsigned char *) AcquireQuantumMemory(
MagickMinBufferExtent,sizeof(*compressed_pixels));
if (compressed_pixels == (unsigned char *) NULL)
{
quantum_info=DestroyQuantumInfo(quantum_info);
return(0);
}
memset(&stream,0,sizeof(stream));
stream.data_type=Z_BINARY;
level=Z_DEFAULT_COMPRESSION;
if ((image_info->quality > 0 && image_info->quality < 10))
level=(int) image_info->quality;
if (deflateInit(&stream,level) != Z_OK)
{
quantum_info=DestroyQuantumInfo(quantum_info);
compressed_pixels=(unsigned char *) RelinquishMagickMemory(
compressed_pixels);
return(0);
}
}
#endif
for (y=0; y < (ssize_t) next_image->rows; y++)
{
p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
if (monochrome != MagickFalse)
for (i=0; i < (ssize_t) length; i++)
pixels[i]=(~pixels[i]);
if (compression == RLECompression)
{
length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
exception);
count+=WriteBlob(image,length,compact_pixels);
size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
else if (compression == ZipCompression)
{
stream.avail_in=(uInt) length;
stream.next_in=(Bytef *) pixels;
if (y == (ssize_t) next_image->rows-1)
flush=Z_FINISH;
do {
stream.avail_out=(uInt) MagickMinBufferExtent;
stream.next_out=(Bytef *) compressed_pixels;
if (deflate(&stream,flush) == Z_STREAM_ERROR)
break;
length=(size_t) MagickMinBufferExtent-stream.avail_out;
if (length > 0)
count+=WriteBlob(image,length,compressed_pixels);
} while (stream.avail_out == 0);
}
#endif
else
count+=WriteBlob(image,length,pixels);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
if (compression == ZipCompression)
{
(void) deflateEnd(&stream);
compressed_pixels=(unsigned char *) RelinquishMagickMemory(
compressed_pixels);
}
#endif
quantum_info=DestroyQuantumInfo(quantum_info);
return(count);
}
static unsigned char *AcquireCompactPixels(const Image *image,
ExceptionInfo *exception)
{
size_t
packet_size;
unsigned char
*compact_pixels;
packet_size=image->depth > 8UL ? 2UL : 1UL;
compact_pixels=(unsigned char *) AcquireQuantumMemory((9*
image->columns)+1,packet_size*sizeof(*compact_pixels));
if (compact_pixels == (unsigned char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
}
return(compact_pixels);
}
static size_t WritePSDChannels(const PSDInfo *psd_info,
const ImageInfo *image_info,Image *image,Image *next_image,
MagickOffsetType size_offset,const MagickBooleanType separate,
ExceptionInfo *exception)
{
CompressionType
compression;
Image
*mask;
MagickOffsetType
rows_offset;
size_t
channels,
count,
length,
offset_length;
unsigned char
*compact_pixels;
count=0;
offset_length=0;
rows_offset=0;
compact_pixels=(unsigned char *) NULL;
compression=next_image->compression;
if (image_info->compression != UndefinedCompression)
compression=image_info->compression;
if (compression == RLECompression)
{
compact_pixels=AcquireCompactPixels(next_image,exception);
if (compact_pixels == (unsigned char *) NULL)
return(0);
}
channels=1;
if (separate == MagickFalse)
{
if ((next_image->storage_class != PseudoClass) ||
(IsImageGray(next_image) != MagickFalse))
{
if (IsImageGray(next_image) == MagickFalse)
channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 :
3);
if (next_image->alpha_trait != UndefinedPixelTrait)
channels++;
}
rows_offset=TellBlob(image)+2;
count+=WriteCompressionStart(psd_info,image,next_image,compression,
(ssize_t) channels);
offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
}
size_offset+=2;
if ((next_image->storage_class == PseudoClass) &&
(IsImageGray(next_image) == MagickFalse))
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
IndexQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
else
{
if (IsImageGray(next_image) != MagickFalse)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
GrayQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
else
{
if (next_image->colorspace == CMYKColorspace)
(void) NegateCMYK(next_image,exception);
length=WritePSDChannel(psd_info,image_info,image,next_image,
RedQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
length=WritePSDChannel(psd_info,image_info,image,next_image,
GreenQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
length=WritePSDChannel(psd_info,image_info,image,next_image,
BlueQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
if (next_image->colorspace == CMYKColorspace)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
BlackQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
}
if (next_image->alpha_trait != UndefinedPixelTrait)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
AlphaQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
}
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
if (next_image->colorspace == CMYKColorspace)
(void) NegateCMYK(next_image,exception);
if (separate != MagickFalse)
{
const char
*property;
property=GetImageArtifact(next_image,"psd:opacity-mask");
if (property != (const char *) NULL)
{
mask=(Image *) GetImageRegistry(ImageRegistryType,property,
exception);
if (mask != (Image *) NULL)
{
if (compression == RLECompression)
{
compact_pixels=AcquireCompactPixels(mask,exception);
if (compact_pixels == (unsigned char *) NULL)
return(0);
}
length=WritePSDChannel(psd_info,image_info,image,mask,
RedQuantum,compact_pixels,rows_offset,MagickTrue,compression,
exception);
(void) WritePSDSize(psd_info,image,length,size_offset);
count+=length;
compact_pixels=(unsigned char *) RelinquishMagickMemory(
compact_pixels);
}
}
}
return(count);
}
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
size_t
count,
length;
register ssize_t
i;
/*
Max length is 255.
*/
count=0;
length=(strlen(value) > 255UL ) ? 255UL : strlen(value);
if (length == 0)
count+=WriteBlobByte(image,0);
else
{
count+=WriteBlobByte(image,(unsigned char) length);
count+=WriteBlob(image,length,(const unsigned char *) value);
}
length++;
if ((length % padding) == 0)
return(count);
for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
count+=WriteBlobByte(image,0);
return(count);
}
static void WriteResolutionResourceBlock(Image *image)
{
double
x_resolution,
y_resolution;
unsigned short
units;
if (image->units == PixelsPerCentimeterResolution)
{
x_resolution=2.54*65536.0*image->resolution.x+0.5;
y_resolution=2.54*65536.0*image->resolution.y+0.5;
units=2;
}
else
{
x_resolution=65536.0*image->resolution.x+0.5;
y_resolution=65536.0*image->resolution.y+0.5;
units=1;
}
(void) WriteBlob(image,4,(const unsigned char *) "8BIM");
(void) WriteBlobMSBShort(image,0x03ED);
(void) WriteBlobMSBShort(image,0);
(void) WriteBlobMSBLong(image,16); /* resource size */
(void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
(void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
(void) WriteBlobMSBShort(image,units); /* width unit */
(void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
(void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
(void) WriteBlobMSBShort(image,units); /* height unit */
}
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
const signed short channel)
{
size_t
count;
count=(size_t) WriteBlobShort(image,(const unsigned short) channel);
count+=SetPSDSize(psd_info,image,0);
return(count);
}
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
register const unsigned char
*p;
size_t
length;
unsigned char
*datum;
unsigned int
count,
long_sans;
unsigned short
id,
short_sans;
length=GetStringInfoLength(bim_profile);
if (length < 16)
return;
datum=GetStringInfoDatum(bim_profile);
for (p=datum; (p >= datum) && (p < (datum+length-16)); )
{
register unsigned char
*q;
q=(unsigned char *) p;
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
break;
p=PushLongPixel(MSBEndian,p,&long_sans);
p=PushShortPixel(MSBEndian,p,&id);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushLongPixel(MSBEndian,p,&count);
if (id == 0x0000040f)
{
ssize_t
quantum;
quantum=PSDQuantum(count)+12;
if ((quantum >= 12) && (quantum < (ssize_t) length))
{
if ((q+quantum < (datum+length-16)))
(void) memmove(q,q+quantum,length-quantum-(q-datum));
SetStringInfoLength(bim_profile,length-quantum);
}
break;
}
p+=count;
if ((count & 0x01) != 0)
p++;
}
}
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
register const unsigned char
*p;
size_t
length;
unsigned char
*datum;
unsigned int
count,
long_sans;
unsigned short
id,
short_sans;
length=GetStringInfoLength(bim_profile);
if (length < 16)
return;
datum=GetStringInfoDatum(bim_profile);
for (p=datum; (p >= datum) && (p < (datum+length-16)); )
{
register unsigned char
*q;
ssize_t
cnt;
q=(unsigned char *) p;
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
return;
p=PushLongPixel(MSBEndian,p,&long_sans);
p=PushShortPixel(MSBEndian,p,&id);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushLongPixel(MSBEndian,p,&count);
cnt=PSDQuantum(count);
if (cnt < 0)
return;
if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
((ssize_t) length-(cnt+12)-(q-datum)) > 0)
{
(void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
SetStringInfoLength(bim_profile,length-(cnt+12));
break;
}
p+=count;
if ((count & 0x01) != 0)
p++;
}
}
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
#define PSDKeySize 5
#define PSDAllowedLength 36
char
key[PSDKeySize];
/* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
const char
allowed[PSDAllowedLength][PSDKeySize] = {
"blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
"GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
"lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
"post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
},
*option;
const StringInfo
*info;
MagickBooleanType
found;
register size_t
i;
size_t
remaining_length,
length;
StringInfo
*profile;
unsigned char
*p;
unsigned int
size;
info=GetImageProfile(image,"psd:additional-info");
if (info == (const StringInfo *) NULL)
return((const StringInfo *) NULL);
option=GetImageOption(image_info,"psd:additional-info");
if (LocaleCompare(option,"all") == 0)
return(info);
if (LocaleCompare(option,"selective") != 0)
{
profile=RemoveImageProfile(image,"psd:additional-info");
return(DestroyStringInfo(profile));
}
length=GetStringInfoLength(info);
p=GetStringInfoDatum(info);
remaining_length=length;
length=0;
while (remaining_length >= 12)
{
/* skip over signature */
p+=4;
key[0]=(char) (*p++);
key[1]=(char) (*p++);
key[2]=(char) (*p++);
key[3]=(char) (*p++);
key[4]='\0';
size=(unsigned int) (*p++) << 24;
size|=(unsigned int) (*p++) << 16;
size|=(unsigned int) (*p++) << 8;
size|=(unsigned int) (*p++);
size=size & 0xffffffff;
remaining_length-=12;
if ((size_t) size > remaining_length)
return((const StringInfo *) NULL);
found=MagickFalse;
for (i=0; i < PSDAllowedLength; i++)
{
if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
continue;
found=MagickTrue;
break;
}
remaining_length-=(size_t) size;
if (found == MagickFalse)
{
if (remaining_length > 0)
p=(unsigned char *) memmove(p-12,p+size,remaining_length);
continue;
}
length+=(size_t) size+12;
p+=size;
}
profile=RemoveImageProfile(image,"psd:additional-info");
if (length == 0)
return(DestroyStringInfo(profile));
SetStringInfoLength(profile,(const size_t) length);
(void) SetImageProfile(image,"psd:additional-info",info,exception);
return(profile);
}
static MagickBooleanType WritePSDLayersInternal(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size,
ExceptionInfo *exception)
{
char
layer_name[MagickPathExtent];
const char
*property;
const StringInfo
*info;
Image
*base_image,
*next_image;
MagickBooleanType
status;
MagickOffsetType
*layer_size_offsets,
size_offset;
register ssize_t
i;
size_t
layer_count,
layer_index,
length,
name_length,
rounded_size,
size;
status=MagickTrue;
base_image=GetNextImageInList(image);
if (base_image == (Image *) NULL)
base_image=image;
size=0;
size_offset=TellBlob(image);
(void) SetPSDSize(psd_info,image,0);
layer_count=0;
for (next_image=base_image; next_image != NULL; )
{
layer_count++;
next_image=GetNextImageInList(next_image);
}
if (image->alpha_trait != UndefinedPixelTrait)
size+=WriteBlobShort(image,-(unsigned short) layer_count);
else
size+=WriteBlobShort(image,(unsigned short) layer_count);
layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
(size_t) layer_count,sizeof(MagickOffsetType));
if (layer_size_offsets == (MagickOffsetType *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
layer_index=0;
for (next_image=base_image; next_image != NULL; )
{
Image
*mask;
unsigned char
default_color;
unsigned short
channels,
total_channels;
mask=(Image *) NULL;
property=GetImageArtifact(next_image,"psd:opacity-mask");
default_color=0;
if (property != (const char *) NULL)
{
mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception);
default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0);
}
size+=WriteBlobSignedLong(image,(signed int) next_image->page.y);
size+=WriteBlobSignedLong(image,(signed int) next_image->page.x);
size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+
next_image->rows));
size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+
next_image->columns));
channels=1;
if ((next_image->storage_class != PseudoClass) &&
(IsImageGray(next_image) == MagickFalse))
channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 :
3);
total_channels=channels;
if (next_image->alpha_trait != UndefinedPixelTrait)
total_channels++;
if (mask != (Image *) NULL)
total_channels++;
size+=WriteBlobShort(image,total_channels);
layer_size_offsets[layer_index++]=TellBlob(image);
for (i=0; i < (ssize_t) channels; i++)
size+=WriteChannelSize(psd_info,image,(signed short) i);
if (next_image->alpha_trait != UndefinedPixelTrait)
size+=WriteChannelSize(psd_info,image,-1);
if (mask != (Image *) NULL)
size+=WriteChannelSize(psd_info,image,-2);
size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM");
size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image));
property=GetImageArtifact(next_image,"psd:layer.opacity");
if (property != (const char *) NULL)
{
Quantum
opacity;
opacity=(Quantum) StringToInteger(property);
size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
(void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception);
}
else
size+=WriteBlobByte(image,255);
size+=WriteBlobByte(image,0);
size+=WriteBlobByte(image,(const unsigned char)
(next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. */
size+=WriteBlobByte(image,0);
info=GetAdditionalInformation(image_info,next_image,exception);
property=(const char *) GetImageProperty(next_image,"label",exception);
if (property == (const char *) NULL)
{
(void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g",
(double) layer_index);
property=layer_name;
}
name_length=strlen(property)+1;
if ((name_length % 4) != 0)
name_length+=(4-(name_length % 4));
if (info != (const StringInfo *) NULL)
name_length+=GetStringInfoLength(info);
name_length+=8;
if (mask != (Image *) NULL)
name_length+=20;
size+=WriteBlobLong(image,(unsigned int) name_length);
if (mask == (Image *) NULL)
size+=WriteBlobLong(image,0);
else
{
if (mask->compose != NoCompositeOp)
(void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
default_color),MagickTrue,exception);
mask->page.y+=image->page.y;
mask->page.x+=image->page.x;
size+=WriteBlobLong(image,20);
size+=WriteBlobSignedLong(image,(const signed int) mask->page.y);
size+=WriteBlobSignedLong(image,(const signed int) mask->page.x);
size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+
mask->page.y));
size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+
mask->page.x));
size+=WriteBlobByte(image,default_color);
size+=WriteBlobByte(image,(const unsigned char)
(mask->compose == NoCompositeOp ? 2 : 0));
size+=WriteBlobMSBShort(image,0);
}
size+=WriteBlobLong(image,0);
size+=WritePascalString(image,property,4);
if (info != (const StringInfo *) NULL)
size+=WriteBlob(image,GetStringInfoLength(info),
GetStringInfoDatum(info));
next_image=GetNextImageInList(next_image);
}
/*
Now the image data!
*/
next_image=base_image;
layer_index=0;
while (next_image != NULL)
{
length=WritePSDChannels(psd_info,image_info,image,next_image,
layer_size_offsets[layer_index++],MagickTrue,exception);
if (length == 0)
{
status=MagickFalse;
break;
}
size+=length;
next_image=GetNextImageInList(next_image);
}
/*
Write the total size
*/
if (layers_size != (size_t*) NULL)
*layers_size=size;
if ((size/2) != ((size+1)/2))
rounded_size=size+1;
else
rounded_size=size;
(void) WritePSDSize(psd_info,image,rounded_size,size_offset);
layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
layer_size_offsets);
/*
Remove the opacity mask from the registry
*/
next_image=base_image;
while (next_image != (Image *) NULL)
{
property=GetImageArtifact(next_image,"psd:opacity-mask");
if (property != (const char *) NULL)
(void) DeleteImageRegistry(property);
next_image=GetNextImageInList(next_image);
}
return(status);
}
ModuleExport MagickBooleanType WritePSDLayers(Image * image,
const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
PolicyDomain
domain;
PolicyRights
rights;
domain=CoderPolicyDomain;
rights=WritePolicyRights;
if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse)
return(MagickTrue);
return WritePSDLayersInternal(image,image_info,psd_info,(size_t*) NULL,
exception);
}
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
const StringInfo
*icc_profile;
MagickBooleanType
status;
PSDInfo
psd_info;
register ssize_t
i;
size_t
length,
num_channels,
packet_size;
StringInfo
*bim_profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
packet_size=(size_t) (image->depth > 8 ? 6 : 3);
if (image->alpha_trait != UndefinedPixelTrait)
packet_size+=image->depth > 8 ? 2 : 1;
psd_info.version=1;
if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
(image->columns > 30000) || (image->rows > 30000))
psd_info.version=2;
(void) WriteBlob(image,4,(const unsigned char *) "8BPS");
(void) WriteBlobMSBShort(image,psd_info.version); /* version */
for (i=1; i <= 6; i++)
(void) WriteBlobByte(image, 0); /* 6 bytes of reserved */
/* When the image has a color profile it won't be converted to gray scale */
if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
(SetImageGray(image,exception) != MagickFalse))
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
else
if ((image_info->type != TrueColorType) && (image_info->type !=
TrueColorAlphaType) && (image->storage_class == PseudoClass))
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
else
{
if (image->storage_class == PseudoClass)
(void) SetImageStorageClass(image,DirectClass,exception);
if (image->colorspace != CMYKColorspace)
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL);
else
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL);
}
(void) WriteBlobMSBShort(image,(unsigned short) num_channels);
(void) WriteBlobMSBLong(image,(unsigned int) image->rows);
(void) WriteBlobMSBLong(image,(unsigned int) image->columns);
if (IsImageGray(image) != MagickFalse)
{
MagickBooleanType
monochrome;
/*
Write depth & mode.
*/
monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
MagickTrue : MagickFalse;
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
}
else
{
(void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
if (((image_info->colorspace != UndefinedColorspace) ||
(image->colorspace != CMYKColorspace)) &&
(image_info->colorspace != CMYKColorspace))
{
(void) TransformImageColorspace(image,sRGBColorspace,exception);
(void) WriteBlobMSBShort(image,(unsigned short)
(image->storage_class == PseudoClass ? IndexedMode : RGBMode));
}
else
{
if (image->colorspace != CMYKColorspace)
(void) TransformImageColorspace(image,CMYKColorspace,exception);
(void) WriteBlobMSBShort(image,CMYKMode);
}
}
if ((IsImageGray(image) != MagickFalse) ||
(image->storage_class == DirectClass) || (image->colors > 256))
(void) WriteBlobMSBLong(image,0);
else
{
/*
Write PSD raster colormap.
*/
(void) WriteBlobMSBLong(image,768);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
image->colormap[i].red)));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
image->colormap[i].green)));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
image->colormap[i].blue)));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
}
/*
Image resource block.
*/
length=28; /* 0x03EB */
bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
icc_profile=GetImageProfile(image,"icc");
if (bim_profile != (StringInfo *) NULL)
{
bim_profile=CloneStringInfo(bim_profile);
if (icc_profile != (StringInfo *) NULL)
RemoveICCProfileFromResourceBlock(bim_profile);
RemoveResolutionFromResourceBlock(bim_profile);
length+=PSDQuantum(GetStringInfoLength(bim_profile));
}
if (icc_profile != (const StringInfo *) NULL)
length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
(void) WriteBlobMSBLong(image,(unsigned int) length);
WriteResolutionResourceBlock(image);
if (bim_profile != (StringInfo *) NULL)
{
(void) WriteBlob(image,GetStringInfoLength(bim_profile),
GetStringInfoDatum(bim_profile));
bim_profile=DestroyStringInfo(bim_profile);
}
if (icc_profile != (StringInfo *) NULL)
{
(void) WriteBlob(image,4,(const unsigned char *) "8BIM");
(void) WriteBlobMSBShort(image,0x0000040F);
(void) WriteBlobMSBShort(image,0);
(void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
icc_profile));
(void) WriteBlob(image,GetStringInfoLength(icc_profile),
GetStringInfoDatum(icc_profile));
if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile)))
(void) WriteBlobByte(image,0);
}
if (status != MagickFalse)
{
MagickOffsetType
size_offset;
size_t
size;
size_offset=TellBlob(image);
(void) SetPSDSize(&psd_info,image,0);
status=WritePSDLayersInternal(image,image_info,&psd_info,&size,
exception);
size_offset+=WritePSDSize(&psd_info,image,size+
(psd_info.version == 1 ? 8 : 12),size_offset);
}
(void) WriteBlobMSBLong(image,0); /* user mask data */
/*
Write composite image.
*/
if (status != MagickFalse)
{
CompressionType
compression;
compression=image->compression;
if (image_info->compression != UndefinedCompression)
image->compression=image_info->compression;
if (image->compression == ZipCompression)
image->compression=RLECompression;
if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse,
exception) == 0)
status=MagickFalse;
image->compression=compression;
}
(void) CloseBlob(image);
return(status);
}
|
ttables.h | // Copyright 2013 by Chris Dyer
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef _TTABLES_H_
#define _TTABLES_H_
#include <cassert>
#include <cmath>
#include <fstream>
#include <iostream>
#include <vector>
#include "src/hashtables.h"
#include "src/corpus.h"
struct Md {
static double digamma(double x) {
double result = 0, xx, xx2, xx4;
for ( ; x < 7; ++x)
result -= 1/x;
x -= 1.0/2.0;
xx = 1.0/x;
xx2 = xx*xx;
xx4 = xx2*xx2;
result += log(x)+(1./24.)*xx2-(7.0/960.0)*xx4+(31.0/8064.0)*xx4*xx2-(127.0/30720.0)*xx4*xx4;
return result;
}
static inline double log_poisson(unsigned x, const double& lambda) {
assert(lambda > 0.0);
return std::log(lambda) * x - lgamma(x + 1) - lambda;
}
};
class TTable {
public:
TTable() : frozen_(false), probs_initialized_(false) {}
// typedef std::unordered_map<unsigned, double> Word2Double;
typedef std::vector<Word2Double> Word2Word2Double;
inline double prob(const unsigned e, const unsigned f) const {
return probs_initialized_ ? ttable[e].find(f)->second : 1e-9;
}
inline double safe_prob(const int& e, const int& f) const {
if (e < static_cast<int>(ttable.size())) {
const Word2Double& cpd = ttable[e];
const Word2Double::const_iterator it = cpd.find(f);
if (it == cpd.end()) return 1e-9;
return it->second;
} else {
return 1e-9;
}
}
inline void SetMaxE(const unsigned e) {
// NOT thread safe
if (e >= counts.size())
counts.resize(e + 1);
}
inline void Insert(const unsigned e, const unsigned f) {
// NOT thread safe
if (e >= counts.size())
counts.resize(e + 1);
counts[e][f] = 0;
}
inline void Increment(const unsigned e, const unsigned f, const double x) {
#pragma omp atomic
counts[e].find(f)->second += x;
}
void NormalizeVB(const double alpha) {
ttable.swap(counts);
#pragma omp parallel for schedule(dynamic)
for (unsigned i = 0; i < ttable.size(); ++i) {
double tot = 0;
Word2Double& cpd = ttable[i];
for (Word2Double::iterator it = cpd.begin(); it != cpd.end(); ++it)
tot += it->second + alpha;
if (!tot) tot = 1;
const double digamma_tot = Md::digamma(tot);
for (Word2Double::iterator it = cpd.begin(); it != cpd.end(); ++it)
it->second = exp(Md::digamma(it->second + alpha) - digamma_tot);
}
ClearCounts();
probs_initialized_ = true;
}
void Normalize() {
ttable.swap(counts);
#pragma omp parallel for schedule(dynamic)
for (unsigned i = 0; i < ttable.size(); ++i) {
double tot = 0;
Word2Double& cpd = ttable[i];
for (Word2Double::iterator it = cpd.begin(); it != cpd.end(); ++it)
tot += it->second;
if (!tot) tot = 1;
for (Word2Double::iterator it = cpd.begin(); it != cpd.end(); ++it)
it->second /= tot;
}
ClearCounts();
probs_initialized_ = true;
}
void Freeze() {
// duplicate all values in counts into ttable
// later updates to both are semi-threadsafe
assert (!frozen_);
if (!frozen_) {
ttable.resize(counts.size());
for (unsigned i = 0; i < counts.size(); ++i) {
ttable[i] = counts[i];
}
}
frozen_ = true;
}
// adds counts from another TTable - probabilities remain unchanged
TTable& operator+=(const TTable& rhs) {
if (rhs.counts.size() > counts.size()) counts.resize(rhs.counts.size());
for (unsigned i = 0; i < rhs.counts.size(); ++i) {
const Word2Double& cpd = rhs.counts[i];
Word2Double& tgt = counts[i];
for (Word2Double::const_iterator j = cpd.begin(); j != cpd.end(); ++j) {
tgt[j->first] += j->second;
}
}
return *this;
}
void ExportToFile(const char* filename, Dict& d, double BEAM_THRESHOLD) const {
std::ofstream file(filename);
for (unsigned i = 0; i < ttable.size(); ++i) {
const std::string& a = d.Convert(i);
const Word2Double& cpd = ttable[i];
double max_p = -1;
for (auto& it : cpd)
if (it.second > max_p) max_p = it.second;
const double threshold = - log(max_p) * BEAM_THRESHOLD;
for (auto& it : cpd) {
const std::string& b = d.Convert(it.first);
double c = log(it.second);
if (c >= threshold)
file << a << '\t' << b << '\t' << c << std::endl;
}
}
file.close();
}
private:
void ClearCounts() {
#pragma omp parallel for schedule(dynamic)
for (size_t i=0; i<counts.size();++i) {
for (auto& cnt : counts[i]) {
cnt.second = 0.0;
}
}
}
Word2Word2Double ttable;
Word2Word2Double counts;
bool frozen_; // Disallow new e,f pairs to be added to counts
bool probs_initialized_; // If we can use the values in probs
public:
void DeserializeLogProbsFromText(std::istream* in, Dict& d);
};
#endif
|
Interp1PrimFifthOrderWENOChar.c | /*! @file Interp1PrimFifthOrderWENOChar.c
@author Debojyoti Ghosh
@brief Characteristic-based WENO5 Scheme
*/
#include <stdlib.h>
#include <basic.h>
#include <arrayfunctions.h>
#include <mathfunctions.h>
#include <interpolation.h>
#include <mpivars.h>
#include <hypar.h>
#ifdef with_omp
#include <omp.h>
#endif
#undef _MINIMUM_GHOSTS_
/*! \def _MINIMUM_GHOSTS_
* Minimum number of ghost points required for this interpolation
* method.
*/
#define _MINIMUM_GHOSTS_ 3
/*! @brief 5th order WENO reconstruction (characteristic-based) on a uniform grid
Computes the interpolated values of the first primitive of a function \f${\bf f}\left({\bf u}\right)\f$
at the interfaces from the cell-centered values of the function using the fifth order WENO scheme on a
uniform grid. The first primitive is defined as a function \f${\bf h}\left({\bf u}\right)\f$ that satisfies:
\f{equation}{
{\bf f}\left({\bf u}\left(x\right)\right) = \frac{1}{\Delta x} \int_{x-\Delta x/2}^{x+\Delta x/2} {\bf h}\left({\bf u}\left(\zeta\right)\right)d\zeta,
\f}
where \f$x\f$ is the spatial coordinate along the dimension of the interpolation. This function computes the 5th order WENO numerical approximation \f$\hat{\bf f}_{j+1/2} \approx {\bf h}_{j+1/2}\f$ as the convex combination of three 3rd order methods:
\f{align}{
&\ \omega_1\ \times\ \left[ \hat{\alpha}^k_{j+1/2} = \frac{1}{3} {\alpha}^k_{j-2} - \frac{7}{6} {\alpha}^k_{j-1} + \frac{11}{6} {\alpha}^k_j \right]\\
+ &\ \omega_2\ \times\ \left[ \hat{\alpha}^k_{j+1/2} = -\frac{1}{6} {\alpha}^k_{j-1} + \frac{5}{6} {\alpha}^k_j + \frac{1}{3} {\alpha}^k_{j+1} \right]\\
+ &\ \omega_3\ \times\ \left[ \hat{\alpha}^k_{j+1/2} = \frac{1}{3} {\alpha}^k_j + \frac{5}{6} {\alpha}^k_{j+1} - \frac{1}{6} {\alpha}^k_{j+2} \right]\\
\Rightarrow &\ \hat{\alpha}^k_{j+1/2} = \frac{\omega_1}{3} {\alpha}^k_{j-2} - \frac{1}{6}(7\omega_1+\omega_2){\alpha}^k_{j-1} + \frac{1}{6}(11\omega_1+5\omega_2+2\omega_3){\alpha}^k_j + \frac{1}{6}(2\omega_2+5\omega_3){\alpha}^k_{j+1} - \frac{\omega_3}{6}{\alpha}^k_{j+2},
\f}
where
\f{equation}{
\alpha^k = {\bf l}_k \cdot {\bf f},\ k=1,\cdots,n
\f}
is the \f$k\f$-th characteristic quantity, and \f${\bf l}_k\f$ is the \f$k\f$-th left eigenvector, \f${\bf r}_k\f$ is the \f$k\f$-th right eigenvector, and \f$n\f$ is #HyPar::nvars. The nonlinear weights \f$\omega_k; k=1,2,3\f$ are the WENO weights computed in WENOFifthOrderCalculateWeightsChar(). The final interpolated function is computed from the interpolated characteristic quantities as:
\f{equation}{
\hat{\bf f}_{j+1/2} = \sum_{k=1}^n \alpha^k_{j+1/2} {\bf r}_k
\f}
\b Implementation \b Notes:
+ This method assumes a uniform grid in the spatial dimension corresponding to the interpolation.
+ The method described above corresponds to a left-biased interpolation. The corresponding right-biased
interpolation can be obtained by reflecting the equations about interface j+1/2.
+ The left and right eigenvectors are computed at an averaged quantity at j+1/2. Thus, this function requires
functions to compute the average state, and the left and right eigenvectors. These are provided by the physical
model through
- #HyPar::GetLeftEigenvectors()
- #HyPar::GetRightEigenvectors()
- #HyPar::AveragingFunction()
If these functions are not provided by the physical model, then a characteristic-based interpolation cannot be used.
+ The function computes the interpolant for the entire grid in one call. It loops over all the grid lines along the interpolation direction
and carries out the 1D interpolation along these grid lines.
+ Location of cell-centers and cell interfaces along the spatial dimension of the interpolation is shown in the following figure:
@image html chap1_1Ddomain.png
@image latex chap1_1Ddomain.eps width=0.9\textwidth
\b Function \b arguments:
Argument | Type | Explanation
--------- | --------- | ---------------------------------------------
fI | double* | Array to hold the computed interpolant at the grid interfaces. This array must have the same layout as the solution, but with \b no \b ghost \b points. Its size should be the same as u in all dimensions, except dir (the dimension along which to interpolate) along which it should be larger by 1 (number of interfaces is 1 more than the number of interior cell centers).
fC | double* | Array with the cell-centered values of the flux function \f${\bf f}\left({\bf u}\right)\f$. This array must have the same layout and size as the solution, \b with \b ghost \b points.
u | double* | The solution array \f${\bf u}\f$ (with ghost points). If the interpolation is characteristic based, this is needed to compute the eigendecomposition. For a multidimensional problem, the layout is as follows: u is a contiguous 1D array of size (nvars*dim[0]*dim[1]*...*dim[D-1]) corresponding to the multi-dimensional solution, with the following ordering - nvars, dim[0], dim[1], ..., dim[D-1], where nvars is the number of solution components (#HyPar::nvars), dim is the local size (#HyPar::dim_local), D is the number of spatial dimensions.
x | double* | The grid array (with ghost points). This is used only by non-uniform-grid interpolation methods. For multidimensional problems, the layout is as follows: x is a contiguous 1D array of size (dim[0]+dim[1]+...+dim[D-1]), with the spatial coordinates along dim[0] stored from 0,...,dim[0]-1, the spatial coordinates along dim[1] stored along dim[0],...,dim[0]+dim[1]-1, and so forth.
upw | int | Upwinding direction: if positive, a left-biased interpolant will be computed; if negative, a right-biased interpolant will be computed. If the interpolation method is central, then this has no effect.
dir | int | Spatial dimension along which to interpolate (eg: 0 for 1D; 0 or 1 for 2D; 0,1 or 2 for 3D)
s | void* | Solver object of type #HyPar: the following variables are needed - #HyPar::ghosts, #HyPar::ndims, #HyPar::nvars, #HyPar::dim_local.
m | void* | MPI object of type #MPIVariables: this is needed only by compact interpolation method that need to solve a global implicit system across MPI ranks.
uflag | int | A flag indicating if the function being interpolated \f${\bf f}\f$ is the solution itself \f${\bf u}\f$ (if 1, \f${\bf f}\left({\bf u}\right) \equiv {\bf u}\f$).
\b Reference:
+ Jiang, G.-S., Shu, C.-W., Efficient Implementation of Weighted ENO Schemes, J. Comput. Phys., 126 (1), 1996, pp. 202-228, http://dx.doi.org/10.1006/jcph.1996.0130
*/
int Interp1PrimFifthOrderWENOChar(
double *fI, /*!< Array of interpolated function values at the interfaces */
double *fC, /*!< Array of cell-centered values of the function \f${\bf f}\left({\bf u}\right)\f$ */
double *u, /*!< Array of cell-centered values of the solution \f${\bf u}\f$ */
double *x, /*!< Grid coordinates */
int upw, /*!< Upwind direction (left or right biased) */
int dir, /*!< Spatial dimension along which to interpolation */
void *s, /*!< Object of type #HyPar containing solver-related variables */
void *m, /*!< Object of type #MPIVariables containing MPI-related variables */
int uflag /*!< Flag to indicate if \f$f(u) \equiv u\f$, i.e, if the solution is being reconstructed */
)
{
HyPar *solver = (HyPar*) s;
WENOParameters *weno = (WENOParameters*) solver->interp;
int i, k, v;
_DECLARE_IERR_;
int ghosts = solver->ghosts;
int ndims = solver->ndims;
int nvars = solver->nvars;
int *dim = solver->dim_local;
/* define some constants */
static const double one_sixth = 1.0/6.0;
double *ww1, *ww2, *ww3;
ww1 = weno->w1 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir];
ww2 = weno->w2 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir];
ww3 = weno->w3 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir];
/* create index and bounds for the outer loop, i.e., to loop over all 1D lines along
dimension "dir" */
int indexC[ndims], indexI[ndims], index_outer[ndims], bounds_outer[ndims], bounds_inter[ndims];
_ArrayCopy1D_(dim,bounds_outer,ndims); bounds_outer[dir] = 1;
_ArrayCopy1D_(dim,bounds_inter,ndims); bounds_inter[dir] += 1;
int N_outer; _ArrayProduct1D_(bounds_outer,ndims,N_outer);
/* allocate arrays for the averaged state, eigenvectors and characteristic interpolated f */
double R[nvars*nvars], L[nvars*nvars], uavg[nvars], fchar[nvars];
#pragma omp parallel for schedule(auto) default(shared) private(i,k,v,R,L,uavg,fchar,index_outer,indexC,indexI)
for (i=0; i<N_outer; i++) {
_ArrayIndexnD_(ndims,i,bounds_outer,index_outer,0);
_ArrayCopy1D_(index_outer,indexC,ndims);
_ArrayCopy1D_(index_outer,indexI,ndims);
for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) {
/* 1D indices of the stencil grid points */
int qm1,qm2,qm3,qp1,qp2;
if (upw > 0) {
indexC[dir] = indexI[dir]-3; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm3);
indexC[dir] = indexI[dir]-2; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm2);
indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
indexC[dir] = indexI[dir] ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp1);
indexC[dir] = indexI[dir]+1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp2);
} else {
indexC[dir] = indexI[dir]+2; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm3);
indexC[dir] = indexI[dir]+1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm2);
indexC[dir] = indexI[dir] ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp1);
indexC[dir] = indexI[dir]-2; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp2);
}
int p; /* 1D index of the interface */
_ArrayIndex1D_(ndims,bounds_inter,indexI,0,p);
/* find averaged state at this interface */
IERR solver->AveragingFunction(uavg,&u[nvars*qm1],&u[nvars*qp1],solver->physics); CHECKERR(ierr);
/* Get the left and right eigenvectors */
IERR solver->GetLeftEigenvectors (uavg,L,solver->physics,dir); CHECKERR(ierr);
IERR solver->GetRightEigenvectors (uavg,R,solver->physics,dir); CHECKERR(ierr);
/* For each characteristic field */
for (v = 0; v < nvars; v++) {
/* calculate the characteristic flux components along this characteristic */
double fm3, fm2, fm1, fp1, fp2;
fm3 = fm2 = fm1 = fp1 = fp2 = 0;
for (k = 0; k < nvars; k++) {
fm3 += L[v*nvars+k] * fC[qm3*nvars+k];
fm2 += L[v*nvars+k] * fC[qm2*nvars+k];
fm1 += L[v*nvars+k] * fC[qm1*nvars+k];
fp1 += L[v*nvars+k] * fC[qp1*nvars+k];
fp2 += L[v*nvars+k] * fC[qp2*nvars+k];
}
/* Candidate stencils and their optimal weights*/
double f1, f2, f3;
f1 = (2*one_sixth)*fm3 - (7.0*one_sixth)*fm2 + (11.0*one_sixth)*fm1;
f2 = (-one_sixth)*fm2 + (5.0*one_sixth)*fm1 + (2*one_sixth)*fp1;
f3 = (2*one_sixth)*fm1 + (5*one_sixth)*fp1 - (one_sixth)*fp2;
/* calculate WENO weights */
double w1,w2,w3;
w1 = *(ww1+p*nvars+v);
w2 = *(ww2+p*nvars+v);
w3 = *(ww3+p*nvars+v);
/* fifth order WENO approximation of the characteristic flux */
fchar[v] = w1*f1 + w2*f2 + w3*f3;
}
/* calculate the interface u from the characteristic u */
IERR MatVecMult(nvars,(fI+nvars*p),R,fchar); CHECKERR(ierr);
}
}
return(0);
}
|
statistic.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC %
% SS T A A T I SS T I C %
% SSS T AAAAA T I SSS T I C %
% SS T A A T I SS T I C %
% SSSSS T A A T IIIII SSSSS T IIIII CCCC %
% %
% %
% MagickCore Image Statistical Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/animate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/compress.h"
#include "MagickCore/constitute.h"
#include "MagickCore/display.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magic.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E v a l u a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EvaluateImage() applies a value to the image with an arithmetic, relational,
% or logical operator to an image. Use these operations to lighten or darken
% an image, to increase or decrease contrast in an image, or to produce the
% "negative" of an image.
%
% The format of the EvaluateImage method is:
%
% MagickBooleanType EvaluateImage(Image *image,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImages(Image *images,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o op: A channel op.
%
% o value: A value value.
%
% o exception: return any errors or warnings in this structure.
%
*/
typedef struct _PixelChannels
{
double
channel[MaxPixelChannels];
} PixelChannels;
static PixelChannels **DestroyPixelThreadSet(const Image *images,
PixelChannels **pixels)
{
ssize_t
i;
size_t
rows;
assert(pixels != (PixelChannels **) NULL);
rows=MagickMax(GetImageListLength(images),(size_t)
GetMagickResourceLimit(ThreadResource));
for (i=0; i < (ssize_t) rows; i++)
if (pixels[i] != (PixelChannels *) NULL)
pixels[i]=(PixelChannels *) RelinquishMagickMemory(pixels[i]);
pixels=(PixelChannels **) RelinquishMagickMemory(pixels);
return(pixels);
}
static PixelChannels **AcquirePixelThreadSet(const Image *images)
{
const Image
*next;
PixelChannels
**pixels;
ssize_t
i;
size_t
columns,
number_images,
rows;
number_images=GetImageListLength(images);
rows=MagickMax(number_images,(size_t) GetMagickResourceLimit(ThreadResource));
pixels=(PixelChannels **) AcquireQuantumMemory(rows,sizeof(*pixels));
if (pixels == (PixelChannels **) NULL)
return((PixelChannels **) NULL);
(void) memset(pixels,0,rows*sizeof(*pixels));
columns=MagickMax(number_images,MaxPixelChannels);
for (next=images; next != (Image *) NULL; next=next->next)
columns=MagickMax(next->columns,columns);
for (i=0; i < (ssize_t) rows; i++)
{
ssize_t
j;
pixels[i]=(PixelChannels *) AcquireQuantumMemory(columns,sizeof(**pixels));
if (pixels[i] == (PixelChannels *) NULL)
return(DestroyPixelThreadSet(images,pixels));
for (j=0; j < (ssize_t) columns; j++)
{
ssize_t
k;
for (k=0; k < MaxPixelChannels; k++)
pixels[i][j].channel[k]=0.0;
}
}
return(pixels);
}
static inline double EvaluateMax(const double x,const double y)
{
if (x > y)
return(x);
return(y);
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
const PixelChannels
*color_1,
*color_2;
double
distance;
ssize_t
i;
color_1=(const PixelChannels *) x;
color_2=(const PixelChannels *) y;
distance=0.0;
for (i=0; i < MaxPixelChannels; i++)
distance+=color_1->channel[i]-(double) color_2->channel[i];
return(distance < 0.0 ? -1 : distance > 0.0 ? 1 : 0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static double ApplyEvaluateOperator(RandomInfo *random_info,const Quantum pixel,
const MagickEvaluateOperator op,const double value)
{
double
result;
ssize_t
i;
result=0.0;
switch (op)
{
case UndefinedEvaluateOperator:
break;
case AbsEvaluateOperator:
{
result=(double) fabs((double) (pixel+value));
break;
}
case AddEvaluateOperator:
{
result=(double) (pixel+value);
break;
}
case AddModulusEvaluateOperator:
{
/*
This returns a 'floored modulus' of the addition which is a positive
result. It differs from % or fmod() that returns a 'truncated modulus'
result, where floor() is replaced by trunc() and could return a
negative result (which is clipped).
*/
result=pixel+value;
result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0));
break;
}
case AndEvaluateOperator:
{
result=(double) ((ssize_t) pixel & (ssize_t) (value+0.5));
break;
}
case CosineEvaluateOperator:
{
result=(double) (QuantumRange*(0.5*cos((double) (2.0*MagickPI*
QuantumScale*pixel*value))+0.5));
break;
}
case DivideEvaluateOperator:
{
result=pixel/(value == 0.0 ? 1.0 : value);
break;
}
case ExponentialEvaluateOperator:
{
result=(double) (QuantumRange*exp((double) (value*QuantumScale*pixel)));
break;
}
case GaussianNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,GaussianNoise,
value);
break;
}
case ImpulseNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,ImpulseNoise,
value);
break;
}
case InverseLogEvaluateOperator:
{
result=(QuantumRange*pow((value+1.0),QuantumScale*pixel)-1.0)*
PerceptibleReciprocal(value);
break;
}
case LaplacianNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,
LaplacianNoise,value);
break;
}
case LeftShiftEvaluateOperator:
{
result=(double) pixel;
for (i=0; i < (ssize_t) value; i++)
result*=2.0;
break;
}
case LogEvaluateOperator:
{
if ((QuantumScale*pixel) >= MagickEpsilon)
result=(double) (QuantumRange*log((double) (QuantumScale*value*pixel+
1.0))/log((double) (value+1.0)));
break;
}
case MaxEvaluateOperator:
{
result=(double) EvaluateMax((double) pixel,value);
break;
}
case MeanEvaluateOperator:
{
result=(double) (pixel+value);
break;
}
case MedianEvaluateOperator:
{
result=(double) (pixel+value);
break;
}
case MinEvaluateOperator:
{
result=(double) MagickMin((double) pixel,value);
break;
}
case MultiplicativeNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,
MultiplicativeGaussianNoise,value);
break;
}
case MultiplyEvaluateOperator:
{
result=(double) (value*pixel);
break;
}
case OrEvaluateOperator:
{
result=(double) ((ssize_t) pixel | (ssize_t) (value+0.5));
break;
}
case PoissonNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,PoissonNoise,
value);
break;
}
case PowEvaluateOperator:
{
if (pixel < 0)
result=(double) -(QuantumRange*pow((double) -(QuantumScale*pixel),
(double) value));
else
result=(double) (QuantumRange*pow((double) (QuantumScale*pixel),
(double) value));
break;
}
case RightShiftEvaluateOperator:
{
result=(double) pixel;
for (i=0; i < (ssize_t) value; i++)
result/=2.0;
break;
}
case RootMeanSquareEvaluateOperator:
{
result=((double) pixel*pixel+value);
break;
}
case SetEvaluateOperator:
{
result=value;
break;
}
case SineEvaluateOperator:
{
result=(double) (QuantumRange*(0.5*sin((double) (2.0*MagickPI*
QuantumScale*pixel*value))+0.5));
break;
}
case SubtractEvaluateOperator:
{
result=(double) (pixel-value);
break;
}
case SumEvaluateOperator:
{
result=(double) (pixel+value);
break;
}
case ThresholdEvaluateOperator:
{
result=(double) (((double) pixel <= value) ? 0 : QuantumRange);
break;
}
case ThresholdBlackEvaluateOperator:
{
result=(double) (((double) pixel <= value) ? 0 : pixel);
break;
}
case ThresholdWhiteEvaluateOperator:
{
result=(double) (((double) pixel > value) ? QuantumRange : pixel);
break;
}
case UniformNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,UniformNoise,
value);
break;
}
case XorEvaluateOperator:
{
result=(double) ((ssize_t) pixel ^ (ssize_t) (value+0.5));
break;
}
}
return(result);
}
static Image *AcquireImageCanvas(const Image *images,ExceptionInfo *exception)
{
const Image
*p,
*q;
size_t
columns,
rows;
q=images;
columns=images->columns;
rows=images->rows;
for (p=images; p != (Image *) NULL; p=p->next)
{
if (p->number_channels > q->number_channels)
q=p;
if (p->columns > columns)
columns=p->columns;
if (p->rows > rows)
rows=p->rows;
}
return(CloneImage(q,columns,rows,MagickTrue,exception));
}
MagickExport Image *EvaluateImages(const Image *images,
const MagickEvaluateOperator op,ExceptionInfo *exception)
{
#define EvaluateImageTag "Evaluate/Image"
CacheView
*evaluate_view,
**image_view;
const Image
*next;
Image
*image;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelChannels
**magick_restrict evaluate_pixels;
RandomInfo
**magick_restrict random_info;
size_t
number_images;
ssize_t
j,
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImageCanvas(images,exception);
if (image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
{
image=DestroyImage(image);
return((Image *) NULL);
}
number_images=GetImageListLength(images);
evaluate_pixels=AcquirePixelThreadSet(images);
if (evaluate_pixels == (PixelChannels **) NULL)
{
image=DestroyImage(image);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return((Image *) NULL);
}
image_view=(CacheView **) AcquireQuantumMemory(number_images,
sizeof(*image_view));
if (image_view == (CacheView **) NULL)
{
image=DestroyImage(image);
evaluate_pixels=DestroyPixelThreadSet(images,evaluate_pixels);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return(image);
}
next=images;
for (j=0; j < (ssize_t) number_images; j++)
{
image_view[j]=AcquireVirtualCacheView(next,exception);
next=GetNextImageInList(next);
}
/*
Evaluate image pixels.
*/
status=MagickTrue;
progress=0;
random_info=AcquireRandomInfoThreadSet();
evaluate_view=AcquireAuthenticCacheView(image,exception);
if (op == MedianEvaluateOperator)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Image
*next;
const int
id = GetOpenMPThreadId();
const Quantum
**p;
PixelChannels
*evaluate_pixel;
Quantum
*magick_restrict q;
ssize_t
x;
ssize_t
j;
if (status == MagickFalse)
continue;
p=(const Quantum **) AcquireQuantumMemory(number_images,sizeof(*p));
if (p == (const Quantum **) NULL)
{
status=MagickFalse;
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
images->filename);
continue;
}
for (j=0; j < (ssize_t) number_images; j++)
{
p[j]=GetCacheViewVirtualPixels(image_view[j],0,y,image->columns,1,
exception);
if (p[j] == (const Quantum *) NULL)
break;
}
q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
exception);
if ((j < (ssize_t) number_images) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
evaluate_pixel=evaluate_pixels[id];
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
next=images;
for (j=0; j < (ssize_t) number_images; j++)
{
for (i=0; i < MaxPixelChannels; i++)
evaluate_pixel[j].channel[i]=0.0;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(next,channel);
PixelTrait evaluate_traits = GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) ||
(evaluate_traits == UndefinedPixelTrait) ||
((traits & UpdatePixelTrait) == 0))
continue;
evaluate_pixel[j].channel[i]=ApplyEvaluateOperator(
random_info[id],GetPixelChannel(next,channel,p[j]),op,
evaluate_pixel[j].channel[i]);
}
p[j]+=GetPixelChannels(next);
next=GetNextImageInList(next);
}
qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel),
IntensityCompare);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) ||
((traits & UpdatePixelTrait) == 0))
continue;
q[i]=ClampToQuantum(evaluate_pixel[number_images/2].channel[i]);
}
q+=GetPixelChannels(image);
}
p=(const Quantum **) RelinquishMagickMemory(p);
if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(images,EvaluateImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
else
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Image
*next;
const int
id = GetOpenMPThreadId();
const Quantum
**p;
ssize_t
i,
x;
PixelChannels
*evaluate_pixel;
Quantum
*magick_restrict q;
ssize_t
j;
if (status == MagickFalse)
continue;
p=(const Quantum **) AcquireQuantumMemory(number_images,sizeof(*p));
if (p == (const Quantum **) NULL)
{
status=MagickFalse;
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
images->filename);
continue;
}
for (j=0; j < (ssize_t) number_images; j++)
{
p[j]=GetCacheViewVirtualPixels(image_view[j],0,y,image->columns,1,
exception);
if (p[j] == (const Quantum *) NULL)
break;
}
q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
exception);
if ((j < (ssize_t) number_images) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
evaluate_pixel=evaluate_pixels[id];
for (j=0; j < (ssize_t) image->columns; j++)
for (i=0; i < MaxPixelChannels; i++)
evaluate_pixel[j].channel[i]=0.0;
next=images;
for (j=0; j < (ssize_t) number_images; j++)
{
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(next); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(next,channel);
PixelTrait evaluate_traits = GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) ||
(evaluate_traits == UndefinedPixelTrait))
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
evaluate_pixel[x].channel[i]=ApplyEvaluateOperator(
random_info[id],GetPixelChannel(next,channel,p[j]),j == 0 ?
AddEvaluateOperator : op,evaluate_pixel[x].channel[i]);
}
p[j]+=GetPixelChannels(next);
}
next=GetNextImageInList(next);
}
for (x=0; x < (ssize_t) image->columns; x++)
{
switch (op)
{
case MeanEvaluateOperator:
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
evaluate_pixel[x].channel[i]/=(double) number_images;
break;
}
case MultiplyEvaluateOperator:
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
ssize_t
j;
for (j=0; j < (ssize_t) (number_images-1); j++)
evaluate_pixel[x].channel[i]*=QuantumScale;
}
break;
}
case RootMeanSquareEvaluateOperator:
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
evaluate_pixel[x].channel[i]=sqrt(evaluate_pixel[x].channel[i]/
number_images);
break;
}
default:
break;
}
}
for (x=0; x < (ssize_t) image->columns; x++)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) ||
((traits & UpdatePixelTrait) == 0))
continue;
q[i]=ClampToQuantum(evaluate_pixel[x].channel[i]);
}
q+=GetPixelChannels(image);
}
p=(const Quantum **) RelinquishMagickMemory(p);
if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(images,EvaluateImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
for (j=0; j < (ssize_t) number_images; j++)
image_view[j]=DestroyCacheView(image_view[j]);
image_view=(CacheView **) RelinquishMagickMemory(image_view);
evaluate_view=DestroyCacheView(evaluate_view);
evaluate_pixels=DestroyPixelThreadSet(images,evaluate_pixels);
random_info=DestroyRandomInfoThreadSet(random_info);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
MagickExport MagickBooleanType EvaluateImage(Image *image,
const MagickEvaluateOperator op,const double value,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
RandomInfo
**magick_restrict random_info;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
random_info=AcquireRandomInfoThreadSet();
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
result;
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & CopyPixelTrait) != 0)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
result=ApplyEvaluateOperator(random_info[id],q[i],op,value);
if (op == MeanEvaluateOperator)
result/=2.0;
q[i]=ClampToQuantum(result);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,EvaluateImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
random_info=DestroyRandomInfoThreadSet(random_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F u n c t i o n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FunctionImage() applies a value to the image with an arithmetic, relational,
% or logical operator to an image. Use these operations to lighten or darken
% an image, to increase or decrease contrast in an image, or to produce the
% "negative" of an image.
%
% The format of the FunctionImage method is:
%
% MagickBooleanType FunctionImage(Image *image,
% const MagickFunction function,const ssize_t number_parameters,
% const double *parameters,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o function: A channel function.
%
% o parameters: one or more parameters.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum ApplyFunction(Quantum pixel,const MagickFunction function,
const size_t number_parameters,const double *parameters,
ExceptionInfo *exception)
{
double
result;
ssize_t
i;
(void) exception;
result=0.0;
switch (function)
{
case PolynomialFunction:
{
/*
Polynomial: polynomial constants, highest to lowest order (e.g. c0*x^3+
c1*x^2+c2*x+c3).
*/
result=0.0;
for (i=0; i < (ssize_t) number_parameters; i++)
result=result*QuantumScale*pixel+parameters[i];
result*=QuantumRange;
break;
}
case SinusoidFunction:
{
double
amplitude,
bias,
frequency,
phase;
/*
Sinusoid: frequency, phase, amplitude, bias.
*/
frequency=(number_parameters >= 1) ? parameters[0] : 1.0;
phase=(number_parameters >= 2) ? parameters[1] : 0.0;
amplitude=(number_parameters >= 3) ? parameters[2] : 0.5;
bias=(number_parameters >= 4) ? parameters[3] : 0.5;
result=(double) (QuantumRange*(amplitude*sin((double) (2.0*
MagickPI*(frequency*QuantumScale*pixel+phase/360.0)))+bias));
break;
}
case ArcsinFunction:
{
double
bias,
center,
range,
width;
/*
Arcsin (peged at range limits for invalid results): width, center,
range, and bias.
*/
width=(number_parameters >= 1) ? parameters[0] : 1.0;
center=(number_parameters >= 2) ? parameters[1] : 0.5;
range=(number_parameters >= 3) ? parameters[2] : 1.0;
bias=(number_parameters >= 4) ? parameters[3] : 0.5;
result=2.0*PerceptibleReciprocal(width)*(QuantumScale*pixel-center);
if (result <= -1.0)
result=bias-range/2.0;
else
if (result >= 1.0)
result=bias+range/2.0;
else
result=(double) (range/MagickPI*asin((double) result)+bias);
result*=QuantumRange;
break;
}
case ArctanFunction:
{
double
center,
bias,
range,
slope;
/*
Arctan: slope, center, range, and bias.
*/
slope=(number_parameters >= 1) ? parameters[0] : 1.0;
center=(number_parameters >= 2) ? parameters[1] : 0.5;
range=(number_parameters >= 3) ? parameters[2] : 1.0;
bias=(number_parameters >= 4) ? parameters[3] : 0.5;
result=(double) (MagickPI*slope*(QuantumScale*pixel-center));
result=(double) (QuantumRange*(range/MagickPI*atan((double)
result)+bias));
break;
}
case UndefinedFunction:
break;
}
return(ClampToQuantum(result));
}
MagickExport MagickBooleanType FunctionImage(Image *image,
const MagickFunction function,const size_t number_parameters,
const double *parameters,ExceptionInfo *exception)
{
#define FunctionImageTag "Function/Image "
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
if (AccelerateFunctionImage(image,function,number_parameters,parameters,
exception) != MagickFalse)
return(MagickTrue);
#endif
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ApplyFunction(q[i],function,number_parameters,parameters,
exception);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,FunctionImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E n t r o p y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageEntropy() returns the entropy of one or more image channels.
%
% The format of the GetImageEntropy method is:
%
% MagickBooleanType GetImageEntropy(const Image *image,double *entropy,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o entropy: the average entropy of the selected channels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageEntropy(const Image *image,
double *entropy,ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
channel_statistics=GetImageStatistics(image,exception);
if (channel_statistics == (ChannelStatistics *) NULL)
return(MagickFalse);
*entropy=channel_statistics[CompositePixelChannel].entropy;
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtrema() returns the extrema of one or more image channels.
%
% The format of the GetImageExtrema method is:
%
% MagickBooleanType GetImageExtrema(const Image *image,size_t *minima,
% size_t *maxima,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageExtrema(const Image *image,
size_t *minima,size_t *maxima,ExceptionInfo *exception)
{
double
max,
min;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=GetImageRange(image,&min,&max,exception);
*minima=(size_t) ceil(min-0.5);
*maxima=(size_t) floor(max+0.5);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e K u r t o s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageKurtosis() returns the kurtosis and skewness of one or more image
% channels.
%
% The format of the GetImageKurtosis method is:
%
% MagickBooleanType GetImageKurtosis(const Image *image,double *kurtosis,
% double *skewness,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o kurtosis: the kurtosis of the channel.
%
% o skewness: the skewness of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageKurtosis(const Image *image,
double *kurtosis,double *skewness,ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
channel_statistics=GetImageStatistics(image,exception);
if (channel_statistics == (ChannelStatistics *) NULL)
return(MagickFalse);
*kurtosis=channel_statistics[CompositePixelChannel].kurtosis;
*skewness=channel_statistics[CompositePixelChannel].skewness;
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M e a n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMean() returns the mean and standard deviation of one or more image
% channels.
%
% The format of the GetImageMean method is:
%
% MagickBooleanType GetImageMean(const Image *image,double *mean,
% double *standard_deviation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mean: the average value in the channel.
%
% o standard_deviation: the standard deviation of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean,
double *standard_deviation,ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
channel_statistics=GetImageStatistics(image,exception);
if (channel_statistics == (ChannelStatistics *) NULL)
return(MagickFalse);
*mean=channel_statistics[CompositePixelChannel].mean;
*standard_deviation=
channel_statistics[CompositePixelChannel].standard_deviation;
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M e d i a n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMedian() returns the median pixel of one or more image channels.
%
% The format of the GetImageMedian method is:
%
% MagickBooleanType GetImageMedian(const Image *image,double *median,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o median: the average value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageMedian(const Image *image,double *median,
ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
channel_statistics=GetImageStatistics(image,exception);
if (channel_statistics == (ChannelStatistics *) NULL)
return(MagickFalse);
*median=channel_statistics[CompositePixelChannel].median;
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M o m e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMoments() returns the normalized moments of one or more image
% channels.
%
% The format of the GetImageMoments method is:
%
% ChannelMoments *GetImageMoments(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static size_t GetImageChannels(const Image *image)
{
ssize_t
i;
size_t
channels;
channels=0;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
channels++;
}
return((size_t) (channels == 0 ? 1 : channels));
}
MagickExport ChannelMoments *GetImageMoments(const Image *image,
ExceptionInfo *exception)
{
#define MaxNumberImageMoments 8
CacheView
*image_view;
ChannelMoments
*channel_moments;
double
M00[MaxPixelChannels+1],
M01[MaxPixelChannels+1],
M02[MaxPixelChannels+1],
M03[MaxPixelChannels+1],
M10[MaxPixelChannels+1],
M11[MaxPixelChannels+1],
M12[MaxPixelChannels+1],
M20[MaxPixelChannels+1],
M21[MaxPixelChannels+1],
M22[MaxPixelChannels+1],
M30[MaxPixelChannels+1];
PointInfo
centroid[MaxPixelChannels+1];
ssize_t
channel,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
channel_moments=(ChannelMoments *) AcquireQuantumMemory(MaxPixelChannels+1,
sizeof(*channel_moments));
if (channel_moments == (ChannelMoments *) NULL)
return(channel_moments);
(void) memset(channel_moments,0,(MaxPixelChannels+1)*
sizeof(*channel_moments));
(void) memset(centroid,0,sizeof(centroid));
(void) memset(M00,0,sizeof(M00));
(void) memset(M01,0,sizeof(M01));
(void) memset(M02,0,sizeof(M02));
(void) memset(M03,0,sizeof(M03));
(void) memset(M10,0,sizeof(M10));
(void) memset(M11,0,sizeof(M11));
(void) memset(M12,0,sizeof(M12));
(void) memset(M20,0,sizeof(M20));
(void) memset(M21,0,sizeof(M21));
(void) memset(M22,0,sizeof(M22));
(void) memset(M30,0,sizeof(M30));
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
/*
Compute center of mass (centroid).
*/
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
M00[channel]+=QuantumScale*p[i];
M00[MaxPixelChannels]+=QuantumScale*p[i];
M10[channel]+=x*QuantumScale*p[i];
M10[MaxPixelChannels]+=x*QuantumScale*p[i];
M01[channel]+=y*QuantumScale*p[i];
M01[MaxPixelChannels]+=y*QuantumScale*p[i];
}
p+=GetPixelChannels(image);
}
}
for (channel=0; channel <= MaxPixelChannels; channel++)
{
/*
Compute center of mass (centroid).
*/
centroid[channel].x=M10[channel]*PerceptibleReciprocal(M00[channel]);
centroid[channel].y=M01[channel]*PerceptibleReciprocal(M00[channel]);
}
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
/*
Compute the image moments.
*/
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
M11[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
QuantumScale*p[i];
M11[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
QuantumScale*p[i];
M20[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
QuantumScale*p[i];
M20[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
QuantumScale*p[i];
M02[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
QuantumScale*p[i];
M02[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
QuantumScale*p[i];
M21[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(y-centroid[channel].y)*QuantumScale*p[i];
M21[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(y-centroid[channel].y)*QuantumScale*p[i];
M12[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
(y-centroid[channel].y)*QuantumScale*p[i];
M12[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
(y-centroid[channel].y)*QuantumScale*p[i];
M22[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i];
M22[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i];
M30[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(x-centroid[channel].x)*QuantumScale*p[i];
M30[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(x-centroid[channel].x)*QuantumScale*p[i];
M03[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
(y-centroid[channel].y)*QuantumScale*p[i];
M03[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
(y-centroid[channel].y)*QuantumScale*p[i];
}
p+=GetPixelChannels(image);
}
}
M00[MaxPixelChannels]/=GetImageChannels(image);
M01[MaxPixelChannels]/=GetImageChannels(image);
M02[MaxPixelChannels]/=GetImageChannels(image);
M03[MaxPixelChannels]/=GetImageChannels(image);
M10[MaxPixelChannels]/=GetImageChannels(image);
M11[MaxPixelChannels]/=GetImageChannels(image);
M12[MaxPixelChannels]/=GetImageChannels(image);
M20[MaxPixelChannels]/=GetImageChannels(image);
M21[MaxPixelChannels]/=GetImageChannels(image);
M22[MaxPixelChannels]/=GetImageChannels(image);
M30[MaxPixelChannels]/=GetImageChannels(image);
for (channel=0; channel <= MaxPixelChannels; channel++)
{
/*
Compute elliptical angle, major and minor axes, eccentricity, & intensity.
*/
channel_moments[channel].centroid=centroid[channel];
channel_moments[channel].ellipse_axis.x=sqrt((2.0*
PerceptibleReciprocal(M00[channel]))*((M20[channel]+M02[channel])+
sqrt(4.0*M11[channel]*M11[channel]+(M20[channel]-M02[channel])*
(M20[channel]-M02[channel]))));
channel_moments[channel].ellipse_axis.y=sqrt((2.0*
PerceptibleReciprocal(M00[channel]))*((M20[channel]+M02[channel])-
sqrt(4.0*M11[channel]*M11[channel]+(M20[channel]-M02[channel])*
(M20[channel]-M02[channel]))));
channel_moments[channel].ellipse_angle=RadiansToDegrees(1.0/2.0*atan(2.0*
M11[channel]*PerceptibleReciprocal(M20[channel]-M02[channel])));
if (fabs(M11[channel]) < 0.0)
{
if ((fabs(M20[channel]-M02[channel]) >= 0.0) &&
((M20[channel]-M02[channel]) < 0.0))
channel_moments[channel].ellipse_angle+=90.0;
}
else
if (M11[channel] < 0.0)
{
if (fabs(M20[channel]-M02[channel]) >= 0.0)
{
if ((M20[channel]-M02[channel]) < 0.0)
channel_moments[channel].ellipse_angle+=90.0;
else
channel_moments[channel].ellipse_angle+=180.0;
}
}
else
if ((fabs(M20[channel]-M02[channel]) >= 0.0) &&
((M20[channel]-M02[channel]) < 0.0))
channel_moments[channel].ellipse_angle+=90.0;
channel_moments[channel].ellipse_eccentricity=sqrt(1.0-(
channel_moments[channel].ellipse_axis.y*
channel_moments[channel].ellipse_axis.y*PerceptibleReciprocal(
channel_moments[channel].ellipse_axis.x*
channel_moments[channel].ellipse_axis.x)));
channel_moments[channel].ellipse_intensity=M00[channel]*
PerceptibleReciprocal(MagickPI*channel_moments[channel].ellipse_axis.x*
channel_moments[channel].ellipse_axis.y+MagickEpsilon);
}
for (channel=0; channel <= MaxPixelChannels; channel++)
{
/*
Normalize image moments.
*/
M10[channel]=0.0;
M01[channel]=0.0;
M11[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(1.0+1.0)/2.0));
M20[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(2.0+0.0)/2.0));
M02[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(0.0+2.0)/2.0));
M21[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(2.0+1.0)/2.0));
M12[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(1.0+2.0)/2.0));
M22[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(2.0+2.0)/2.0));
M30[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(3.0+0.0)/2.0));
M03[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(0.0+3.0)/2.0));
M00[channel]=1.0;
}
image_view=DestroyCacheView(image_view);
for (channel=0; channel <= MaxPixelChannels; channel++)
{
/*
Compute Hu invariant moments.
*/
channel_moments[channel].invariant[0]=M20[channel]+M02[channel];
channel_moments[channel].invariant[1]=(M20[channel]-M02[channel])*
(M20[channel]-M02[channel])+4.0*M11[channel]*M11[channel];
channel_moments[channel].invariant[2]=(M30[channel]-3.0*M12[channel])*
(M30[channel]-3.0*M12[channel])+(3.0*M21[channel]-M03[channel])*
(3.0*M21[channel]-M03[channel]);
channel_moments[channel].invariant[3]=(M30[channel]+M12[channel])*
(M30[channel]+M12[channel])+(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]);
channel_moments[channel].invariant[4]=(M30[channel]-3.0*M12[channel])*
(M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]))+(3.0*M21[channel]-M03[channel])*
(M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]));
channel_moments[channel].invariant[5]=(M20[channel]-M02[channel])*
((M30[channel]+M12[channel])*(M30[channel]+M12[channel])-
(M21[channel]+M03[channel])*(M21[channel]+M03[channel]))+
4.0*M11[channel]*(M30[channel]+M12[channel])*(M21[channel]+M03[channel]);
channel_moments[channel].invariant[6]=(3.0*M21[channel]-M03[channel])*
(M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]))-(M30[channel]-3*M12[channel])*
(M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]));
channel_moments[channel].invariant[7]=M11[channel]*((M30[channel]+
M12[channel])*(M30[channel]+M12[channel])-(M03[channel]+M21[channel])*
(M03[channel]+M21[channel]))-(M20[channel]-M02[channel])*
(M30[channel]+M12[channel])*(M03[channel]+M21[channel]);
}
if (y < (ssize_t) image->rows)
channel_moments=(ChannelMoments *) RelinquishMagickMemory(channel_moments);
return(channel_moments);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l P e r c e p t u a l H a s h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePerceptualHash() returns the perceptual hash of one or more
% image channels.
%
% The format of the GetImagePerceptualHash method is:
%
% ChannelPerceptualHash *GetImagePerceptualHash(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
MagickExport ChannelPerceptualHash *GetImagePerceptualHash(const Image *image,
ExceptionInfo *exception)
{
ChannelPerceptualHash
*perceptual_hash;
char
*colorspaces,
*p,
*q;
const char
*artifact;
MagickBooleanType
status;
ssize_t
i;
perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory(
MaxPixelChannels+1UL,sizeof(*perceptual_hash));
if (perceptual_hash == (ChannelPerceptualHash *) NULL)
return((ChannelPerceptualHash *) NULL);
artifact=GetImageArtifact(image,"phash:colorspaces");
if (artifact != NULL)
colorspaces=AcquireString(artifact);
else
colorspaces=AcquireString("sRGB,HCLp");
perceptual_hash[0].number_colorspaces=0;
perceptual_hash[0].number_channels=0;
q=colorspaces;
for (i=0; (p=StringToken(",",&q)) != (char *) NULL; i++)
{
ChannelMoments
*moments;
Image
*hash_image;
size_t
j;
ssize_t
channel,
colorspace;
if (i >= MaximumNumberOfPerceptualColorspaces)
break;
colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,p);
if (colorspace < 0)
break;
perceptual_hash[0].colorspace[i]=(ColorspaceType) colorspace;
hash_image=BlurImage(image,0.0,1.0,exception);
if (hash_image == (Image *) NULL)
break;
hash_image->depth=8;
status=TransformImageColorspace(hash_image,(ColorspaceType) colorspace,
exception);
if (status == MagickFalse)
break;
moments=GetImageMoments(hash_image,exception);
perceptual_hash[0].number_colorspaces++;
perceptual_hash[0].number_channels+=GetImageChannels(hash_image);
hash_image=DestroyImage(hash_image);
if (moments == (ChannelMoments *) NULL)
break;
for (channel=0; channel <= MaxPixelChannels; channel++)
for (j=0; j < MaximumNumberOfImageMoments; j++)
perceptual_hash[channel].phash[i][j]=
(-MagickLog10(moments[channel].invariant[j]));
moments=(ChannelMoments *) RelinquishMagickMemory(moments);
}
colorspaces=DestroyString(colorspaces);
return(perceptual_hash);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e R a n g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageRange() returns the range of one or more image channels.
%
% The format of the GetImageRange method is:
%
% MagickBooleanType GetImageRange(const Image *image,double *minima,
% double *maxima,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageRange(const Image *image,double *minima,
double *maxima,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
initialize,
status;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=MagickTrue;
initialize=MagickTrue;
*maxima=0.0;
*minima=0.0;
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status,initialize) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
row_maxima = 0.0,
row_minima = 0.0;
MagickBooleanType
row_initialize;
const Quantum
*magick_restrict p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
row_initialize=MagickTrue;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
if (row_initialize != MagickFalse)
{
row_minima=(double) p[i];
row_maxima=(double) p[i];
row_initialize=MagickFalse;
}
else
{
if ((double) p[i] < row_minima)
row_minima=(double) p[i];
if ((double) p[i] > row_maxima)
row_maxima=(double) p[i];
}
}
p+=GetPixelChannels(image);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetImageRange)
#endif
{
if (initialize != MagickFalse)
{
*minima=row_minima;
*maxima=row_maxima;
initialize=MagickFalse;
}
else
{
if (row_minima < *minima)
*minima=row_minima;
if (row_maxima > *maxima)
*maxima=row_maxima;
}
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e S t a t i s t i c s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageStatistics() returns statistics for each channel in the image. The
% statistics include the channel depth, its minima, maxima, mean, standard
% deviation, kurtosis and skewness. You can access the red channel mean, for
% example, like this:
%
% channel_statistics=GetImageStatistics(image,exception);
% red_mean=channel_statistics[RedPixelChannel].mean;
%
% Use MagickRelinquishMemory() to free the statistics buffer.
%
% The format of the GetImageStatistics method is:
%
% ChannelStatistics *GetImageStatistics(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ssize_t GetMedianPixel(Quantum *pixels,const size_t n)
{
#define SwapPixels(alpha,beta) \
{ \
Quantum gamma=(alpha); \
(alpha)=(beta);(beta)=gamma; \
}
ssize_t
low = 0,
high = (ssize_t) n-1,
median = (low+high)/2;
for ( ; ; )
{
ssize_t
l = low+1,
h = high,
mid = (low+high)/2;
if (high <= low)
return(median);
if (high == (low+1))
{
if (pixels[low] > pixels[high])
SwapPixels(pixels[low],pixels[high]);
return(median);
}
if (pixels[mid] > pixels[high])
SwapPixels(pixels[mid],pixels[high]);
if (pixels[low] > pixels[high])
SwapPixels(pixels[low], pixels[high]);
if (pixels[mid] > pixels[low])
SwapPixels(pixels[mid],pixels[low]);
SwapPixels(pixels[mid],pixels[low+1]);
for ( ; ; )
{
do l++; while (pixels[low] > pixels[l]);
do h--; while (pixels[h] > pixels[low]);
if (h < l)
break;
SwapPixels(pixels[l],pixels[h]);
}
SwapPixels(pixels[low],pixels[h]);
if (h <= median)
low=l;
if (h >= median)
high=h-1;
}
}
MagickExport ChannelStatistics *GetImageStatistics(const Image *image,
ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
double
area,
*histogram,
standard_deviation;
MagickStatusType
status;
MemoryInfo
*median_info;
Quantum
*median;
QuantumAny
range;
ssize_t
i;
size_t
depth;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)*
sizeof(*histogram));
channel_statistics=(ChannelStatistics *) AcquireQuantumMemory(
MaxPixelChannels+1,sizeof(*channel_statistics));
if ((channel_statistics == (ChannelStatistics *) NULL) ||
(histogram == (double *) NULL))
{
if (histogram != (double *) NULL)
histogram=(double *) RelinquishMagickMemory(histogram);
if (channel_statistics != (ChannelStatistics *) NULL)
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(channel_statistics);
}
(void) memset(channel_statistics,0,(MaxPixelChannels+1)*
sizeof(*channel_statistics));
for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
{
channel_statistics[i].depth=1;
channel_statistics[i].maxima=(-MagickMaximumValue);
channel_statistics[i].minima=MagickMaximumValue;
}
(void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
sizeof(*histogram));
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
/*
Compute pixel statistics.
*/
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
if (GetPixelReadMask(image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
if (channel_statistics[channel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[channel].depth;
range=GetQuantumRange(depth);
status=p[i] != ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),
range) ? MagickTrue : MagickFalse;
if (status != MagickFalse)
{
channel_statistics[channel].depth++;
if (channel_statistics[channel].depth >
channel_statistics[CompositePixelChannel].depth)
channel_statistics[CompositePixelChannel].depth=
channel_statistics[channel].depth;
i--;
continue;
}
}
if ((double) p[i] < channel_statistics[channel].minima)
channel_statistics[channel].minima=(double) p[i];
if ((double) p[i] > channel_statistics[channel].maxima)
channel_statistics[channel].maxima=(double) p[i];
channel_statistics[channel].sum+=p[i];
channel_statistics[channel].sum_squared+=(double) p[i]*p[i];
channel_statistics[channel].sum_cubed+=(double) p[i]*p[i]*p[i];
channel_statistics[channel].sum_fourth_power+=(double) p[i]*p[i]*p[i]*
p[i];
channel_statistics[channel].area++;
if ((double) p[i] < channel_statistics[CompositePixelChannel].minima)
channel_statistics[CompositePixelChannel].minima=(double) p[i];
if ((double) p[i] > channel_statistics[CompositePixelChannel].maxima)
channel_statistics[CompositePixelChannel].maxima=(double) p[i];
histogram[GetPixelChannels(image)*ScaleQuantumToMap(
ClampToQuantum((double) p[i]))+i]++;
channel_statistics[CompositePixelChannel].sum+=(double) p[i];
channel_statistics[CompositePixelChannel].sum_squared+=(double)
p[i]*p[i];
channel_statistics[CompositePixelChannel].sum_cubed+=(double)
p[i]*p[i]*p[i];
channel_statistics[CompositePixelChannel].sum_fourth_power+=(double)
p[i]*p[i]*p[i]*p[i];
channel_statistics[CompositePixelChannel].area++;
}
p+=GetPixelChannels(image);
}
}
for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
{
/*
Normalize pixel statistics.
*/
area=PerceptibleReciprocal(channel_statistics[i].area);
channel_statistics[i].sum*=area;
channel_statistics[i].sum_squared*=area;
channel_statistics[i].sum_cubed*=area;
channel_statistics[i].sum_fourth_power*=area;
channel_statistics[i].mean=channel_statistics[i].sum;
channel_statistics[i].variance=channel_statistics[i].sum_squared;
standard_deviation=sqrt(channel_statistics[i].variance-
(channel_statistics[i].mean*channel_statistics[i].mean));
standard_deviation=sqrt(PerceptibleReciprocal(channel_statistics[i].area-
1.0)*channel_statistics[i].area*standard_deviation*standard_deviation);
channel_statistics[i].standard_deviation=standard_deviation;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
number_bins;
ssize_t
j;
/*
Compute pixel entropy.
*/
PixelChannel channel = GetPixelChannelChannel(image,i);
number_bins=0.0;
for (j=0; j <= (ssize_t) MaxMap; j++)
if (histogram[GetPixelChannels(image)*j+i] > 0.0)
number_bins++;
area=PerceptibleReciprocal(channel_statistics[channel].area);
for (j=0; j <= (ssize_t) MaxMap; j++)
{
double
count;
count=area*histogram[GetPixelChannels(image)*j+i];
channel_statistics[channel].entropy+=-count*MagickLog10(count)*
PerceptibleReciprocal(MagickLog10(number_bins));
channel_statistics[CompositePixelChannel].entropy+=-count*
MagickLog10(count)*PerceptibleReciprocal(MagickLog10(number_bins))/
GetPixelChannels(image);
}
}
histogram=(double *) RelinquishMagickMemory(histogram);
for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
{
/*
Compute kurtosis & skewness statistics.
*/
standard_deviation=PerceptibleReciprocal(
channel_statistics[i].standard_deviation);
channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-3.0*
channel_statistics[i].mean*channel_statistics[i].sum_squared+2.0*
channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].mean)*(standard_deviation*standard_deviation*
standard_deviation);
channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-4.0*
channel_statistics[i].mean*channel_statistics[i].sum_cubed+6.0*
channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean*
channel_statistics[i].mean*1.0*channel_statistics[i].mean*
channel_statistics[i].mean)*(standard_deviation*standard_deviation*
standard_deviation*standard_deviation)-3.0;
}
median_info=AcquireVirtualMemory(image->columns,image->rows*sizeof(*median));
if (median_info == (MemoryInfo *) NULL)
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
else
{
ssize_t
i;
median=(Quantum *) GetVirtualMemoryBlob(median_info);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
size_t
n = 0;
/*
Compute median statistics for each channel.
*/
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelReadMask(image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(image);
continue;
}
median[n++]=p[i];
}
p+=GetPixelChannels(image);
}
channel_statistics[channel].median=(double) median[
GetMedianPixel(median,n)];
}
median_info=RelinquishVirtualMemory(median_info);
}
channel_statistics[CompositePixelChannel].mean=0.0;
channel_statistics[CompositePixelChannel].median=0.0;
channel_statistics[CompositePixelChannel].standard_deviation=0.0;
channel_statistics[CompositePixelChannel].entropy=0.0;
for (i=0; i < (ssize_t) MaxPixelChannels; i++)
{
channel_statistics[CompositePixelChannel].mean+=
channel_statistics[i].mean;
channel_statistics[CompositePixelChannel].median+=
channel_statistics[i].median;
channel_statistics[CompositePixelChannel].standard_deviation+=
channel_statistics[i].standard_deviation;
channel_statistics[CompositePixelChannel].entropy+=
channel_statistics[i].entropy;
}
channel_statistics[CompositePixelChannel].mean/=(double)
GetImageChannels(image);
channel_statistics[CompositePixelChannel].median/=(double)
GetImageChannels(image);
channel_statistics[CompositePixelChannel].standard_deviation/=(double)
GetImageChannels(image);
channel_statistics[CompositePixelChannel].entropy/=(double)
GetImageChannels(image);
if (y < (ssize_t) image->rows)
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(channel_statistics);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l y n o m i a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolynomialImage() returns a new image where each pixel is the sum of the
% pixels in the image sequence after applying its corresponding terms
% (coefficient and degree pairs).
%
% The format of the PolynomialImage method is:
%
% Image *PolynomialImage(const Image *images,const size_t number_terms,
% const double *terms,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o number_terms: the number of terms in the list. The actual list length
% is 2 x number_terms + 1 (the constant).
%
% o terms: the list of polynomial coefficients and degree pairs and a
% constant.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolynomialImage(const Image *images,
const size_t number_terms,const double *terms,ExceptionInfo *exception)
{
#define PolynomialImageTag "Polynomial/Image"
CacheView
*polynomial_view;
Image
*image;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelChannels
**magick_restrict polynomial_pixels;
size_t
number_images;
ssize_t
y;
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImageCanvas(images,exception);
if (image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
{
image=DestroyImage(image);
return((Image *) NULL);
}
number_images=GetImageListLength(images);
polynomial_pixels=AcquirePixelThreadSet(images);
if (polynomial_pixels == (PixelChannels **) NULL)
{
image=DestroyImage(image);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return((Image *) NULL);
}
/*
Polynomial image pixels.
*/
status=MagickTrue;
progress=0;
polynomial_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
const int
id = GetOpenMPThreadId();
ssize_t
i,
x;
PixelChannels
*polynomial_pixel;
Quantum
*magick_restrict q;
ssize_t
j;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(polynomial_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
polynomial_pixel=polynomial_pixels[id];
for (j=0; j < (ssize_t) image->columns; j++)
for (i=0; i < MaxPixelChannels; i++)
polynomial_pixel[j].channel[i]=0.0;
next=images;
for (j=0; j < (ssize_t) number_images; j++)
{
const Quantum
*p;
if (j >= (ssize_t) number_terms)
continue;
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
image_view=DestroyCacheView(image_view);
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(next); i++)
{
MagickRealType
coefficient,
degree;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(next,channel);
PixelTrait polynomial_traits=GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) ||
(polynomial_traits == UndefinedPixelTrait))
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
coefficient=(MagickRealType) terms[2*j];
degree=(MagickRealType) terms[(j << 1)+1];
polynomial_pixel[x].channel[i]+=coefficient*
pow(QuantumScale*GetPixelChannel(image,channel,p),degree);
}
p+=GetPixelChannels(next);
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(QuantumRange*polynomial_pixel[x].channel[i]);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(polynomial_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(images,PolynomialImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
polynomial_view=DestroyCacheView(polynomial_view);
polynomial_pixels=DestroyPixelThreadSet(images,polynomial_pixels);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t a t i s t i c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StatisticImage() makes each pixel the min / max / median / mode / etc. of
% the neighborhood of the specified width and height.
%
% The format of the StatisticImage method is:
%
% Image *StatisticImage(const Image *image,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the statistic type (median, mode, etc.).
%
% o width: the width of the pixel neighborhood.
%
% o height: the height of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
typedef struct _SkipNode
{
size_t
next[9],
count,
signature;
} SkipNode;
typedef struct _SkipList
{
ssize_t
level;
SkipNode
*nodes;
} SkipList;
typedef struct _PixelList
{
size_t
length,
seed;
SkipList
skip_list;
size_t
signature;
} PixelList;
static PixelList *DestroyPixelList(PixelList *pixel_list)
{
if (pixel_list == (PixelList *) NULL)
return((PixelList *) NULL);
if (pixel_list->skip_list.nodes != (SkipNode *) NULL)
pixel_list->skip_list.nodes=(SkipNode *) RelinquishAlignedMemory(
pixel_list->skip_list.nodes);
pixel_list=(PixelList *) RelinquishMagickMemory(pixel_list);
return(pixel_list);
}
static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list)
{
ssize_t
i;
assert(pixel_list != (PixelList **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (pixel_list[i] != (PixelList *) NULL)
pixel_list[i]=DestroyPixelList(pixel_list[i]);
pixel_list=(PixelList **) RelinquishMagickMemory(pixel_list);
return(pixel_list);
}
static PixelList *AcquirePixelList(const size_t width,const size_t height)
{
PixelList
*pixel_list;
pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list));
if (pixel_list == (PixelList *) NULL)
return(pixel_list);
(void) memset((void *) pixel_list,0,sizeof(*pixel_list));
pixel_list->length=width*height;
pixel_list->skip_list.nodes=(SkipNode *) AcquireAlignedMemory(65537UL,
sizeof(*pixel_list->skip_list.nodes));
if (pixel_list->skip_list.nodes == (SkipNode *) NULL)
return(DestroyPixelList(pixel_list));
(void) memset(pixel_list->skip_list.nodes,0,65537UL*
sizeof(*pixel_list->skip_list.nodes));
pixel_list->signature=MagickCoreSignature;
return(pixel_list);
}
static PixelList **AcquirePixelListThreadSet(const size_t width,
const size_t height)
{
PixelList
**pixel_list;
ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixel_list=(PixelList **) AcquireQuantumMemory(number_threads,
sizeof(*pixel_list));
if (pixel_list == (PixelList **) NULL)
return((PixelList **) NULL);
(void) memset(pixel_list,0,number_threads*sizeof(*pixel_list));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixel_list[i]=AcquirePixelList(width,height);
if (pixel_list[i] == (PixelList *) NULL)
return(DestroyPixelListThreadSet(pixel_list));
}
return(pixel_list);
}
static void AddNodePixelList(PixelList *pixel_list,const size_t color)
{
SkipList
*p;
ssize_t
level;
size_t
search,
update[9];
/*
Initialize the node.
*/
p=(&pixel_list->skip_list);
p->nodes[color].signature=pixel_list->signature;
p->nodes[color].count=1;
/*
Determine where it belongs in the list.
*/
search=65536UL;
for (level=p->level; level >= 0; level--)
{
while (p->nodes[search].next[level] < color)
search=p->nodes[search].next[level];
update[level]=search;
}
/*
Generate a pseudo-random level for this node.
*/
for (level=0; ; level++)
{
pixel_list->seed=(pixel_list->seed*42893621L)+1L;
if ((pixel_list->seed & 0x300) != 0x300)
break;
}
if (level > 8)
level=8;
if (level > (p->level+2))
level=p->level+2;
/*
If we're raising the list's level, link back to the root node.
*/
while (level > p->level)
{
p->level++;
update[p->level]=65536UL;
}
/*
Link the node into the skip-list.
*/
do
{
p->nodes[color].next[level]=p->nodes[update[level]].next[level];
p->nodes[update[level]].next[level]=color;
} while (level-- > 0);
}
static inline void GetMedianPixelList(PixelList *pixel_list,Quantum *pixel)
{
SkipList
*p;
size_t
color;
ssize_t
count;
/*
Find the median value for each of the color.
*/
p=(&pixel_list->skip_list);
color=65536L;
count=0;
do
{
color=p->nodes[color].next[0];
count+=p->nodes[color].count;
} while (count <= (ssize_t) (pixel_list->length >> 1));
*pixel=ScaleShortToQuantum((unsigned short) color);
}
static inline void GetModePixelList(PixelList *pixel_list,Quantum *pixel)
{
SkipList
*p;
size_t
color,
max_count,
mode;
ssize_t
count;
/*
Make each pixel the 'predominant color' of the specified neighborhood.
*/
p=(&pixel_list->skip_list);
color=65536L;
mode=color;
max_count=p->nodes[mode].count;
count=0;
do
{
color=p->nodes[color].next[0];
if (p->nodes[color].count > max_count)
{
mode=color;
max_count=p->nodes[mode].count;
}
count+=p->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
*pixel=ScaleShortToQuantum((unsigned short) mode);
}
static inline void GetNonpeakPixelList(PixelList *pixel_list,Quantum *pixel)
{
SkipList
*p;
size_t
color,
next,
previous;
ssize_t
count;
/*
Finds the non peak value for each of the colors.
*/
p=(&pixel_list->skip_list);
color=65536L;
next=p->nodes[color].next[0];
count=0;
do
{
previous=color;
color=next;
next=p->nodes[color].next[0];
count+=p->nodes[color].count;
} while (count <= (ssize_t) (pixel_list->length >> 1));
if ((previous == 65536UL) && (next != 65536UL))
color=next;
else
if ((previous != 65536UL) && (next == 65536UL))
color=previous;
*pixel=ScaleShortToQuantum((unsigned short) color);
}
static inline void InsertPixelList(const Quantum pixel,PixelList *pixel_list)
{
size_t
signature;
unsigned short
index;
index=ScaleQuantumToShort(pixel);
signature=pixel_list->skip_list.nodes[index].signature;
if (signature == pixel_list->signature)
{
pixel_list->skip_list.nodes[index].count++;
return;
}
AddNodePixelList(pixel_list,index);
}
static void ResetPixelList(PixelList *pixel_list)
{
int
level;
SkipNode
*root;
SkipList
*p;
/*
Reset the skip-list.
*/
p=(&pixel_list->skip_list);
root=p->nodes+65536UL;
p->level=0;
for (level=0; level < 9; level++)
root->next[level]=65536UL;
pixel_list->seed=pixel_list->signature++;
}
MagickExport Image *StatisticImage(const Image *image,const StatisticType type,
const size_t width,const size_t height,ExceptionInfo *exception)
{
#define StatisticImageTag "Statistic/Image"
CacheView
*image_view,
*statistic_view;
Image
*statistic_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelList
**magick_restrict pixel_list;
ssize_t
center,
y;
/*
Initialize statistics image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
statistic_image=CloneImage(image,0,0,MagickTrue,
exception);
if (statistic_image == (Image *) NULL)
return((Image *) NULL);
status=SetImageStorageClass(statistic_image,DirectClass,exception);
if (status == MagickFalse)
{
statistic_image=DestroyImage(statistic_image);
return((Image *) NULL);
}
pixel_list=AcquirePixelListThreadSet(MagickMax(width,1),MagickMax(height,1));
if (pixel_list == (PixelList **) NULL)
{
statistic_image=DestroyImage(statistic_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Make each pixel the min / max / median / mode / etc. of the neighborhood.
*/
center=(ssize_t) GetPixelChannels(image)*(image->columns+MagickMax(width,1))*
(MagickMax(height,1)/2L)+GetPixelChannels(image)*(MagickMax(width,1)/2L);
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
statistic_view=AcquireAuthenticCacheView(statistic_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,statistic_image,statistic_image->rows,1)
#endif
for (y=0; y < (ssize_t) statistic_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) MagickMax(width,1)/2L),y-
(ssize_t) (MagickMax(height,1)/2L),image->columns+MagickMax(width,1),
MagickMax(height,1),exception);
q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns, 1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) statistic_image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
area,
maximum,
minimum,
sum,
sum_squared;
Quantum
pixel;
const Quantum
*magick_restrict pixels;
ssize_t
u;
ssize_t
v;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait statistic_traits=GetPixelChannelTraits(statistic_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(statistic_traits == UndefinedPixelTrait))
continue;
if (((statistic_traits & CopyPixelTrait) != 0) ||
(GetPixelWriteMask(image,p) <= (QuantumRange/2)))
{
SetPixelChannel(statistic_image,channel,p[center+i],q);
continue;
}
if ((statistic_traits & UpdatePixelTrait) == 0)
continue;
pixels=p;
area=0.0;
minimum=pixels[i];
maximum=pixels[i];
sum=0.0;
sum_squared=0.0;
ResetPixelList(pixel_list[id]);
for (v=0; v < (ssize_t) MagickMax(height,1); v++)
{
for (u=0; u < (ssize_t) MagickMax(width,1); u++)
{
if ((type == MedianStatistic) || (type == ModeStatistic) ||
(type == NonpeakStatistic))
{
InsertPixelList(pixels[i],pixel_list[id]);
pixels+=GetPixelChannels(image);
continue;
}
area++;
if (pixels[i] < minimum)
minimum=(double) pixels[i];
if (pixels[i] > maximum)
maximum=(double) pixels[i];
sum+=(double) pixels[i];
sum_squared+=(double) pixels[i]*pixels[i];
pixels+=GetPixelChannels(image);
}
pixels+=GetPixelChannels(image)*image->columns;
}
switch (type)
{
case ContrastStatistic:
{
pixel=ClampToQuantum(MagickAbsoluteValue((maximum-minimum)*
PerceptibleReciprocal(maximum+minimum)));
break;
}
case GradientStatistic:
{
pixel=ClampToQuantum(MagickAbsoluteValue(maximum-minimum));
break;
}
case MaximumStatistic:
{
pixel=ClampToQuantum(maximum);
break;
}
case MeanStatistic:
default:
{
pixel=ClampToQuantum(sum/area);
break;
}
case MedianStatistic:
{
GetMedianPixelList(pixel_list[id],&pixel);
break;
}
case MinimumStatistic:
{
pixel=ClampToQuantum(minimum);
break;
}
case ModeStatistic:
{
GetModePixelList(pixel_list[id],&pixel);
break;
}
case NonpeakStatistic:
{
GetNonpeakPixelList(pixel_list[id],&pixel);
break;
}
case RootMeanSquareStatistic:
{
pixel=ClampToQuantum(sqrt(sum_squared/area));
break;
}
case StandardDeviationStatistic:
{
pixel=ClampToQuantum(sqrt(sum_squared/area-(sum/area*sum/area)));
break;
}
}
SetPixelChannel(statistic_image,channel,pixel,q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(statistic_image);
}
if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,StatisticImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
statistic_view=DestroyCacheView(statistic_view);
image_view=DestroyCacheView(image_view);
pixel_list=DestroyPixelListThreadSet(pixel_list);
if (status == MagickFalse)
statistic_image=DestroyImage(statistic_image);
return(statistic_image);
}
|
sparseBlas.h | //==============================================================================
//
// Copyright 2018 The InsideLoop Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//==============================================================================
#ifndef IL_SPARSE_BLAS_H
#define IL_SPARSE_BLAS_H
#include <il/linearAlgebra/sparse/blas/SparseMatrixBlas.h>
#ifdef IL_MKL
namespace il {
////////////////////////////////////////////////////////////////////////////////
// BLAS Level 2
////////////////////////////////////////////////////////////////////////////////
inline void blas(double alpha, const il::SparseMatrixCSR<int, double>& A,
const il::Array<double>& x, double beta, il::io_t,
il::Array<double>& y) {
//#pragma omp parallel for
for (int i = 0; i < A.size(0); ++i) {
double sum = 0.0;
for (int k = A.row(i); k < A.row(i + 1); ++k) {
sum += A.element(k) * x[A.column(k)];
}
y[i] = alpha * sum + beta * y[i];
}
}
inline void blas(double alpha, const il::SparseMatrixCSR<il::int_t, double>& A,
const il::Array<double>& x, double beta, il::io_t,
il::Array<double>& y) {
//#pragma omp parallel for
for (il::int_t i = 0; i < A.size(0); ++i) {
double sum = 0.0;
for (il::int_t k = A.row(i); k < A.row(i + 1); ++k) {
sum += A.element(k) * x[A.column(k)];
}
y[i] = alpha * sum + beta * y[i];
}
}
// inline void blas(const il::Array<double>& x, il::io_t,
// il::SparseMatrixCSR<int, double>& A, il::Array<double>& y) {
// IL_EXPECT_FAST(y.size() == A.size(0));
// IL_EXPECT_FAST(x.size() == A.size(1));
// IL_EXPECT_FAST(A.size(0) == A.size(1));
//
// const char transa = 'n';
// const MKL_INT m = A.size(0);
// MKL_INT info;
//
// int* A_row_data = A.rowData();
// int* A_column_data = A.columnData();
// for (int i = 0; i <= A.size(0); ++i) {
// ++A_row_data[i];
// }
// for (int i = 0; i <= A.nbNonZeros(); ++i) {
// ++A_column_data[i];
// }
//
// mkl_dcsrgemv(&transa, &m, A.elementData(), A.rowData(), A.columnData(),
// x.data(), y.data());
//
// for (int i = 0; i <= A.size(0); ++i) {
// --A_row_data[i];
// }
// for (int i = 0; i <= A.nbNonZeros(); ++i) {
// --A_column_data[i];
// }
//}
inline void blas(float alpha, il::SparseMatrixBlas<int, float>& A_optimized,
const il::Array<float>& x, float beta, il::io_t,
il::Array<float>& y) {
const sparse_operation_t operation = SPARSE_OPERATION_NON_TRANSPOSE;
const sparse_matrix_t A = A_optimized.handle();
matrix_descr descr;
descr.type = SPARSE_MATRIX_TYPE_GENERAL;
sparse_status_t status =
mkl_sparse_s_mv(operation, alpha, A, descr, x.data(), beta, y.Data());
IL_EXPECT_FAST(status == SPARSE_STATUS_SUCCESS);
}
inline void blas(double alpha, il::SparseMatrixBlas<int, double>& A_optimized,
const il::Array<double>& x, double beta, il::io_t,
il::Array<double>& y) {
const sparse_operation_t operation = SPARSE_OPERATION_NON_TRANSPOSE;
const sparse_matrix_t A = A_optimized.handle();
matrix_descr descr;
descr.type = SPARSE_MATRIX_TYPE_GENERAL;
sparse_status_t status =
mkl_sparse_d_mv(operation, alpha, A, descr, x.data(), beta, y.Data());
IL_EXPECT_FAST(status == SPARSE_STATUS_SUCCESS);
}
} // namespace il
#endif // IL_MKL
#endif // IL_SPARSE_BLAS_H
|
GB_binop__second_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__second_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__second_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__second_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__second_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__second_fp64)
// A*D function (colscale): GB (_AxD__second_fp64)
// D*A function (rowscale): GB (_DxB__second_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__second_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__second_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_fp64)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = bij
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = y ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
1
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_FP64 || GxB_NO_SECOND_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__second_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__second_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__second_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__second_fp64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__second_fp64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__second_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__second_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__second_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__second_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__second_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = GBX (Bx, p, false) ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = y ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
GB_unop__one_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__one_fc32_fc32
// op(A') function: GB_unop_tran__one_fc32_fc32
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: ;
// unaryop: cij = GxB_CMPLXF(1,0)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GxB_CMPLXF(1,0) ;
// casting
#define GB_CAST(z, aij) \
; ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
; ; \
/* Cx [pC] = op (cast (aij)) */ \
; ; \
Cx [pC] = GxB_CMPLXF(1,0) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ONE || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__one_fc32_fc32
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
; ;
; ;
Cx [p] = GxB_CMPLXF(1,0) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
; ;
; ;
Cx [p] = GxB_CMPLXF(1,0) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__one_fc32_fc32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
encipher.h | /*
header file ini berisi fungsi enkripsi dan dekripsi untuk mengamankan file simpanan,
dan bisa dibuka kembali dan menunjukan isinya (dilengkapi dengan fungsi tambahan).
*/
#ifndef ENCIPHER
#define ENCIPHER
#include "subbytes.h"
#include "parallel_string.h"
int special(int a, int b) { //function for negative numbers of modulo, recursively
if (a >= 0) {
return a;
} else {
return special(a + b, b);
}
}
int mod(int a, int b) { //function for modulo, since C can't moduled negative numbers
if (a < 0) {
return special(a + b, b);
} else {
return a % b;
}
}
//encryption function
char encrypt(unsigned char * M,
const unsigned char * key) {
int maksKey, maksM;
#pragma omp single {
maksKey = my_strlen(key);
maksM = my_strlen(M);
}
#pragma omp parallel shared(maksKey, maksM)
{
int from, to, i, j;
#pragma ompfor
for (i = 0; i < maksM; i += maksKey + 1) {
from = i;
to = i + maksKey;
#pragma omp task
{
for (j = from; j <= to; j++) {
M[j] = mod((M[j] + key[j % maksKey]), 256);
M[j] = sbox(M[j]);
}
}
}
}
return 0;
}
//decryption function
char decrypt(unsigned char * M,
const unsigned char * key) {
int maksKey, maksM;
#pragma omp single
{
maksKey = my_strlen(key);
maksM = my_strlen(M);
}
#pragma omp parallel shared(maksKey, maksM, M, key)
{
int from, to, i, j;
#pragma omp for
for (i = 0; i < maksM; i += maksKey + 1) {
from = i;
to = i + maksKey;
#pragma omp taskwait
{
for (j = from; j <= to; j++) {
M[j] = rsbox(M[j]); //inverse subbox
M[j] = mod((M[j] - key[j % maksKey]), 256); //vigenere function
}
}
}
}
return 0;
}
#endif //ENCIPHER |
ff2mtGreens.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "parmt_utils.h"
#ifdef PARMT_USE_INTEL
#include <mkl_cblas.h>
#else
#include <cblas.h>
#endif
#include "iscl/memory/memory.h"
/*!
* @brief Converts the NED based fundamental faults to Green's functions which
* scale the NED based moment tensor terms s.t. \f$ u = G m \f$.
* where \f$ m = \{ m_{xx}, m_{yy}, m_{zz}, m_{xy}, m_{xz}, m_{yz} \} \f$
* are the NED moment tensor terms and \f$ u \f$ is the estimate for
* the icomp'th component with source receiver azimuth, az, and given
* channel orientation encapsulated by cmpaz and cmpinc.
*
* @param[in] npgrns number of points in greens functions
* @param[in] ldg leading dimension of G (>= 6)
* @param[in] icomp =1 for vertical Green's functions.
* =2 for north (channel 2) Green's functions.
* =3 for east (channel 3) Green's functions.
* @param[in] azin source to receiver azimuth (degrees)
* @param[in] bazin receiver to source azimuth (degrees). if you
* do not know then set this to:
* fmod(az + 180.0, 360.0)
* @param[in] cmpaz component azimuth (0 north, +90 east)
* @param[in] cmpinc component inclinantion (-90 up, 0 east/north, +90 down)
* @param[in] ZDS vertical greens fn for 90 degree dip slip [npgrns]
* @param[in] ZSS vertical greens fn for vertical strike slip [npgrns]
* @param[in] ZDD vertical greens fn for 45 degree dip slip [npgrns]
* @param[in] ZEX vertical greens fn for an explosion [npgrns]
* @param[in] RDS radial greens fn for 90 degree dip slip [npgrns]
* @param[in] RSS radial greens fn for vertical strike slip [npgrns]
* @param[in] RDD radial greens fn for 45 degree dip slip [npgrns]
* @param[in] REX radial greens fn for an explosion [npgrns]
* @param[in] TDS transverse greens fn for 90 degree dip slip [npgrns]
* @param[in] TSS transverse greens fn for vertical strike slip [npgrns]
*
* @param[out] G row major Green's functions matrix [npgrns x ldg].
* the columns are ordered:
* \f$ \{ G_{xx}, G_{yy}, G_{zz},
* G_{xy}, G_{xz}, G_{yz} \} \f$
*
* @result 0 indicates success
*
* @author Ben Baker
*
* @copyright ISTI distributed under Apache 2
*
*/
int parmt_utils_ff2mtGreensMatrix64f(const int npgrns, const int ldg,
const int icomp,
const double azin, const double bazin,
const double cmpaz, const double cmpinc,
const double *__restrict__ ZDS,
const double *__restrict__ ZSS,
const double *__restrict__ ZDD,
const double *__restrict__ ZEX,
const double *__restrict__ RDS,
const double *__restrict__ RSS,
const double *__restrict__ RDD,
const double *__restrict__ REX,
const double *__restrict__ TDS,
const double *__restrict__ TSS,
double *__restrict__ G)
{
double *Gxx, *Gyy, *Gzz, *Gxy, *Gxz, *Gyz;
int ierr;
ierr = 0;
if (npgrns < 1 || ldg < 6 || G == NULL)
{
if (npgrns < 1)
{
fprintf(stderr, "%s: No points in greens fns\n", __func__);
}
if (ldg < 6){fprintf(stderr, "%s: ldg must be >= 6\n", __func__);}
if (G == NULL){fprintf(stderr, "%s: Error G is NULL\n", __func__);}
return -1;
}
Gxx = memory_calloc64f(npgrns);
Gyy = memory_calloc64f(npgrns);
Gzz = memory_calloc64f(npgrns);
Gxy = memory_calloc64f(npgrns);
Gxz = memory_calloc64f(npgrns);
Gyz = memory_calloc64f(npgrns);
ierr = parmt_utils_ff2mtGreens64f(npgrns, icomp, azin, bazin,
cmpaz, cmpinc,
ZDS, ZSS, ZDD, ZEX,
RDS, RSS, RDD, REX,
TDS, TSS,
Gxx, Gyy, Gzz,
Gxy, Gxz, Gyz);
if (ierr != 0)
{
fprintf(stderr, "%s: Failed to compute greens functions columns\n",
__func__);
}
else
{
cblas_dcopy(npgrns, Gxx, 1, &G[0], ldg);
cblas_dcopy(npgrns, Gyy, 1, &G[1], ldg);
cblas_dcopy(npgrns, Gzz, 1, &G[2], ldg);
cblas_dcopy(npgrns, Gxy, 1, &G[3], ldg);
cblas_dcopy(npgrns, Gxz, 1, &G[4], ldg);
cblas_dcopy(npgrns, Gyz, 1, &G[5], ldg);
}
memory_free64f(&Gxx);
memory_free64f(&Gyy);
memory_free64f(&Gzz);
memory_free64f(&Gxy);
memory_free64f(&Gxz);
memory_free64f(&Gyz);
return ierr;
}
//============================================================================//
/*!
* @brief Converts the NED based fundamental faults to Green's functions which
* scale the NED based moment tensor terms s.t. \f$ u = G m \f$.
* where \f$ m = \{ m_{xx}, m_{yy}, m_{zz}, m_{xy}, m_{xz}, m_{yz} \} \f$
* are the NED moment tensor terms and \f$ u \f$ is the estimate for
* the icomp'th component with source receiver azimuth, az, and given
* channel orientation encapsulated by cmpaz and cmpinc.
*
* @param[in] npgrns number of points in greens functions
* @param[in] icomp =1 for vertical Green's functions.
* =2 for north (channel 2) Green's functions.
* =3 for east (channel 3) Green's functions.
* @param[in] azin source to receiver azimuth (degrees)
* @param[in] bazin receiver to source azimuth (degrees). if you
* do not know then set this to:
* fmod(az + 180.0, 360.0)
* @param[in] cmpaz component azimuth (0 north, +90 east)
* @param[in] cmpinc component inclinantion (-90 up, 0 east/north, +90 down)
* @param[in] ZDS vertical greens fn for 90 degree dip slip [npgrns]
* @param[in] ZSS vertical greens fn for vertical strike slip [npgrns]
* @param[in] ZDD vertical greens fn for 45 degree dip slip [npgrns]
* @param[in] ZEX vertical greens fn for an explosion [npgrns]
* @param[in] RDS radial greens fn for 90 degree dip slip [npgrns]
* @param[in] RSS radial greens fn for vertical strike slip [npgrns]
* @param[in] RDD radial greens fn for 45 degree dip slip [npgrns]
* @param[in] REX radial greens fn for an explosion [npgrns]
* @param[in] TDS transverse greens fn for 90 degree dip slip [npgrns]
* @param[in] TSS transverse greens fn for vertical strike slip [npgrns]
*
* @param[out] Gxx greens function scaling mxx moment tensor term [npgrns]
* @param[out] Gyy greens function scaling myy moment tensor term [npgrns]
* @param[out] Gzz greens function scaling mzz moment tensor term [npgrns]
* @param[out] Gxy greens function scaling mxy moment tensor term [npgrns]
* @param[out] Gxz greens function scaling mxz moment tensor term [npgrns]
* @param[out] Gyz greens function scaling myz moment tensor term [npgrns]
*
* @result 0 indicates success
*
* @author Ben Baker
*
* @copyright ISTI distributed under Apache 2
*
*/
int parmt_utils_ff2mtGreens64f(const int npgrns, const int icomp,
const double azin, const double bazin,
const double cmpaz, const double cmpinc,
const double *__restrict__ ZDS,
const double *__restrict__ ZSS,
const double *__restrict__ ZDD,
const double *__restrict__ ZEX,
const double *__restrict__ RDS,
const double *__restrict__ RSS,
const double *__restrict__ RDD,
const double *__restrict__ REX,
const double *__restrict__ TDS,
const double *__restrict__ TSS,
double *__restrict__ Gxx,
double *__restrict__ Gyy,
double *__restrict__ Gzz,
double *__restrict__ Gxy,
double *__restrict__ Gxz,
double *__restrict__ Gyz)
{
double az, baz, c2a, ca, caz, cos_caz, cost,
gxx_e, gxy_e, gxz_e, gyy_e, gyz_e, gzz_e,
gxx_n, gxy_n, gxz_n, gyy_n, gyz_n, gzz_n,
gxx_r, gxy_r, gxz_r, gyy_r, gyz_r, gzz_r,
gxx_t, gxy_t, gxz_t, gyy_t, gyz_t, gzz_t,
gxx_z, gxy_z, gxz_z, gyy_z, gyz_z, gzz_z,
rex, rdd, rds, rss, s2a, sa, sin_caz, sint,
tds, tss, xscal,
zex, zdd, zds, zss;
int i;
const double pi180 = M_PI/180.0;
const double half = 0.5;
const double sixth = 1.0/6.0;
const double third = 1.0/3.0;
//------------------------------------------------------------------------//
if (icomp < 1 || icomp > 3)
{
fprintf(stderr, "%s: Invalid component: %d\n", __func__, icomp);
return -1;
}
if (icomp == 1)
{
if (ZDS == NULL || ZSS == NULL || ZDD == NULL || ZEX == NULL)
{
if (ZDS == NULL){fprintf(stderr, "%s: zds is NULL\n", __func__);}
if (ZSS == NULL){fprintf(stderr, "%s: zss is NULL\n", __func__);}
if (ZDD == NULL){fprintf(stderr, "%s: zdd is NULL\n", __func__);}
if (ZEX == NULL){fprintf(stderr, "%s: zex is NULL\n", __func__);}
return -1;
}
}
else
{
if (RDS == NULL || RSS == NULL || RDD == NULL || REX == NULL ||
TDS == NULL || TSS == NULL)
{
if (RDS == NULL){fprintf(stderr, "%s: rds is NULL\n", __func__);}
if (RSS == NULL){fprintf(stderr, "%s: rss is NULL\n", __func__);}
if (RDD == NULL){fprintf(stderr, "%s: rdd is NULL\n", __func__);}
if (REX == NULL){fprintf(stderr, "%s: rex is NULL\n", __func__);}
if (TDS == NULL){fprintf(stderr, "%s: tds is NULL\n", __func__);}
if (TSS == NULL){fprintf(stderr, "%s: tss is NULL\n", __func__);}
return -1;
}
}
// Get the backazimuth
az = azin;
baz = bazin; //fmod(az + 180.0, 360.0);
// Convert to radians
az = az*pi180;
baz = baz*pi180;
caz = cmpaz*pi180;
// Compute the geometric factors
sa = sin(az);
ca = cos(az);
s2a = sin(2.*az);
c2a = cos(2.*az);
sint = sin(baz);
cost = cos(baz);
cos_caz = cos(caz);
sin_caz = sin(caz);
// the cmpaz will come from the metadata so fix that
if (icomp == 3)
{
cos_caz = cos(caz - M_PI/2.0);
sin_caz = sin(caz - M_PI/2.0);
}
// Set the columns (Minson et. al. 2008)
xscal = 1.0;
if (fabs(cmpinc - 90.0) < 1.e-4){xscal =-1.0;}
if (icomp == 1)
{
#pragma omp simd
for (i=0; i<npgrns; i++)
{
zss = ZSS[i];
zdd = ZDD[i];
zds = ZDS[i];
zex = ZEX[i];
// compute the greens function corresponding to the mt
gxx_z = half*(zss*c2a) - sixth*zdd + third*zex;
gyy_z =-half*(zss*c2a) - sixth*zdd + third*zex;
gzz_z = third*(zdd + zex);
gxy_z = zss*s2a;
gxz_z = zds*ca;
gyz_z = zds*sa;
// copy and include polarity so it matches the observation
Gxx[i] = xscal*gxx_z;
Gyy[i] = xscal*gyy_z;
Gzz[i] = xscal*gzz_z;
Gxy[i] = xscal*gxy_z;
Gxz[i] = xscal*gxz_z;
Gyz[i] = xscal*gyz_z;
} // Loop on points
}
else
{
// North or 2 component
if (icomp == 2)
{
// Loop on rows (data points)
//#pragma omp simd
for (i=0; i<npgrns; i++)
{
rss = RSS[i];
rdd = RDD[i];
rds = RDS[i];
rex = REX[i];
tds = TDS[i];
tss = TSS[i];
gxx_r = half*(rss*c2a) - sixth*rdd + third*rex;
gyy_r =-half*(rss*c2a) - sixth*rdd + third*rex;
gzz_r = third*(rdd + rex);
gxy_r = rss*s2a;
gxz_r = rds*ca;
gyz_r = rds*sa;
gxx_t = half*tss*s2a;
gyy_t =-half*tss*s2a;
gzz_t = 0.0;
gxy_t =-tss*c2a;
gxz_t = tds*sa;
gyz_t =-tds*ca;
// Rotate from (r, t) -> (n, e)
// n = cos(baz-180)*r - sin(baz-180)*t
// =-cos(baz)*r + sin(baz)*t
// e = sin(baz-180)*r + cos(baz-180)*t
// =-sin(baz)*r - cos(baz)*t
gxx_n =-cost*gxx_r + sint*gxx_t;
gxx_e =-sint*gxx_r - cost*gxx_t;
gyy_n =-cost*gyy_r + sint*gyy_t;
gyy_e =-sint*gyy_r - cost*gyy_t;
gzz_n =-cost*gzz_r + sint*gzz_t;
gzz_e =-sint*gzz_r - cost*gzz_t;
gxy_n =-cost*gxy_r + sint*gxy_t;
gxy_e =-sint*gxy_r - cost*gxy_t;
gxz_n =-cost*gxz_r + sint*gxz_t;
gxz_e =-sint*gxz_r - cost*gxz_t;
gyz_n =-cost*gyz_r + sint*gyz_t;
gyz_e =-sint*gyz_r - cost*gyz_t;
// Rotate from (n, e) -> (1, 2) around az
// 1 = cos(comp_az)*n + sin(comp_az)*e
Gxx[i] = cos_caz*gxx_n + sin_caz*gxx_e;
Gyy[i] = cos_caz*gyy_n + sin_caz*gyy_e;
Gzz[i] = cos_caz*gzz_n + sin_caz*gzz_e;
Gxy[i] = cos_caz*gxy_n + sin_caz*gxy_e;
Gxz[i] = cos_caz*gxz_n + sin_caz*gxz_e;
Gyz[i] = cos_caz*gyz_n + sin_caz*gyz_e;
} // Loop on points
}
// East or 3 component
else
{
// Loop on rows (data points)
#pragma omp simd
for (i=0; i<npgrns; i++)
{
rss = RSS[i];
rdd = RDD[i];
rds = RDS[i];
rex = REX[i];
tds = TDS[i];
tss = TSS[i];
gxx_r = half*(rss*c2a) - sixth*rdd + third*rex;
gyy_r =-half*(rss*c2a) - sixth*rdd + third*rex;
gzz_r = third*(rdd + rex);
gxy_r = rss*s2a;
gxz_r = rds*ca;
gyz_r = rds*sa;
gxx_t = half*tss*s2a;
gyy_t =-half*tss*s2a;
gzz_t = 0.0;
gxy_t =-tss*c2a;
gxz_t = tds*sa;
gyz_t =-tds*ca;
// Rotate from (r, t) -> (n, e)
// n = cos(baz-180)*r - sin(baz-180)*t
// =-cos(baz)*r + sin(baz)*t
// e = sin(baz-180)*r + cos(baz-180)*t
// =-sin(baz)*r - cos(baz)*t
gxx_n =-cost*gxx_r + sint*gxx_t;
gxx_e =-sint*gxx_r - cost*gxx_t;
gyy_n =-cost*gyy_r + sint*gyy_t;
gyy_e =-sint*gyy_r - cost*gyy_t;
gzz_n =-cost*gzz_r + sint*gzz_t;
gzz_e =-sint*gzz_r - cost*gzz_t;
gxy_n =-cost*gxy_r + sint*gxy_t;
gxy_e =-sint*gxy_r - cost*gxy_t;
gxz_n =-cost*gxz_r + sint*gxz_t;
gxz_e =-sint*gxz_r - cost*gxz_t;
gyz_n =-cost*gyz_r + sint*gyz_t;
gyz_e =-sint*gyz_r - cost*gyz_t;
// Rotate from (n, e) -> (1, 2) around az
// 2 =-sin(comp_az)*n + cos(comp_az)*e
Gxx[i] =-sin_caz*gxx_n + cos_caz*gxx_e;
Gyy[i] =-sin_caz*gyy_n + cos_caz*gyy_e;
Gzz[i] =-sin_caz*gzz_n + cos_caz*gzz_e;
Gxy[i] =-sin_caz*gxy_n + cos_caz*gxy_e;
Gxz[i] =-sin_caz*gxz_n + cos_caz*gxz_e;
Gyz[i] =-sin_caz*gyz_n + cos_caz*gyz_e;
} // Loop on points
} // End check on 1 or 2 component
} // End check on component
return 0;
}
|
multiimagereconstructor.h | #ifndef MULTILINEARRECONSTRUCTION_MULTIIMAGERECONSTRUCTOR_H
#define MULTILINEARRECONSTRUCTION_MULTIIMAGERECONSTRUCTOR_H
#ifndef MKL_BLAS
#define MKL_BLAS MKL_DOMAIN_BLAS
#endif
#define EIGEN_USE_MKL_ALL
#include <eigen3/Eigen/Dense>
#include <eigen3/Eigen/Geometry>
#include <eigen3/Eigen/LU>
#include "ceres/ceres.h"
#include <opencv2/opencv.hpp>
#include "basicmesh.h"
#include "common.h"
#include "constraints.h"
#include "costfunctions.h"
#include "multilinearmodel.h"
#include "parameters.h"
#include "singleimagereconstructor.hpp"
#include "statsutils.h"
#include "utils.hpp"
#include "OffscreenMeshVisualizer.h"
#include "AAM/aammodel.h"
#include "boost/filesystem/operations.hpp"
#include "boost/filesystem/path.hpp"
namespace fs = boost::filesystem;
using namespace Eigen;
namespace {
struct PixelInfo {
PixelInfo() : fidx(-1) {}
PixelInfo(int fidx, glm::vec3 bcoords) : fidx(fidx), bcoords(bcoords) {}
int fidx; // trinagle index
glm::vec3 bcoords; // bary centric coordinates
};
inline void encode_index(int idx, unsigned char& r, unsigned char& g, unsigned char& b) {
r = static_cast<unsigned char>(idx & 0xff); idx >>= 8;
g = static_cast<unsigned char>(idx & 0xff); idx >>= 8;
b = static_cast<unsigned char>(idx & 0xff);
}
inline int decode_index(unsigned char r, unsigned char g, unsigned char b, int& idx) {
idx = b; idx <<= 8; idx |= g; idx <<= 8; idx |= r;
return idx;
}
template <typename T>
T clamp(T val, T lower, T upper) {
return std::max(lower, std::min(upper, val));
}
inline glm::dvec3 bilinear_sample(const QImage& img, double x, double y) {
int x0 = floor(x), x1 = x0 + 1;
int y0 = floor(y), y1 = y0 + 1;
if(x0 < 0 || y0 < 0) return glm::dvec3(-1, -1, -1);
if(x1 >= img.width() || y1 >= img.height()) return glm::dvec3(-1, -1, -1);
double c0 = x - x0, c0c = 1 - c0;
double c1 = y - y0, c1c = 1 - c1;
QRgb p00 = img.pixel(x0, y0);
QRgb p01 = img.pixel(x1, y0);
QRgb p10 = img.pixel(x0, y1);
QRgb p11 = img.pixel(x1, y1);
double r = c0c * c1c * qRed(p00) + c0c * c1 * qRed(p01) + c0 * c1c * qRed(p10) + c0 * c1 * qRed(p11);
double g = c0c * c1c * qGreen(p00) + c0c * c1 * qGreen(p01) + c0 * c1c * qGreen(p10) + c0 * c1 * qGreen(p11);
double b = c0c * c1c * qBlue(p00) + c0c * c1 * qBlue(p01) + c0 * c1c * qBlue(p10) + c0 * c1 * qBlue(p11);
return glm::dvec3(r, g, b);
}
inline pair<set<int>, vector<int>> FindTrianglesIndices(const QImage& img) {
int w = img.width(), h = img.height();
set<int> S;
vector<int> indices_map(w*h);
for(int i=0, pidx = 0;i<h;++i) {
for(int j=0;j<w;++j, ++pidx) {
QRgb pix = img.pixel(j, i);
unsigned char r = static_cast<unsigned char>(qRed(pix));
unsigned char g = static_cast<unsigned char>(qGreen(pix));
unsigned char b = static_cast<unsigned char>(qBlue(pix));
if(r == 0 && g == 0 && b == 0) {
indices_map[pidx] = -1;
continue;
}
else {
int idx;
decode_index(r, g, b, idx);
S.insert(idx);
indices_map[pidx] = idx;
}
}
}
return make_pair(S, indices_map);
}
static QImage TransferColor(const QImage& source, const QImage& target,
const vector<int>& valid_pixels_s,
const vector<int>& valid_pixels_t) {
// Make a copy
QImage result = source;
const int num_rows_s = source.height(), num_cols_s = source.width();
const int num_rows_t = target.height(), num_cols_t = target.width();
const size_t num_pixels_s = valid_pixels_s.size();
const size_t num_pixels_t = valid_pixels_t.size();
Matrix3d RGB2LMS, LMS2RGB;
RGB2LMS << 0.3811, 0.5783, 0.0402,
0.1967, 0.7244, 0.0782,
0.0241, 0.1288, 0.8444;
LMS2RGB << 4.4679, -3.5873, 0.1193,
-1.2186, 2.3809, -0.1624,
0.0497, -0.2439, 1.2045;
Matrix3d b, c, b2, c2;
b << 1.0/sqrt(3.0), 0, 0,
0, 1.0/sqrt(6.0), 0,
0, 0, 1.0/sqrt(2.0);
c << 1, 1, 1,
1, 1, -2,
1, -1, 0;
b2 << sqrt(3.0)/3.0, 0, 0,
0, sqrt(6.0)/6.0, 0,
0, 0, sqrt(2.0)/2.0;
c2 << 1, 1, 1,
1, 1, -1,
1, -2, 0;
Matrix3d LMS2lab = b * c;
Matrix3d lab2LMS = c2 * b2;
auto unpack_pixel = [](QRgb pix) {
int r = max(1, qRed(pix)), g = max(1, qGreen(pix)), b = max(1, qBlue(pix));
return make_tuple(r, g, b);
};
auto compute_image_stats = [&](const QImage& img, const vector<int>& valid_pixels) {
const size_t num_pixels = valid_pixels.size();
const int num_cols = img.width(), num_rows = img.height();
MatrixXd pixels(3, num_pixels);
cout << num_cols << 'x' << num_rows << endl;
for(size_t i=0;i<num_pixels;++i) {
int y = valid_pixels[i] / num_cols;
int x = valid_pixels[i] % num_cols;
int r, g, b;
tie(r, g, b) = unpack_pixel(img.pixel(x, y));
pixels.col(i) = Vector3d(r / 255.0, g / 255.0, b / 255.0);
}
MatrixXd pixels_LMS = RGB2LMS * pixels;
for(int i=0;i<3;i++) {
for(int j=0;j<num_pixels;++j) {
pixels_LMS(i, j) = log10(pixels_LMS(i, j));
}
}
MatrixXd pixels_lab = LMS2lab * pixels_LMS;
Vector3d mean = pixels_lab.rowwise().mean();
Vector3d stdev(0, 0, 0);
for(int i=0;i<num_pixels;++i) {
Vector3d diff = pixels_lab.col(i) - mean;
stdev += Vector3d(diff[0]*diff[0], diff[1]*diff[1], diff[2]*diff[2]);
}
stdev /= (num_pixels - 1);
for(int i=0;i<3;++i) stdev[i] = sqrt(stdev[i]);
cout << "mean: " << mean << endl;
cout << "std: " << stdev << endl;
return make_tuple(pixels_lab, mean, stdev);
};
// Compute stats of both images
MatrixXd lab_s, lab_t;
Vector3d mean_s, std_s, mean_t, std_t;
tie(lab_s, mean_s, std_s) = compute_image_stats(source, valid_pixels_s);
tie(lab_t, mean_t, std_t) = compute_image_stats(target, valid_pixels_t);
// Do the transfer
MatrixXd res(3, num_pixels_s);
for(int i=0;i<3;++i) {
for(int j=0;j<num_pixels_s;++j) {
//res(i, j) = (lab_s(i, j) - mean_s[i]) * std_t[i] / std_s[i] + mean_t[i];
res(i, j) = (lab_s(i, j) - mean_s[i]) + mean_t[i];
}
}
MatrixXd LMS_res = lab2LMS * res;
for(int i=0;i<3;++i) {
for(int j=0;j<num_pixels_s;++j) {
LMS_res(i, j) = pow(10, LMS_res(i, j));
}
}
MatrixXd est_im = LMS2RGB * LMS_res;
for(size_t i=0;i<num_pixels_s;++i) {
int y = valid_pixels_s[i] / num_cols_s;
int x = valid_pixels_s[i] % num_cols_s;
result.setPixel(x, y, qRgb(clamp<double>(est_im(0, i) * 255.0, 0., 255.),
clamp<double>(est_im(1, i) * 255.0, 0., 255.),
clamp<double>(est_im(2, i) * 255.0, 0., 255.)));
}
return result;
}
}
template <typename Constraint>
class MultiImageReconstructor {
public:
MultiImageReconstructor():
enable_selection(true),
enable_failure_detection(true),
direct_multi_recon(false) {}
void LoadModel(const string& filename) {
model = MultilinearModel(filename);
single_recon.LoadModel(filename);
}
void LoadPriors(const string& filename_id, const string& filename_exp) {
prior.load(filename_id, filename_exp);
single_recon.LoadPriors(filename_id, filename_exp);
}
void SetContourIndices(const vector<vector<int>>& contour_indices_in) {
contour_indices = contour_indices_in;
single_recon.SetContourIndices(contour_indices_in);
}
void SetMesh(const BasicMesh& mesh) {
template_mesh = mesh;
}
void SetIndices(const vector<int>& indices) {
init_indices = indices;
}
void AddImagePointsPair(const string& filename, const pair<QImage, vector<Constraint>>& p) {
image_filenames.push_back(filename);
image_points_pairs.push_back(p);
}
bool Reconstruct();
const Vector3d& GetRotation(int imgidx) const { return param_sets[imgidx].model.R; }
const Vector3d& GetTranslation(int imgidx) const { return param_sets[imgidx].model.T; }
const VectorXd& GetIdentityWeights(int imgidx) const { return param_sets[imgidx].model.Wid; }
const VectorXd& GetExpressionWeights(int imgidx) const { return param_sets[imgidx].model.Wexp_FACS; }
const Tensor1& GetGeometry(int imgidx) const {
model.ApplyWeights(GetIdentityWeights(imgidx), GetExpressionWeights(imgidx));
return model.GetTM();
}
const CameraParameters GetCameraParameters(int imgidx) const { return param_sets[imgidx].cam; }
const vector<int> GetIndices(int imgidx) const { return param_sets[imgidx].indices; }
vector<int> GetUpdatedIndices(int imgidx) const {
vector<int> idxs;
for(int i=0;i<param_sets[imgidx].recon.cons.size();++i) {
idxs.push_back(param_sets[imgidx].recon.cons[i].vidx);
}
return idxs;
}
void SetSelectionState(bool val) { enable_selection = val; }
void SetFailureDetectionState(bool val) { enable_failure_detection = val; }
void SetDirectMultiRecon(bool val) { direct_multi_recon = val; }
void SetProgressiveReconState(bool val) { enable_progressive_recon = val; }
protected:
void VisualizeReconstructionResult(const fs::path& folder, int i, bool scale_output=true) {
// Visualize the reconstruction results
#if 0
MeshVisualizer w("reconstruction result", param_sets[i].mesh);
w.BindConstraints(image_points_pairs[i].second);
w.BindImage(image_points_pairs[i].first);
w.BindLandmarks(init_indices);
w.BindUpdatedLandmarks(param_sets[i].indices);
w.SetMeshRotationTranslation(param_sets[i].model.R, param_sets[i].model.T);
w.SetCameraParameters(param_sets[i].cam);
w.resize(image_points_pairs[i].first.width(), image_points_pairs[i].first.height());
w.show();
w.paintGL();
w.update();
QImage recon_image = w.grabFrameBuffer();
fs::path image_path = fs::path(image_filenames[i]);
recon_image.save( (folder / fs::path(image_path.stem().string() + ".png")).string().c_str() );
#else
int imgw = image_points_pairs[i].first.width();
int imgh = image_points_pairs[i].first.height();
if(scale_output) {
const int target_size = 640;
double scale = static_cast<double>(target_size) / imgw;
imgw *= scale;
imgh *= scale;
}
const string home_directory = QDir::homePath().toStdString();
cout << "Home dir: " << home_directory << endl;
OffscreenMeshVisualizer visualizer(imgw, imgh);
// Always compute normal
param_sets[i].mesh.ComputeNormals();
visualizer.SetMVPMode(OffscreenMeshVisualizer::CamPerspective);
visualizer.SetRenderMode(OffscreenMeshVisualizer::MeshAndImage);
visualizer.BindMesh(param_sets[i].mesh);
visualizer.BindImage(image_points_pairs[i].first);
visualizer.SetCameraParameters(param_sets[i].cam);
visualizer.SetMeshRotationTranslation(param_sets[i].model.R, param_sets[i].model.T);
visualizer.SetIndexEncoded(false);
visualizer.SetEnableLighting(true);
visualizer.LoadRenderingSettings(home_directory + "/Data/Settings/blendshape_vis_ao.json");
QImage img = visualizer.Render(true);
fs::path image_path = fs::path(image_filenames[i]);
img.save((folder / fs::path(image_path.stem().string() + ".png")).string().c_str());
#endif
}
private:
MultilinearModel model;
MultilinearModelPrior prior;
vector<vector<int>> contour_indices;
vector<int> init_indices;
BasicMesh template_mesh;
struct ParameterSet {
vector<int> indices;
BasicMesh mesh;
CameraParameters cam;
ModelParameters model;
ReconstructionParameters<Constraint> recon;
OptimizationParameters opt;
ReconstructionStats stats;
string img_filename;
};
// Input image points pairs
vector<pair<QImage, vector<Constraint>>> image_points_pairs;
vector<string> image_filenames;
// AAM model for consistent set selection
aam::AAMModel aam;
// A set of parameters for each image
vector<ParameterSet> param_sets;
// The worker for single image reconstruction
SingleImageReconstructor<Constraint> single_recon;
bool enable_selection;
bool enable_failure_detection;
bool enable_progressive_recon;
bool direct_multi_recon;
};
namespace {
void safe_create(const fs::path& p) {
if(fs::exists(p)) fs::remove_all(p);
fs::create_directory(p);
}
}
template <typename Constraint>
bool MultiImageReconstructor<Constraint>::Reconstruct() {
cout << "Reconstruction begins..." << endl;
const string home_directory = QDir::homePath().toStdString();
cout << "Home dir: " << home_directory << endl;
// Preparing necessary stuff
const int tex_size = 2048;
const string albedo_index_map_filename(home_directory + "/Data/Multilinear/albedo_index.png");
const string albedo_pixel_map_filename(home_directory + "/Data/Multilinear/albedo_pixel.png");
const string valid_faces_indices_filename(home_directory + "/Data/Multilinear/face_region_indices.txt");
QImage albedo_index_map;
// Get the albedo index map
if(QFile::exists(albedo_index_map_filename.c_str())) {
message("loading index map for albedo.");
albedo_index_map = QImage(albedo_index_map_filename.c_str());
albedo_index_map.save("albedo_index.png");
} else {
cerr << "albedo index map does not exist. Abort." << endl;
exit(1);
}
auto valid_faces_indices_quad = LoadIndices(valid_faces_indices_filename);
// @HACK each quad face is triangulated, so the indices change from i to [2*i, 2*i+1]
vector<int> valid_faces_indices;
for(auto fidx : valid_faces_indices_quad) {
valid_faces_indices.push_back(fidx*2);
valid_faces_indices.push_back(fidx*2+1);
}
// Compute the barycentric coordinates for each pixel
vector<vector<PixelInfo>> albedo_pixel_map(tex_size, vector<PixelInfo>(tex_size));
// Generate pixel map for albedo
bool gen_pixel_map = false;
QImage pixel_map_image;
if(QFile::exists(albedo_pixel_map_filename.c_str())) {
pixel_map_image = QImage(albedo_pixel_map_filename.c_str());
message("generating pixel map for albedo ...");
boost::timer::auto_cpu_timer t("pixel map for albedo generation time = %w seconds.\n");
for(int i=0;i<tex_size;++i) {
for(int j=0;j<tex_size;++j) {
QRgb pix = albedo_index_map.pixel(j, i);
unsigned char r = static_cast<unsigned char>(qRed(pix));
unsigned char g = static_cast<unsigned char>(qGreen(pix));
unsigned char b = static_cast<unsigned char>(qBlue(pix));
if(r == 0 && g == 0 && b == 0) continue;
int fidx;
decode_index(r, g, b, fidx);
QRgb bcoords_pix = pixel_map_image.pixel(j, i);
float x = static_cast<float>(qRed(bcoords_pix)) / 255.0f;
float y = static_cast<float>(qGreen(bcoords_pix)) / 255.0f;
float z = static_cast<float>(qBlue(bcoords_pix)) / 255.0f;
albedo_pixel_map[i][j] = PixelInfo(fidx, glm::vec3(x, y, z));
}
}
message("done.");
} else {
cerr << "albedo pixel map does not exist. Abort." << endl;
exit(1);
}
vector<vector<glm::dvec3>> mean_texture(tex_size, vector<glm::dvec3>(tex_size, glm::dvec3(0, 0, 0)));
cv::Mat mean_texture_mat(tex_size, tex_size, CV_64FC3);
vector<vector<double>> mean_texture_weight(tex_size, vector<double>(tex_size, 0));
QImage mean_texture_image;
// Misc stuff
cout << image_filenames.size() << endl;
fs::path image_path = fs::path(image_filenames.front()).parent_path();
fs::path result_path = image_path / fs::path("multi_recon");
cout << "creating directory " << result_path.string() << endl;
safe_create(result_path);
cout << "directory created ..." << endl;
// Initialize the parameter sets
param_sets.resize(image_points_pairs.size());
for(size_t i=0;i<param_sets.size();++i) {
auto& params = param_sets[i];
params.img_filename = fs::path(image_filenames[i]).filename().string();
params.indices = init_indices;
params.mesh = template_mesh;
const int image_width = image_points_pairs[i].first.width();
const int image_height = image_points_pairs[i].first.height();
// camera parameters
cout << image_width << "x" << image_height << endl;
params.cam = CameraParameters::DefaultParameters(image_width, image_height);
cout << params.cam.image_size.x << ", " << params.cam.image_size.y << endl;
// model parameters
params.model = ModelParameters::DefaultParameters(prior.Uid, prior.Uexp);
// reconstruction parameters
params.recon.cons = image_points_pairs[i].second;
params.recon.imageWidth = image_width;
params.recon.imageHeight = image_height;
}
const int num_images = image_points_pairs.size();
// Initialize AAM model
auto constraints_to_mat = [=](const vector<Constraint>& constraints, int h) {
const int npoints = constraints.size();
cv::Mat m(npoints, 2, CV_64FC1);
for(int j=0;j<npoints;++j) {
m.at<double>(j, 0) = constraints[j].data.x;
m.at<double>(j, 1) = h - constraints[j].data.y;
}
return m;
};
vector<int> inliers;
if(enable_failure_detection) {
vector<QImage> images(image_points_pairs.size());
vector<cv::Mat> points(image_points_pairs.size());
// Collect input images and points
for(int i=0;i<image_points_pairs.size();++i) {
images[i] = image_points_pairs[i].first;
points[i] = constraints_to_mat(image_points_pairs[i].second,
image_points_pairs[i].first.height());
}
aam.SetOutputPath(result_path.string());
aam.SetImages(images);
aam.SetPoints(points);
aam.Preprocess();
aam.SetErrorMetric(aam::AAMModel::Hybrid);
// For Debugging
inliers = aam.FindInliers_Iterative();
} else {
inliers.resize(num_images);
iota(inliers.begin(), inliers.end(), 0);
}
VectorXd identity_centroid;
// Main reconstruction loop
// 1. Use single image reconstructor to do per-image reconstruction first
// 2. Select a consistent set of images for joint reconstruction
// 3. Convergence test. If not converged, goto step 1.
const int max_iters_main_loop = enable_progressive_recon?3:1;
int iters_main_loop = 0;
vector<MatrixXd> identity_weights_history;
vector<VectorXd> identity_weights_centroid_history;
vector<int> consistent_set, final_chosen_set;
// Initialize the consistent set to inliers
#if 0
consistent_set.resize(num_images);
iota(consistent_set.begin(), consistent_set.end(), 0);
#else
consistent_set = inliers;
#endif
while(iters_main_loop++ < max_iters_main_loop){
fs::path step_result_path = result_path / fs::path("step" + to_string(iters_main_loop));
safe_create(step_result_path);
// Single image reconstruction step
OptimizationParameters opt_params = OptimizationParameters::Defaults();
opt_params.w_prior_id = 10 * pow(iters_main_loop, 0.25);
opt_params.w_prior_exp = 10;
opt_params.num_initializations = 1;
opt_params.perturbation_range = 0.01;
opt_params.errorThreshold = 0.01;
fs::path step_single_recon_result_path = step_result_path / fs::path("single_recon");
safe_create(step_single_recon_result_path);
for(int i=0;i<num_images;++i) {
single_recon.SetMesh(param_sets[i].mesh);
single_recon.SetIndices(param_sets[i].indices);
single_recon.SetImageSize(param_sets[i].recon.imageWidth, param_sets[i].recon.imageHeight);
single_recon.SetConstraints(param_sets[i].recon.cons);
single_recon.SetInitialParameters(param_sets[i].model, param_sets[i].cam);
if(iters_main_loop > 1) single_recon.SetIdentityPrior(identity_centroid);
// Perform reconstruction
if(!direct_multi_recon) {
boost::timer::auto_cpu_timer t("Single image reconstruction finished in %w seconds.\n");
single_recon.Reconstruct(opt_params);
} else continue;
// Store results
auto tm = single_recon.GetGeometry();
param_sets[i].mesh.UpdateVertices(tm);
param_sets[i].mesh.ComputeNormals();
param_sets[i].model = single_recon.GetModelParameters();
param_sets[i].indices = single_recon.GetIndices();
param_sets[i].cam = single_recon.GetCameraParameters();
if (true) {
VisualizeReconstructionResult(step_single_recon_result_path, i);
fs::path image_path = fs::path(image_filenames[i]);
single_recon.SaveReconstructionResults( (step_single_recon_result_path / fs::path(image_path.stem().string() + ".res")).string());
}
}
// TODO Parameters estimation step, choose a consistent set of images for joint
// optimization
MatrixXd identity_weights(param_sets[0].model.Wid.rows(), num_images);
for(int i=0;i<num_images;++i) {
identity_weights.col(i) = param_sets[i].model.Wid;
}
identity_weights_history.push_back(identity_weights);
// Remove outliers
fs::path selection_result_path = step_result_path / fs::path("selection");
safe_create(selection_result_path);
int selection_method = enable_selection?1:2;
switch(selection_method) {
case 0: {
const double ratios[] = {0.0, 0.4, 0.6, 0.8};
consistent_set = StatsUtils::FindConsistentSet(identity_weights, 0.5, ratios[iters_main_loop] * num_images, &identity_centroid);
assert(consistent_set.size() > 0);
for(auto i : consistent_set) {
VisualizeReconstructionResult(selection_result_path, i);
}
break;
}
case 1: {
double ratios[] = {0.0, 0.4, 0.6, 0.8};
// HACK for testing the system without progressive reconstruction
if(max_iters_main_loop == 1) ratios[1] = 0.8;
// Take the first few as good shape
int k = max(1, static_cast<int>(ratios[iters_main_loop] * num_images));
consistent_set.clear();
auto take_first_k = [](vector<pair<int, double>> stats, int k) {
set<int> subset;
std::sort(stats.begin(), stats.end(), [](pair<int,double> a, pair<int, double> b){
return a.second < b.second;
});
for(int i=0;i<k;++i) {
subset.insert(stats[i].first);
}
return subset;
};
// Choose the ones with smallest error, not very useful
vector<pair<int, double>> errors(num_images);
for(int i=0;i<num_images;++i) {
errors[i] = make_pair(i, param_sets[i].stats.avg_error);
}
auto subset_error = take_first_k(errors, num_images);
for(auto sx : subset_error) cout << sx << ' '; cout << endl;
// Compute the distance to mean identity weights, choose the close ones
VectorXd mean_identity = StatsUtils::mean(identity_weights, 2);
vector<pair<int, double>> d_identity(num_images);
for(int i=0;i<num_images;++i) {
d_identity[i] = make_pair(i, (identity_weights.col(i) - mean_identity).norm());
}
auto subset_identity = take_first_k(d_identity, k);
for(auto sx : subset_identity) cout << sx << ' '; cout << endl;
// Compute the norm of the expression weights, choose the smaller ones
vector<pair<int, double>> n_expression(num_images);
for(int i=0;i<num_images;++i) {
n_expression[i] = make_pair(i, (param_sets[i].model.Wexp_FACS).norm());
}
auto subset_expression = take_first_k(n_expression, 0.8 * num_images);
for(auto sx : subset_expression) cout << sx << ' '; cout << endl;
#if 1
if(iters_main_loop == 1) {
// Compute the RMSE of color transferred texture
// Collect texture information from each input (image, mesh) pair to obtain mean texture
bool generate_mean_texture = true;
vector<vector<int>> face_indices_maps;
{
for(int img_i=0;img_i<num_images;++img_i) {
const auto& mesh = param_sets[img_i].mesh;
// for each image bundle, render the mesh to FBO with culling to get the visible triangles
OffscreenMeshVisualizer visualizer(image_points_pairs[img_i].first.width(),
image_points_pairs[img_i].first.height());
visualizer.SetMVPMode(OffscreenMeshVisualizer::CamPerspective);
visualizer.SetRenderMode(OffscreenMeshVisualizer::Mesh);
visualizer.BindMesh(param_sets[img_i].mesh);
visualizer.SetCameraParameters(param_sets[img_i].cam);
visualizer.SetMeshRotationTranslation(param_sets[img_i].model.R, param_sets[img_i].model.T);
visualizer.SetIndexEncoded(true);
visualizer.SetEnableLighting(false);
QImage img = visualizer.Render();
//img.save("mesh.png");
// find the visible triangles from the index map
auto triangles_indices_pair = FindTrianglesIndices(img);
set<int> triangles = triangles_indices_pair.first;
face_indices_maps.push_back(triangles_indices_pair.second);
cerr << "triangles = " << triangles.size() << endl;
// get the projection parameters
glm::dmat4 Rmat = glm::eulerAngleYXZ(param_sets[img_i].model.R[0],
param_sets[img_i].model.R[1],
param_sets[img_i].model.R[2]);
glm::dmat4 Tmat = glm::translate(glm::dmat4(1.0),
glm::dvec3(param_sets[img_i].model.T[0],
param_sets[img_i].model.T[1],
param_sets[img_i].model.T[2]));
glm::dmat4 Mview = Tmat * Rmat;
// FOR DEBUGGING
#if 0
// for each visible triangle, compute the coordinates of its 3 corners
QImage img_vertices = img;
vector<vector<glm::dvec3>> triangles_projected;
for(auto tidx : triangles) {
auto face_i = mesh.face(tidx);
auto v0_mesh = mesh.vertex(face_i[0]);
auto v1_mesh = mesh.vertex(face_i[1]);
auto v2_mesh = mesh.vertex(face_i[2]);
glm::dvec3 v0_tri = ProjectPoint(glm::dvec3(v0_mesh[0], v0_mesh[1], v0_mesh[2]), Mview, param_sets[img_i].cam);
glm::dvec3 v1_tri = ProjectPoint(glm::dvec3(v1_mesh[0], v1_mesh[1], v1_mesh[2]), Mview, param_sets[img_i].cam);
glm::dvec3 v2_tri = ProjectPoint(glm::dvec3(v2_mesh[0], v2_mesh[1], v2_mesh[2]), Mview, param_sets[img_i].cam);
triangles_projected.push_back(vector<glm::dvec3>{v0_tri, v1_tri, v2_tri});
img_vertices.setPixel(v0_tri.x, img.height()-1-v0_tri.y, qRgb(255, 255, 255));
img_vertices.setPixel(v1_tri.x, img.height()-1-v1_tri.y, qRgb(255, 255, 255));
img_vertices.setPixel(v2_tri.x, img.height()-1-v2_tri.y, qRgb(255, 255, 255));
}
img_vertices.save("mesh_with_vertices.png");
#endif
#define DEBUG_RECON 1 // for visualizing large scale recon selection related data
message("generating mean texture...");
message("collecting texels...");
if(generate_mean_texture) {
// for each pixel in the texture map, use backward projection to obtain pixel value in the input image
// accumulate the texels in average texel map
for(int ti=0;ti<tex_size;++ti) {
for(int tj=0;tj<tex_size;++tj) {
PixelInfo pix_ij = albedo_pixel_map[ti][tj];
// skip if the triangle is not visible
if(triangles.find(pix_ij.fidx) == triangles.end()) continue;
auto face_i = mesh.face(pix_ij.fidx);
auto v0_mesh = mesh.vertex(face_i[0]);
auto v1_mesh = mesh.vertex(face_i[1]);
auto v2_mesh = mesh.vertex(face_i[2]);
auto v = v0_mesh * pix_ij.bcoords.x + v1_mesh * pix_ij.bcoords.y + v2_mesh * pix_ij.bcoords.z;
glm::dvec3 v_img = ProjectPoint(glm::dvec3(v[0], v[1], v[2]), Mview, param_sets[img_i].cam);
// take the pixel from the input image through bilinear sampling
glm::dvec3 texel = bilinear_sample(image_points_pairs[img_i].first, v_img.x, image_points_pairs[img_i].first.height()-1-v_img.y);
if(texel.r < 0 && texel.g < 0 && texel.b < 0) continue;
mean_texture[ti][tj] += texel;
mean_texture_weight[ti][tj] += 1.0;
}
}
}
}
message("done.");
try {
// [Optional]: render the mesh with texture to verify the texel values
if(generate_mean_texture) {
message("computing mean texture...");
mean_texture_image = QImage(tex_size, tex_size, QImage::Format_ARGB32);
mean_texture_image.fill(0);
for(int ti=0; ti<tex_size; ++ti) {
for (int tj=0; tj<(tex_size/2); ++tj) {
double weight_ij = mean_texture_weight[ti][tj];
double weight_ij_s = mean_texture_weight[ti][tex_size-1-tj];
if(weight_ij == 0 && weight_ij_s == 0) {
mean_texture_mat.at<cv::Vec3d>(ti, tj) = cv::Vec3d(0, 0, 0);
continue;
} else {
glm::dvec3 texel = (mean_texture[ti][tj] + mean_texture[ti][tex_size-1-tj]) / (weight_ij + weight_ij_s);
mean_texture[ti][tj] = texel;
mean_texture[ti][tex_size-1-tj] = texel;
mean_texture_image.setPixel(tj, ti, qRgb(texel.r, texel.g, texel.b));
mean_texture_image.setPixel(tex_size-1-tj, ti, qRgb(texel.r, texel.g, texel.b));
mean_texture_mat.at<cv::Vec3d>(ti, tj) = cv::Vec3d(texel.x, texel.y, texel.z);
mean_texture_mat.at<cv::Vec3d>(ti, tex_size-1-tj) = cv::Vec3d(texel.x, texel.y, texel.z);
}
}
}
message("done.");
cv::resize(mean_texture_mat, mean_texture_mat, cv::Size(), 0.25, 0.25);
//cv::Mat mean_texture_refined_mat = mean_texture_mat.clone();
cv::Mat mean_texture_refined_mat;
{
boost::timer::auto_cpu_timer timer_solve(
"[Joint optimization] Mean texture generation = %w seconds.\n");
#if 1
cv::GaussianBlur(mean_texture_mat, mean_texture_refined_mat, cv::Size(5, 5), 3.0);
mean_texture_refined_mat = StatsUtils::MeanShiftSegmentation(mean_texture_refined_mat, 5.0, 30.0, 0.5);
mean_texture_refined_mat = 0.25 * mean_texture_mat + 0.75 * mean_texture_refined_mat;
/*
mean_texture_refined_mat = StatsUtils::MeanShiftSegmentation(mean_texture_refined_mat, 10.0, 30.0, 0.5);
mean_texture_refined_mat = 0.25 * mean_texture_mat + 0.75 * mean_texture_refined_mat;
mean_texture_refined_mat = StatsUtils::MeanShiftSegmentation(mean_texture_refined_mat, 20.0, 30.0, 0.5);
mean_texture_refined_mat = 0.25 * mean_texture_mat + 0.75 * mean_texture_refined_mat;
*/
cv::resize(mean_texture_refined_mat, mean_texture_refined_mat, cv::Size(), 4.0, 4.0);
#else
cv::Mat mean_texture_refined_mat = mean_texture_mat;
#endif
}
QImage mean_texture_image_refined(tex_size, tex_size, QImage::Format_ARGB32);
for(int ti=0;ti<tex_size;++ti) {
for(int tj=0;tj<tex_size;++tj) {
cv::Vec3d pix = mean_texture_refined_mat.at<cv::Vec3d>(ti, tj);
mean_texture_image_refined.setPixel(tj, ti, qRgb(pix[0], pix[1], pix[2]));
}
}
#if DEBUG_RECON
mean_texture_image.save( (step_result_path / fs::path("mean_texture.png")).string().c_str() );
mean_texture_image_refined.save( (step_result_path / fs::path("mean_texture_refined.png")).string().c_str() );
#endif
mean_texture_image = mean_texture_image_refined;
}
} catch(exception& e) {
cerr << e.what() << endl;
exit(1);
}
}
}
vector<pair<int, double>> d_texture(num_images);
// Rendering the albedo to each image
vector<QImage> albedo_images(num_images);
//#pragma omp parallel for
for(int i=0;i<num_images;++i) {
// for each image bundle, render the mesh to FBO with culling to get the visible triangles
OffscreenMeshVisualizer visualizer(image_points_pairs[i].first.width(),
image_points_pairs[i].first.height());
visualizer.SetMVPMode(OffscreenMeshVisualizer::CamPerspective);
visualizer.SetRenderMode(OffscreenMeshVisualizer::TexturedMesh);
visualizer.BindMesh(param_sets[i].mesh);
visualizer.BindTexture(mean_texture_image);
visualizer.SetCameraParameters(param_sets[i].cam);
visualizer.SetMeshRotationTranslation(param_sets[i].model.R, param_sets[i].model.T);
visualizer.SetFacesToRender(valid_faces_indices);
vector<float> depth_i;
tie(albedo_images[i],depth_i) = visualizer.RenderWithDepth();
auto unpack_pixel = [](QRgb pix) {
return Vector3d(qRed(pix)/255.0, qGreen(pix)/255.0, qBlue(pix)/255.0);
};
int img_w = image_points_pairs[i].first.width();
int img_h = image_points_pairs[i].first.height();
vector<int> valid_pixels_map_i;
for(int y=0;y<img_h;++y) {
for(int x=0;x<img_w;++x) {
float dval = depth_i[(img_h-1-y)*img_w+x];
if(dval<1) {
valid_pixels_map_i.push_back(y*img_w + x);
QRgb pix1 = albedo_images[i].pixel(x, y);
albedo_images[i].setPixel(x, y, qRgb(qBlue(pix1), qGreen(pix1), qRed(pix1)));
}
}
}
albedo_images[i] = TransferColor(albedo_images[i], image_points_pairs[i].first,
valid_pixels_map_i, valid_pixels_map_i);
#if DEBUG_RECON
albedo_images[i].save( (step_result_path / fs::path("albedo_" + std::to_string(i) + ".png")).string().c_str() );
#endif
// compute texture difference
double diff_i = 0;
int valid_count = 0;
#if DEBUG_RECON
QImage depth_image = albedo_images[i];
depth_image.fill(0);
#endif
for(int y=0;y<img_h;++y) {
for(int x=0;x<img_w;++x) {
float dval = depth_i[(img_h-1-y)*img_w+x];
if(dval<1) {
#if DEBUG_RECON
depth_image.setPixel(x, y, qRgb(dval*255, 0, (1-dval)*255));
#endif
valid_count++;
QRgb pix1 = albedo_images[i].pixel(x, y);
QRgb pix2 = image_points_pairs[i].first.pixel(x, y);
auto p1 = unpack_pixel(pix1);
auto p2 = unpack_pixel(pix2);
double dr = p1[0] - p2[0];
double dg = p1[1] - p2[1];
double db = p1[2] - p2[2];
diff_i += dr*dr+dg*dg+db*db;
}
}
}
d_texture[i] = make_pair(i, diff_i/valid_count);
#if DEBUG_RECON
depth_image.save( (step_result_path / fs::path("depth_" + std::to_string(i) + ".png")).string().c_str() );
#endif
}
auto subset_texture = take_first_k(d_texture, k);
for(auto sx : subset_texture) cout << sx << ' '; cout << endl;
#endif
// Merge them into a consistent set
set<int> final_set(subset_identity.begin(), subset_identity.end());
for(int i=0;i<num_images;++i) {
if(subset_identity.count(i)) {
#if 1
// Use expression as a condition
bool exclude = (subset_expression.count(i) == 0) || (subset_error.count(i) == 0) ||
(subset_texture.count(i) == 0) || (find(inliers.begin(), inliers.end(), i) == inliers.end());
#else
// Use only recon error and texture metric
bool exclude = (subset_error.count(i) == 0) || (subset_texture.count(i) == 0);
#endif
if(exclude) final_set.erase(i);
}
}
// rare case, we go with the mean identity
if(final_set.empty()) {
final_set = take_first_k(d_identity, 1);
}
consistent_set.assign(final_set.begin(), final_set.end());
for(auto i : consistent_set) {
VisualizeReconstructionResult(selection_result_path, i);
}
break;
}
case 2: {
// nothing to do, just use whatever consistent_set is
break;
}
}
// Compute the centroid of the consistent set
identity_centroid = VectorXd::Zero(param_sets[0].model.Wid.rows());
for(auto i : consistent_set) {
cout << i << endl;
identity_centroid += param_sets[i].model.Wid;
}
identity_centroid /= consistent_set.size();
// Update the identity weights for all images
for(auto& param : param_sets) {
param.model.Wid = identity_centroid;
}
// Joint reconstruction step, obtain refined identity weights
int num_iters_joint_optimization = (iters_main_loop == max_iters_main_loop)?4:3;
// Just one-pass optimization
opt_params.num_initializations = 1;
for(int iters_joint_optimization=0;
iters_joint_optimization<num_iters_joint_optimization;
++iters_joint_optimization){
// [Joint reconstruction] step 1: estimate pose and expression weights individually
// In the final iteration, no need to refine the identity weights anymore
if((iters_joint_optimization == num_iters_joint_optimization - 1) && (iters_main_loop == max_iters_main_loop)) {
// Store the final selection
// HACK try to use the inliners as final_chosen_set to produce more point clouds
#if 1
final_chosen_set = consistent_set;
#else
// No good!
final_chosen_set = inliers;
#endif
// Reset consistent_set so all images will be reconstructed in this iteration
consistent_set.resize(num_images);
for(int i=0;i<num_images;++i) consistent_set[i] = i;
}
fs::path joint_pre_result_path = step_result_path / fs::path("joint_recon_" + to_string(iters_joint_optimization) + "_pre");
safe_create(joint_pre_result_path);
for(auto i : consistent_set) {
single_recon.SetMesh(param_sets[i].mesh);
single_recon.SetIndices(param_sets[i].indices);
single_recon.SetImageSize(param_sets[i].recon.imageWidth, param_sets[i].recon.imageHeight);
single_recon.SetConstraints(param_sets[i].recon.cons);
single_recon.SetInitialParameters(param_sets[i].model, param_sets[i].cam);
single_recon.SetOptimizationMode(
static_cast<typename SingleImageReconstructor<Constraint>::OptimizationMode>(
SingleImageReconstructor<Constraint>::Pose
| SingleImageReconstructor<Constraint>::Expression
| SingleImageReconstructor<Constraint>::FocalLength));
{
boost::timer::auto_cpu_timer t("Single image reconstruction finished in %w seconds.\n");
single_recon.Reconstruct(opt_params);
}
// Store results
auto tm = single_recon.GetGeometry();
param_sets[i].mesh.UpdateVertices(tm);
param_sets[i].model = single_recon.GetModelParameters();
param_sets[i].indices = single_recon.GetIndices();
param_sets[i].cam = single_recon.GetCameraParameters();
if (true) {
// Visualize the reconstruction results
VisualizeReconstructionResult(joint_pre_result_path, i);
}
}
if((iters_joint_optimization == num_iters_joint_optimization - 1) && (iters_main_loop == max_iters_main_loop)) {
// In the final iteration, no need to refine the identity weights anymore
break;
}
// [Joint reconstruction] step 2: estimate identity weights jointly
{
fs::path joint_post_result_path = step_result_path / fs::path("joint_recon_" + to_string(iters_joint_optimization) + "_post");
safe_create(joint_post_result_path);
ceres::Problem problem;
VectorXd params = param_sets[0].model.Wid;
// Add constraints from each image
for(auto i : consistent_set) {
// Create a projected model first
vector<MultilinearModel> model_projected_i(param_sets[i].indices.size());
for(size_t j=0;j<param_sets[i].indices.size();++j) {
model_projected_i[j] = model.project(vector<int>(1, param_sets[i].indices[j]));
model_projected_i[j].ApplyWeights(param_sets[i].model.Wid, param_sets[i].model.Wexp);
}
// Create relevant matrices
glm::dmat4 Rmat_i = glm::eulerAngleYXZ(param_sets[i].model.R[0], param_sets[i].model.R[1],
param_sets[i].model.R[2]);
glm::dmat4 Tmat_i = glm::translate(glm::dmat4(1.0),
glm::dvec3(param_sets[i].model.T[0],
param_sets[i].model.T[1],
param_sets[i].model.T[2]));
glm::dmat4 Mview_i = Tmat_i * Rmat_i;
double puple_distance = glm::distance(
0.5 * (param_sets[i].recon.cons[28].data + param_sets[i].recon.cons[30].data),
0.5 * (param_sets[i].recon.cons[32].data + param_sets[i].recon.cons[34].data));
double weight_i = 100.0 / puple_distance;
// Add per-vertex constraints
for(size_t j=0;j<param_sets[i].indices.size();++j) {
ceres::CostFunction * cost_function = new IdentityCostFunction_analytic(
model_projected_i[j], param_sets[i].recon.cons[j], params.size(), Mview_i, Rmat_i,
param_sets[i].cam, weight_i);
problem.AddResidualBlock(cost_function, NULL, params.data());
}
}
// Add prior constraint
ceres::DynamicNumericDiffCostFunction<PriorCostFunction> *prior_cost_function =
new ceres::DynamicNumericDiffCostFunction<PriorCostFunction>(
new PriorCostFunction(prior.Wid_avg, prior.inv_sigma_Wid,
prior.weight_Wid * consistent_set.size()));
prior_cost_function->AddParameterBlock(params.size());
prior_cost_function->SetNumResiduals(1);
problem.AddResidualBlock(prior_cost_function, NULL, params.data());
// Solve it
{
boost::timer::auto_cpu_timer timer_solve(
"[Identity optimization] Problem solve time = %w seconds.\n");
ceres::Solver::Options options;
options.max_num_iterations = 3;
options.minimizer_type = ceres::LINE_SEARCH;
options.line_search_direction_type = ceres::LBFGS;
DEBUG_EXPR(options.minimizer_progress_to_stdout = true;)
ceres::Solver::Summary summary;
ceres::Solve(options, &problem, &summary);
DEBUG_OUTPUT(summary.FullReport())
}
// Update the identity weights
for(auto& param : param_sets) {
param.model.Wid = params;
// Also update geometry if needed
{
model.ApplyWeights(param.model.Wid, param.model.Wexp);
param.mesh.UpdateVertices(model.GetTM());
param.mesh.ComputeNormals();
}
}
for(auto i : consistent_set) {
if(true) {
VisualizeReconstructionResult(joint_post_result_path, i);
}
}
identity_weights_centroid_history.push_back(params);
}
}
} // end of main reconstruction loop
// Output the reconstructed identity weights
{
for(size_t i=0;i<identity_weights_history.size();++i) {
ofstream fout("identity_matrix" + std::to_string(i) + ".txt");
fout << identity_weights_history[i];
fout.close();
}
for(size_t i=0;i<identity_weights_centroid_history.size();++i) {
ofstream fout("identity_centroid" + std::to_string(i) + ".txt");
fout << identity_weights_centroid_history[i];
fout.close();
}
}
// Output the chosen subset
{
ofstream fout( (result_path / fs::path("selection.txt")).string() );
for(int i=0;i<final_chosen_set.size();++i) {
// The row indices in the settings file!
const int row_index = final_chosen_set[i];
const int L = param_sets[row_index].img_filename.size();
fout << param_sets[row_index].img_filename.substr(0, L-4) << endl;
}
fout.close();
}
// Visualize the final reconstruction results
for(int i=0;i<num_images;++i) {
// Visualize the reconstruction results
#if 0
MeshVisualizer* w = new MeshVisualizer("reconstruction result", param_sets[i].mesh);
w->BindConstraints(image_points_pairs[i].second);
w->BindImage(image_points_pairs[i].first);
w->BindLandmarks(init_indices);
w->BindUpdatedLandmarks(param_sets[i].indices);
w->SetMeshRotationTranslation(param_sets[i].model.R, param_sets[i].model.T);
w->SetCameraParameters(param_sets[i].cam);
int show_width = image_points_pairs[i].first.width();
int show_height = image_points_pairs[i].first.height();
double show_ratio = 640.0 / show_height;
w->resize(show_width * show_ratio, 640);
w->show();
w->paintGL();
QImage recon_image = w->grabFrameBuffer();
fs::path image_path = fs::path(image_filenames[i]);
recon_image.save( (result_path / fs::path(image_path.stem().string() + "_recon.png")).string().c_str() );
#else
VisualizeReconstructionResult(result_path, i);
#endif
ofstream fout(image_filenames[i] + ".res");
fout << param_sets[i].cam << endl;
fout << param_sets[i].model << endl;
fout << param_sets[i].stats << endl;
fout.close();
}
return true;
}
#endif //MULTILINEARRECONSTRUCTION_MULTIIMAGERECONSTRUCTOR_H
|
DRB056-jacobi2d-tile-no.c | /**
* jacobi-2d-imper.c: This file is part of the PolyBench/C 3.2 test suite.
* Jacobi with array copying, no reduction. with tiling and nested SIMD.
*
* Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://polybench.sourceforge.net
* License: /LICENSE.OSU.txt
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include "polybench/polybench.h"
/* Include benchmark-specific header. */
/* Default data type is double, default size is 20x1000. */
#include "polybench/jacobi-2d-imper.h"
/* Array initialization. */
static void init_array(int n,double A[500 + 0][500 + 0],double B[500 + 0][500 + 0])
{
//int i;
//int j;
{
int c1;
int c2;
int c4;
int c3;
if (n >= 1) {
#pragma omp parallel for private(c1 ,c3 ,c4 ,c2 )
for (c1 = 0; c1 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c1++) {
#pragma omp parallel for private(c2 ,c3 ,c4 )
for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {
#pragma omp parallel for private(c3 ,c4 )
for (c3 = 16 * c2; c3 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c3++) {
#pragma omp parallel for private(c4 )
for (c4 = 16 * c1; c4 <= ((16 * c1 + 15 < n + -1?16 * c1 + 15 : n + -1)); c4++) {
A[c4][c3] = (((double )c4) * (c3 + 2) + 2) / n;
B[c4][c3] = (((double )c4) * (c3 + 3) + 3) / n;
}
}
}
}
}
}
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static void print_array(int n,double A[500 + 0][500 + 0])
{
int i;
int j;
for (i = 0; i < n; i++)
for (j = 0; j < n; j++) {
fprintf(stderr,"%0.2lf ",A[i][j]);
if ((i * n + j) % 20 == 0)
fprintf(stderr,"\n");
}
fprintf(stderr,"\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static void kernel_jacobi_2d_imper(int tsteps,int n,double A[500 + 0][500 + 0],double B[500 + 0][500 + 0])
{
//int t;
//int i;
//int j;
//#pragma scop
{
int c0;
int c1;
int c3;
int c2;
int c4;
int c5;
if (n >= 3 && tsteps >= 1) {
for (c0 = 0; c0 <= (((n + 3 * tsteps + -4) * 16 < 0?((16 < 0?-((-(n + 3 * tsteps + -4) + 16 + 1) / 16) : -((-(n + 3 * tsteps + -4) + 16 - 1) / 16))) : (n + 3 * tsteps + -4) / 16)); c0++) {
#pragma omp parallel for private(c1 ,c5 ,c4 ,c2 ,c3 )
for (c1 = (((2 * c0 * 3 < 0?-(-(2 * c0) / 3) : ((3 < 0?(-(2 * c0) + - 3 - 1) / - 3 : (2 * c0 + 3 - 1) / 3)))) > (((16 * c0 + -1 * tsteps + 1) * 16 < 0?-(-(16 * c0 + -1 * tsteps + 1) / 16) : ((16 < 0?(-(16 * c0 + -1 * tsteps + 1) + - 16 - 1) / - 16 : (16 * c0 + -1 * tsteps + 1 + 16 - 1) / 16))))?((2 * c0 * 3 < 0?-(-(2 * c0) / 3) : ((3 < 0?(-(2 * c0) + - 3 - 1) / - 3 : (2 * c0 + 3 - 1) / 3)))) : (((16 * c0 + -1 * tsteps + 1) * 16 < 0?-(-(16 * c0 + -1 * tsteps + 1) / 16) : ((16 < 0?(-(16 * c0 + -1 * tsteps + 1) + - 16 - 1) / - 16 : (16 * c0 + -1 * tsteps + 1 + 16 - 1) / 16))))); c1 <= (((((((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)) < (((32 * c0 + n + 29) * 48 < 0?((48 < 0?-((-(32 * c0 + n + 29) + 48 + 1) / 48) : -((-(32 * c0 + n + 29) + 48 - 1) / 48))) : (32 * c0 + n + 29) / 48))?(((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)) : (((32 * c0 + n + 29) * 48 < 0?((48 < 0?-((-(32 * c0 + n + 29) + 48 + 1) / 48) : -((-(32 * c0 + n + 29) + 48 - 1) / 48))) : (32 * c0 + n + 29) / 48)))) < c0?(((((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)) < (((32 * c0 + n + 29) * 48 < 0?((48 < 0?-((-(32 * c0 + n + 29) + 48 + 1) / 48) : -((-(32 * c0 + n + 29) + 48 - 1) / 48))) : (32 * c0 + n + 29) / 48))?(((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)) : (((32 * c0 + n + 29) * 48 < 0?((48 < 0?-((-(32 * c0 + n + 29) + 48 + 1) / 48) : -((-(32 * c0 + n + 29) + 48 - 1) / 48))) : (32 * c0 + n + 29) / 48)))) : c0)); c1++) {
for (c2 = ((((16 * c1 + -1 * n + -12) * 16 < 0?-(-(16 * c1 + -1 * n + -12) / 16) : ((16 < 0?(-(16 * c1 + -1 * n + -12) + - 16 - 1) / - 16 : (16 * c1 + -1 * n + -12 + 16 - 1) / 16)))) > 2 * c0 + -2 * c1?(((16 * c1 + -1 * n + -12) * 16 < 0?-(-(16 * c1 + -1 * n + -12) / 16) : ((16 < 0?(-(16 * c1 + -1 * n + -12) + - 16 - 1) / - 16 : (16 * c1 + -1 * n + -12 + 16 - 1) / 16)))) : 2 * c0 + -2 * c1); c2 <= (((((((16 * c1 + n + 12) * 16 < 0?((16 < 0?-((-(16 * c1 + n + 12) + 16 + 1) / 16) : -((-(16 * c1 + n + 12) + 16 - 1) / 16))) : (16 * c1 + n + 12) / 16)) < (((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16))?(((16 * c1 + n + 12) * 16 < 0?((16 < 0?-((-(16 * c1 + n + 12) + 16 + 1) / 16) : -((-(16 * c1 + n + 12) + 16 - 1) / 16))) : (16 * c1 + n + 12) / 16)) : (((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)))) < (((32 * c0 + -32 * c1 + n + 29) * 16 < 0?((16 < 0?-((-(32 * c0 + -32 * c1 + n + 29) + 16 + 1) / 16) : -((-(32 * c0 + -32 * c1 + n + 29) + 16 - 1) / 16))) : (32 * c0 + -32 * c1 + n + 29) / 16))?(((((16 * c1 + n + 12) * 16 < 0?((16 < 0?-((-(16 * c1 + n + 12) + 16 + 1) / 16) : -((-(16 * c1 + n + 12) + 16 - 1) / 16))) : (16 * c1 + n + 12) / 16)) < (((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16))?(((16 * c1 + n + 12) * 16 < 0?((16 < 0?-((-(16 * c1 + n + 12) + 16 + 1) / 16) : -((-(16 * c1 + n + 12) + 16 - 1) / 16))) : (16 * c1 + n + 12) / 16)) : (((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)))) : (((32 * c0 + -32 * c1 + n + 29) * 16 < 0?((16 < 0?-((-(32 * c0 + -32 * c1 + n + 29) + 16 + 1) / 16) : -((-(32 * c0 + -32 * c1 + n + 29) + 16 - 1) / 16))) : (32 * c0 + -32 * c1 + n + 29) / 16)))); c2++) {
if (c0 <= (((32 * c1 + 16 * c2 + -1 * n + 1) * 32 < 0?((32 < 0?-((-(32 * c1 + 16 * c2 + -1 * n + 1) + 32 + 1) / 32) : -((-(32 * c1 + 16 * c2 + -1 * n + 1) + 32 - 1) / 32))) : (32 * c1 + 16 * c2 + -1 * n + 1) / 32)) && c1 <= c2 + -1) {
if ((n + 1) % 2 == 0) {
for (c4 = (16 * c1 > 16 * c2 + -1 * n + 3?16 * c1 : 16 * c2 + -1 * n + 3); c4 <= 16 * c1 + 15; c4++) {
A[-16 * c2 + c4 + n + -2][n + -2] = B[-16 * c2 + c4 + n + -2][n + -2];
}
}
}
if (c0 <= (((48 * c1 + -1 * n + 1) * 32 < 0?((32 < 0?-((-(48 * c1 + -1 * n + 1) + 32 + 1) / 32) : -((-(48 * c1 + -1 * n + 1) + 32 - 1) / 32))) : (48 * c1 + -1 * n + 1) / 32)) && c1 >= c2) {
if ((n + 1) % 2 == 0) {
for (c5 = (16 * c2 > 16 * c1 + -1 * n + 3?16 * c2 : 16 * c1 + -1 * n + 3); c5 <= ((16 * c1 < 16 * c2 + 15?16 * c1 : 16 * c2 + 15)); c5++) {
A[n + -2][-16 * c1 + c5 + n + -2] = B[n + -2][-16 * c1 + c5 + n + -2];
}
}
}
for (c3 = ((((((16 * c1 + -1 * n + 2) * 2 < 0?-(-(16 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c1 + -1 * n + 2 + 2 - 1) / 2)))) > (((16 * c2 + -1 * n + 2) * 2 < 0?-(-(16 * c2 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c2 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c2 + -1 * n + 2 + 2 - 1) / 2))))?(((16 * c1 + -1 * n + 2) * 2 < 0?-(-(16 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c1 + -1 * n + 2 + 2 - 1) / 2)))) : (((16 * c2 + -1 * n + 2) * 2 < 0?-(-(16 * c2 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c2 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c2 + -1 * n + 2 + 2 - 1) / 2)))))) > 16 * c0 + -16 * c1?(((((16 * c1 + -1 * n + 2) * 2 < 0?-(-(16 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c1 + -1 * n + 2 + 2 - 1) / 2)))) > (((16 * c2 + -1 * n + 2) * 2 < 0?-(-(16 * c2 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c2 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c2 + -1 * n + 2 + 2 - 1) / 2))))?(((16 * c1 + -1 * n + 2) * 2 < 0?-(-(16 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c1 + -1 * n + 2 + 2 - 1) / 2)))) : (((16 * c2 + -1 * n + 2) * 2 < 0?-(-(16 * c2 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c2 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c2 + -1 * n + 2 + 2 - 1) / 2)))))) : 16 * c0 + -16 * c1); c3 <= ((((((8 * c1 + 6 < 8 * c2 + 6?8 * c1 + 6 : 8 * c2 + 6)) < tsteps + -1?((8 * c1 + 6 < 8 * c2 + 6?8 * c1 + 6 : 8 * c2 + 6)) : tsteps + -1)) < 16 * c0 + -16 * c1 + 15?((((8 * c1 + 6 < 8 * c2 + 6?8 * c1 + 6 : 8 * c2 + 6)) < tsteps + -1?((8 * c1 + 6 < 8 * c2 + 6?8 * c1 + 6 : 8 * c2 + 6)) : tsteps + -1)) : 16 * c0 + -16 * c1 + 15)); c3++) {
if (c1 <= ((c3 * 8 < 0?((8 < 0?-((-c3 + 8 + 1) / 8) : -((-c3 + 8 - 1) / 8))) : c3 / 8))) {
for (c5 = (16 * c2 > 2 * c3 + 1?16 * c2 : 2 * c3 + 1); c5 <= ((16 * c2 + 15 < 2 * c3 + n + -2?16 * c2 + 15 : 2 * c3 + n + -2)); c5++) {
B[1][-2 * c3 + c5] = 0.2 * (A[1][-2 * c3 + c5] + A[1][-2 * c3 + c5 - 1] + A[1][1 + (-2 * c3 + c5)] + A[1 + 1][-2 * c3 + c5] + A[1 - 1][-2 * c3 + c5]);
}
}
for (c4 = (16 * c1 > 2 * c3 + 2?16 * c1 : 2 * c3 + 2); c4 <= ((16 * c1 + 15 < 2 * c3 + n + -2?16 * c1 + 15 : 2 * c3 + n + -2)); c4++) {
if (c2 <= ((c3 * 8 < 0?((8 < 0?-((-c3 + 8 + 1) / 8) : -((-c3 + 8 - 1) / 8))) : c3 / 8))) {
B[-2 * c3 + c4][1] = 0.2 * (A[-2 * c3 + c4][1] + A[-2 * c3 + c4][1 - 1] + A[-2 * c3 + c4][1 + 1] + A[1 + (-2 * c3 + c4)][1] + A[-2 * c3 + c4 - 1][1]);
}
for (c5 = (16 * c2 > 2 * c3 + 2?16 * c2 : 2 * c3 + 2); c5 <= ((16 * c2 + 15 < 2 * c3 + n + -2?16 * c2 + 15 : 2 * c3 + n + -2)); c5++) {
B[-2 * c3 + c4][-2 * c3 + c5] = 0.2 * (A[-2 * c3 + c4][-2 * c3 + c5] + A[-2 * c3 + c4][-2 * c3 + c5 - 1] + A[-2 * c3 + c4][1 + (-2 * c3 + c5)] + A[1 + (-2 * c3 + c4)][-2 * c3 + c5] + A[-2 * c3 + c4 - 1][-2 * c3 + c5]);
A[-2 * c3 + c4 + -1][-2 * c3 + c5 + -1] = B[-2 * c3 + c4 + -1][-2 * c3 + c5 + -1];
}
if (c2 >= (((2 * c3 + n + -16) * 16 < 0?-(-(2 * c3 + n + -16) / 16) : ((16 < 0?(-(2 * c3 + n + -16) + - 16 - 1) / - 16 : (2 * c3 + n + -16 + 16 - 1) / 16))))) {
A[-2 * c3 + c4 + -1][n + -2] = B[-2 * c3 + c4 + -1][n + -2];
}
}
if (c1 >= (((2 * c3 + n + -16) * 16 < 0?-(-(2 * c3 + n + -16) / 16) : ((16 < 0?(-(2 * c3 + n + -16) + - 16 - 1) / - 16 : (2 * c3 + n + -16 + 16 - 1) / 16))))) {
for (c5 = (16 * c2 > 2 * c3 + 2?16 * c2 : 2 * c3 + 2); c5 <= ((16 * c2 + 15 < 2 * c3 + n + -1?16 * c2 + 15 : 2 * c3 + n + -1)); c5++) {
A[n + -2][-2 * c3 + c5 + -1] = B[n + -2][-2 * c3 + c5 + -1];
}
}
}
if (c0 >= (((2 * c1 + c2 + -1) * 2 < 0?-(-(2 * c1 + c2 + -1) / 2) : ((2 < 0?(-(2 * c1 + c2 + -1) + - 2 - 1) / - 2 : (2 * c1 + c2 + -1 + 2 - 1) / 2)))) && c1 >= c2 + 1 && c2 <= (((tsteps + -8) * 8 < 0?((8 < 0?-((-(tsteps + -8) + 8 + 1) / 8) : -((-(tsteps + -8) + 8 - 1) / 8))) : (tsteps + -8) / 8))) {
for (c4 = 16 * c1; c4 <= ((16 * c1 + 15 < 16 * c2 + n + 12?16 * c1 + 15 : 16 * c2 + n + 12)); c4++) {
B[-16 * c2 + c4 + -14][1] = 0.2 * (A[-16 * c2 + c4 + -14][1] + A[-16 * c2 + c4 + -14][1 - 1] + A[-16 * c2 + c4 + -14][1 + 1] + A[1 + (-16 * c2 + c4 + -14)][1] + A[-16 * c2 + c4 + -14 - 1][1]);
}
}
if (c0 >= (((3 * c1 + -1) * 2 < 0?-(-(3 * c1 + -1) / 2) : ((2 < 0?(-(3 * c1 + -1) + - 2 - 1) / - 2 : (3 * c1 + -1 + 2 - 1) / 2)))) && c1 <= (((((tsteps + -8) * 8 < 0?((8 < 0?-((-(tsteps + -8) + 8 + 1) / 8) : -((-(tsteps + -8) + 8 - 1) / 8))) : (tsteps + -8) / 8)) < c2?(((tsteps + -8) * 8 < 0?((8 < 0?-((-(tsteps + -8) + 8 + 1) / 8) : -((-(tsteps + -8) + 8 - 1) / 8))) : (tsteps + -8) / 8)) : c2))) {
for (c5 = (16 * c2 > 16 * c1 + 15?16 * c2 : 16 * c1 + 15); c5 <= ((16 * c2 + 15 < 16 * c1 + n + 12?16 * c2 + 15 : 16 * c1 + n + 12)); c5++) {
B[1][-16 * c1 + c5 + -14] = 0.2 * (A[1][-16 * c1 + c5 + -14] + A[1][-16 * c1 + c5 + -14 - 1] + A[1][1 + (-16 * c1 + c5 + -14)] + A[1 + 1][-16 * c1 + c5 + -14] + A[1 - 1][-16 * c1 + c5 + -14]);
}
}
}
}
}
}
}
//#pragma endscop
}
int main(int argc,char **argv)
{
/* Retrieve problem size. */
int n = 500;
int tsteps = 10;
/* Variable declaration/allocation. */
double (*A)[500 + 0][500 + 0];
A = ((double (*)[500 + 0][500 + 0])(polybench_alloc_data(((500 + 0) * (500 + 0)),(sizeof(double )))));
;
double (*B)[500 + 0][500 + 0];
B = ((double (*)[500 + 0][500 + 0])(polybench_alloc_data(((500 + 0) * (500 + 0)),(sizeof(double )))));
;
/* Initialize array(s). */
init_array(n, *A, *B);
/* Start timer. */
polybench_timer_start();
;
/* Run kernel. */
kernel_jacobi_2d_imper(tsteps,n, *A, *B);
/* Stop and print timer. */
polybench_timer_stop();
;
polybench_timer_print();
;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
if (argc > 42 && !strcmp(argv[0],""))
print_array(n, *A);
/* Be clean. */
free(((void *)A));
;
free(((void *)B));
;
return 0;
}
|
parallel.c | #include <stdio.h>
#include <omp.h>
#include <time.h>
#include <stdlib.h>
void shellsort(int arr[], int n);
int IsSort(int *array, int size);
int main(int argc, char** argv) {
int size = 1500000, algorithm, i, *arr, opt;
arr = malloc(size* sizeof(int));
srand(time(NULL));
for (i = 0; i < size; i++)
arr[i] = rand()%size;
double start, end;
omp_set_num_threads(12);
start = omp_get_wtime();
shellsort(arr, size);
end = omp_get_wtime();
printf("Tempo: %.3f\n",end - start);
if(IsSort(arr, size) == 1)
printf("Result: Sorted\n");
else
printf("Result: Not Sorted\n");
return 0;
}
void shellsort(int arr[], int n){
int gap, i, j, grupoId, temp;
for (gap = n/2; gap > 0; gap /= 2)
#pragma omp parallel for private(j, i)
for(grupoId = 0; grupoId < gap; grupoId++)
for (i=gap+grupoId; i<n-grupoId; i+=gap) {
int key = arr[i];
j = i - gap;
while (j >= 0 && arr[j] > key) {
arr[j+gap] = arr[j];
j-=gap;
}
arr[j+gap] = key;
}
}
int IsSort(int *array, int size) {
int i, value = 0;
for(i = 1; i < size; i++)
if(array[i-1] > array[i])
return 0;
return 1;
}
|
fig4.34-lastprivate-clause.c | /*
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
Copyright 2009 Sun Microsystems, Inc. All rights reserved.
The contents of this file are subject to the terms of the BSD License("BSD")(the "License").
You can obtain a copy of the License at: http://www.opensparc.net/pubs/t1/licenses/BSD+_License.txt
The BSD License
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistribution of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistribution in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Sun Microsystems, Inc. or the names of
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
This software is provided "AS IS," without a warranty of any kind. ALL
EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY
IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR
NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN MICROSYSTEMS, INC. ("SUN") AND
ITS LICENSORS SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE AS A
RESULT OF USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES.
IN NO EVENT WILL SUN OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT
OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR
PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF SUN HAS
BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
You acknowledge that this software is not designed, licensed or intended for
use in the design, construction, operation or maintenance of any nuclear facility.
*/
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#define TRUE 1
#define FALSE 0
#else
#define omp_get_thread_num() 0
#define omp_get_num_threads() 1
#endif
int main()
{
int i, a, n = 5;
#ifdef _OPENMP
(void) omp_set_dynamic(FALSE);
if (omp_get_dynamic()) {printf("Warning: dynamic adjustment of threads has been set\n");}
(void) omp_set_num_threads(3);
#endif
#pragma omp parallel for private(i) lastprivate(a)
for (i=0; i<n; i++)
{
a = i+1;
printf("Thread %d has a value of a = %d for i = %d\n",
omp_get_thread_num(),a,i);
} /*-- End of parallel for --*/
printf("Value of a after parallel for: a = %d\n",a);
return(0);
}
|
ars_vectorized_environment.h | #ifndef ARS_VECTORIZED_ENVIRONMENT_H
#define ARS_VECTORIZED_ENVIRONMENT_H
#include <algorithm>
#include <thread>
#include <functional>
#include <vector>
/// @param[in] nb_elements : size of your for loop
/// @param[in] functor(start, end) :
/// your function processing a sub chunk of the for loop.
/// "start" is the first index to process (included) until the index "end"
/// (excluded)
/// @code
/// for(int i = start; i < end; ++i)
/// computation(i);
/// @endcode
/// @param use_threads : enable / disable threads.
///
///
static
void parallel_for(unsigned nb_elements,
std::function<void (int start, int end)> functor,
bool use_threads = true)
{
// -------
unsigned nb_threads_hint = 20;//std::thread::hardware_concurrency();
unsigned nb_threads = nb_threads_hint == 0 ? 8 : (nb_threads_hint);
unsigned batch_size = nb_elements / nb_threads;
unsigned batch_remainder = nb_elements % nb_threads;
std::vector< std::thread > my_threads(nb_threads);
if( use_threads )
{
// Multithread execution
for(unsigned i = 0; i < nb_threads; ++i)
{
int start = i * batch_size;
my_threads[i] = std::thread(functor, start, start+batch_size);
}
}
else
{
// Single thread execution (for easy debugging)
for(unsigned i = 0; i < nb_threads; ++i){
int start = i * batch_size;
functor( start, start+batch_size );
}
}
// Deform the elements left
//int start = nb_threads * batch_size;
//functor( start, start+batch_remainder);
// Wait for the other thread to finish their task
if( use_threads )
std::for_each(my_threads.begin(), my_threads.end(), std::mem_fn(&std::thread::join));
}
#define PARALLEL_FOR_BEGIN(nb_elements) parallel_for(nb_elements, [&](int start, int end){ for(int i = start; i < end; ++i)
#define PARALLEL_FOR_END()})
template <typename Algebra, typename AbstractSimulation>
struct VectorizedEnvironment
{
AbstractSimulation& contact_sim;
using Scalar = typename Algebra::Scalar;
using Vector3 = typename Algebra::Vector3;
using Transform = typename Algebra::Transform;
std::vector<std::vector<Scalar>> sim_states_;
std::vector<std::vector<Scalar>> sim_states_with_action_and_variables;
std::vector<std::vector<Scalar>> sim_states_with_graphics_;
std::vector<tds::NeuralNetwork<Algebra> > neural_networks_;
int observation_dim_{0};
VectorizedEnvironment(AbstractSimulation& sim)
:contact_sim(sim)
{
neural_networks_.resize(g_num_total_threads);
sim_states_.resize(g_num_total_threads);
sim_states_with_action_and_variables.resize(g_num_total_threads);
sim_states_with_graphics_.resize(g_num_total_threads);
observation_dim_ = contact_sim.input_dim();
bool use_input_bias = false;
for (int index=0;index<g_num_total_threads;index++)
{
neural_networks_[index].set_input_dim(observation_dim_, use_input_bias);
//network.add_linear_layer(tds::NN_ACT_RELU, 32);
//neural_network.add_linear_layer(tds::NN_ACT_RELU, 64);
bool learn_bias = true;
neural_networks_[index].add_linear_layer(tds::NN_ACT_IDENTITY, sim.action_dim(),learn_bias);
}
}
virtual ~VectorizedEnvironment()
{
}
void init_neural_network(int index, const std::vector<double> &x)
{
neural_networks_[index].set_parameters(x);
}
void seed(long long int s) {
//std::cout<<"seed:" << s << std::endl;
std::srand(s);
}
std::vector< std::vector<double> > reset()
{
for (int index=0;index<g_num_total_threads;index++)
{
sim_states_[index].resize(0);
sim_states_[index].resize(contact_sim.input_dim(), Scalar(0));
MyAlgebra::Vector3 start_pos(0,0,.48);//0.4002847
MyAlgebra::Quaternion start_orn (0,0,0,1);
if (contact_sim.mb_->is_floating())
{
sim_states_[index][0] = start_orn.x();
sim_states_[index][1] = start_orn.y();
sim_states_[index][2] = start_orn.z();
sim_states_[index][3] = start_orn.w();
sim_states_[index][4] = start_pos.x();
sim_states_[index][5] = start_pos.y();
sim_states_[index][6] = start_pos.z();
int qoffset = 7;
for(int j=0;j<get_initial_poses<Scalar>().size();j++)
{
sim_states_[index][j+qoffset] = get_initial_poses<Scalar>()[j]+0.05*((std::rand() * 1. / RAND_MAX)-0.5)*2.0;
}
}
else
{
sim_states_[index][0] = start_pos.x();
sim_states_[index][1] = start_pos.y();
sim_states_[index][2] = start_pos.z();
sim_states_[index][3] = 0;
sim_states_[index][4] = 0;
sim_states_[index][5] = 0;
int qoffset = 6;
for(int j=0;j<get_initial_poses<Scalar>().size();j++)
{
sim_states_[index][j+qoffset] = get_initial_poses<Scalar>()[j]+0.05*((std::rand() * 1. / RAND_MAX)-0.5)*2.0;
}
}
}
std::vector< std::vector<double>> zero_actions(g_num_total_threads);
for (int i=0;i<g_num_total_threads;i++)
{
zero_actions[i].resize(get_initial_poses<Scalar>().size(), Scalar(0));
}
std::vector< std::vector<double>> observations;
observations.resize(g_num_total_threads);
std::vector<double> rewards;
rewards.resize(g_num_total_threads);
std::vector<bool> dones;
dones.resize(g_num_total_threads);
//todo: tune this
int settle_down_steps= 50;
for (int i=0;i<settle_down_steps;i++)
{
step(zero_actions, observations, rewards, dones);
}
//for (auto v : sim_state)
// std::cout << v << std::endl;
return observations;
}
void step( std::vector< std::vector<double>>& actions,std::vector<std::vector<double>>& observations,
std::vector<double>& rewards,std::vector<bool>& dones)
{
std::vector<std::vector<Scalar>> outputs(
g_num_total_threads, std::vector<Scalar>(contact_sim.output_dim()));
std::vector<std::vector<Scalar>> inputs(g_num_total_threads);
for (int index=0;index<g_num_total_threads;index++)
{
int simstate_size = sim_states_[index].size();
sim_states_with_action_and_variables[index] = sim_states_[index];
sim_states_with_action_and_variables[index].resize(contact_sim.input_dim_with_action_and_variables());
contact_sim.prepare_sim_state_with_action_and_variables(
sim_states_with_action_and_variables[index],
actions[index]);
inputs[index] = sim_states_with_action_and_variables[index];
}
//#define DEBUG_ON_CPU
#ifdef DEBUG_ON_CPU
for (int index =0; index<g_num_total_threads;index++)
{
sim_states_with_graphics_[index] = contact_sim.step_forward1(sim_states_with_action_and_variables[index]);
}
#else
#if 1
#pragma omp parallel
#pragma omp for
for (int index=0;index<g_num_total_threads;index++)
{
if (!dones[index])
{
contact_sim.forward_kernel(1,&outputs[index][0], &inputs[index][0]);
}
}
#else
PARALLEL_FOR_BEGIN(g_num_total_threads)
{
if (!dones[i])
{
contact_sim.forward_kernel_fast(1,&outputs[i][0], &inputs[i][0]);
}
}PARALLEL_FOR_END();
#endif
for (int index=0;index<g_num_total_threads;index++)
{
if (!dones[index])
{
sim_states_with_graphics_[index] = outputs[index];
}
}
#endif //DEBUG_ON_CPU
for (int index=0;index<g_num_total_threads;index++)
{
if (!dones[index])
{
bool done;
Scalar reward;
contact_sim.compute_reward_done(
sim_states_[index],
sim_states_with_graphics_[index],
reward, done);
rewards[index] = reward;
dones[index] = done;
sim_states_[index] = sim_states_with_graphics_[index];
sim_states_[index].resize(contact_sim.input_dim());
observations[index] = sim_states_[index];
} else
{
rewards[index] = 0;
}
}
}
inline const std::vector<double> policy(int index, const std::vector<double>& obs)
{
std::vector<double> action (get_initial_poses<Scalar>().size(), Scalar(0));
neural_networks_[index].compute(obs, action);
return action;
}
};
#endif //ARS_VECTORIZED_ENVIRONMENT_H |
initAtoms.c | /// \file
/// Initialize the atom configuration.
#include "initAtoms.h"
#include <math.h>
#include <assert.h>
#include "constants.h"
#include "decomposition.h"
#include "parallel.h"
#include "random.h"
#include "linkCells.h"
#include "timestep.h"
#include "memUtils.h"
#include "performanceTimers.h"
static void computeVcm(SimFlat* s, real_t vcm[3]);
/// \details
/// Call functions such as createFccLattice and setTemperature to set up
/// initial atom positions and momenta.
Atoms* initAtoms(LinkCell* boxes)
{
Atoms* atoms = comdMalloc(sizeof(Atoms));
int maxTotalAtoms = MAXATOMS*boxes->nTotalBoxes;
{
atoms->gid = (int*) comdMalloc(maxTotalAtoms*sizeof(int));
atoms->iSpecies = (int*) comdMalloc(maxTotalAtoms*sizeof(int));
atoms->r = (real3*) comdMalloc(maxTotalAtoms*sizeof(real3));
atoms->p = (real3*) comdMalloc(maxTotalAtoms*sizeof(real3));
atoms->f = (real3*) comdMalloc(maxTotalAtoms*sizeof(real3));
atoms->U = (real_t*)comdMalloc(maxTotalAtoms*sizeof(real_t));
}
atoms->nLocal = 0;
atoms->nGlobal = 0;
#pragma sst compute
for (int iOff = 0; iOff < maxTotalAtoms; iOff++)
{
atoms->gid[iOff] = 0;
atoms->iSpecies[iOff] = 0;
zeroReal3(atoms->r[iOff]);
zeroReal3(atoms->p[iOff]);
zeroReal3(atoms->f[iOff]);
atoms->U[iOff] = 0.;
}
return atoms;
}
void destroyAtoms(Atoms *atoms)
{
freeMe(atoms,gid);
freeMe(atoms,iSpecies);
freeMe(atoms,r);
freeMe(atoms,p);
freeMe(atoms,f);
freeMe(atoms,U);
comdFree(atoms);
}
/// Creates atom positions on a face centered cubic (FCC) lattice with
/// nx * ny * nz unit cells and lattice constant lat.
/// Set momenta to zero.
void createFccLattice(int nx, int ny, int nz, real_t lat, SimFlat* s)
{
const real_t* localMin = s->domain->localMin; // alias
const real_t* localMax = s->domain->localMax; // alias
int nb = 4; // number of atoms in the basis
real3 basis[4] = { {0.25, 0.25, 0.25},
{0.25, 0.75, 0.75},
{0.75, 0.25, 0.75},
{0.75, 0.75, 0.25} };
// create and place atoms
int begin[3];
int end[3];
for (int ii=0; ii<3; ++ii)
{
begin[ii] = floor(localMin[ii]/lat);
end[ii] = ceil (localMax[ii]/lat);
}
real_t px,py,pz;
px=py=pz=0.0;
#pragma sst compute
for (int ix=begin[0]; ix<end[0]; ++ix)
for (int iy=begin[1]; iy<end[1]; ++iy)
for (int iz=begin[2]; iz<end[2]; ++iz)
for (int ib=0; ib<nb; ++ib)
{
real_t rx = (ix+basis[ib][0]) * lat;
real_t ry = (iy+basis[ib][1]) * lat;
real_t rz = (iz+basis[ib][2]) * lat;
if (rx < localMin[0] || rx >= localMax[0]) continue;
if (ry < localMin[1] || ry >= localMax[1]) continue;
if (rz < localMin[2] || rz >= localMax[2]) continue;
int id = ib+nb*(iz+nz*(iy+ny*(ix)));
putAtomInBox(s->boxes, s->atoms, id, 0, rx, ry, rz, px, py, pz);
}
#pragma sst init ((int64_t)nb*nx)*((int64_t)(ny*nz))
s->atoms->nGlobal = 0;
if (getMyRank() == 0)
printf("nb=%d nx=%d ny=%d nz=%d nr=%d nglbl=%lld\n",
nb, nx, ny, nz, getNRanks(), s->atoms->nGlobal);
#pragma sst init s->atoms->nGlobal / getNRanks()
s->atoms->nLocal = s->atoms->nLocal;
s->boxes->nTotalAtoms = s->atoms->nLocal;
// set total atoms in simulation
startTimer(commReduceTimer);
addIntParallel(&s->atoms->nLocal, &s->atoms->nGlobal, 1);
stopTimer(commReduceTimer);
#pragma sst delete
assert(s->atoms->nGlobal == nb*nx*ny*nz);
}
/// Sets the center of mass velocity of the system.
/// \param [in] newVcm The desired center of mass velocity.
void setVcm(SimFlat* s, real_t newVcm[3])
{
real_t oldVcm[3];
computeVcm(s, oldVcm);
real_t vShift[3];
vShift[0] = (newVcm[0] - oldVcm[0]);
vShift[1] = (newVcm[1] - oldVcm[1]);
vShift[2] = (newVcm[2] - oldVcm[2]);
int avgAtomsPerBox = s->boxes->nTotalAtoms / s->boxes->nLocalBoxes;
#pragma omp parallel for
for (int iBox=0; iBox<s->boxes->nLocalBoxes; ++iBox)
{
#pragma sst loop_count avgAtomsPerBox
for (int iOff=MAXATOMS*iBox, ii=0; ii<s->boxes->nAtoms[iBox]; ++ii, ++iOff)
{
int iSpecies = s->atoms->iSpecies[iOff];
real_t mass = s->species[iSpecies].mass;
s->atoms->p[iOff][0] += mass * vShift[0];
s->atoms->p[iOff][1] += mass * vShift[1];
s->atoms->p[iOff][2] += mass * vShift[2];
}
}
}
/// Sets the temperature of system.
///
/// Selects atom velocities randomly from a boltzmann (equilibrium)
/// distribution that corresponds to the specified temperature. This
/// random process will typically result in a small, but non zero center
/// of mass velocity and a small difference from the specified
/// temperature. For typical MD runs these small differences are
/// unimportant, However, to avoid possible confusion, we set the center
/// of mass velocity to zero and scale the velocities to exactly match
/// the input temperature.
void setTemperature(SimFlat* s, real_t temperature)
{
s->initialTemp = temperature;
int avgAtomsPerBox = s->boxes->nTotalAtoms / s->boxes->nLocalBoxes;
// set initial velocities for the distribution
#pragma omp parallel for
for (int iBox=0; iBox<s->boxes->nLocalBoxes; ++iBox)
{
#pragma sst loop_count avgAtomsPerBox
for (int iOff=MAXATOMS*iBox, ii=0; ii<s->boxes->nAtoms[iBox]; ++ii, ++iOff)
{
int iType = s->atoms->iSpecies[iOff];
real_t mass = s->species[iType].mass;
real_t sigma = sqrt(kB_eV * temperature/mass);
uint64_t seed = mkSeed(s->atoms->gid[iOff], 123);
s->atoms->p[iOff][0] = mass * sigma * gasdev(&seed);
s->atoms->p[iOff][1] = mass * sigma * gasdev(&seed);
s->atoms->p[iOff][2] = mass * sigma * gasdev(&seed);
}
}
// compute the resulting temperature
// kinetic energy = 3/2 kB * Temperature
if (temperature == 0.0) return;
real_t vZero[3] = {0., 0., 0.};
setVcm(s, vZero);
kineticEnergy(s);
real_t temp = (s->eKinetic/s->atoms->nGlobal)/kB_eV/1.5;
// scale the velocities to achieve the target temperature
real_t scaleFactor = sqrt(temperature/temp);
#pragma omp parallel for
for (int iBox=0; iBox<s->boxes->nLocalBoxes; ++iBox)
{
#pragma sst loop_count avgAtomsPerBox
for (int iOff=MAXATOMS*iBox, ii=0; ii<s->boxes->nAtoms[iBox]; ++ii, ++iOff)
{
s->atoms->p[iOff][0] *= scaleFactor;
s->atoms->p[iOff][1] *= scaleFactor;
s->atoms->p[iOff][2] *= scaleFactor;
}
}
kineticEnergy(s);
temp = s->eKinetic/s->atoms->nGlobal/kB_eV/1.5;
}
/// Add a random displacement to the atom positions.
/// Atoms are displaced by a random distance in the range
/// [-delta, +delta] along each axis.
/// \param [in] delta The maximum displacement (along each axis).
void randomDisplacements(SimFlat* s, real_t delta)
{
int avgAtomsPerBox = s->boxes->nTotalAtoms / s->boxes->nLocalBoxes;
#pragma omp parallel for
for (int iBox=0; iBox<s->boxes->nLocalBoxes; ++iBox)
{
#pragma sst loop_count avgAtomsPerBox
for (int iOff=MAXATOMS*iBox, ii=0; ii<s->boxes->nAtoms[iBox]; ++ii, ++iOff)
{
uint64_t seed = mkSeed(s->atoms->gid[iOff], 457);
s->atoms->r[iOff][0] += (2.0*lcg61(&seed)-1.0) * delta;
s->atoms->r[iOff][1] += (2.0*lcg61(&seed)-1.0) * delta;
s->atoms->r[iOff][2] += (2.0*lcg61(&seed)-1.0) * delta;
}
}
}
/// Computes the center of mass velocity of the system.
void computeVcm(SimFlat* s, real_t vcm[3])
{
real_t vcmLocal[4] = {0., 0., 0., 0.};
real_t vcmSum[4] = {0., 0., 0., 0.};
real_t v0 = 0.0;
real_t v1 = 0.0;
real_t v2 = 0.0;
real_t v3 = 0.0;
int avgAtomsPerBox = s->boxes->nTotalAtoms / s->boxes->nLocalBoxes;
// sum the momenta and particle masses
#pragma omp parallel for reduction(+:v0) reduction(+:v1) reduction(+:v2) reduction(+:v3)
for (int iBox=0; iBox<s->boxes->nLocalBoxes; ++iBox)
{
#pragma sst loop_count avgAtomsPerBox
for (int iOff=MAXATOMS*iBox, ii=0; ii<s->boxes->nAtoms[iBox]; ++ii, ++iOff)
{
v0 += s->atoms->p[iOff][0];
v1 += s->atoms->p[iOff][1];
v2 += s->atoms->p[iOff][2];
int iSpecies = s->atoms->iSpecies[iOff];
v3 += s->species[iSpecies].mass;
}
}
vcmLocal[0] = v0;
vcmLocal[1] = v1;
vcmLocal[2] = v2;
vcmLocal[3] = v3;
startTimer(commReduceTimer);
addRealParallel(vcmLocal, vcmSum, 4);
stopTimer(commReduceTimer);
real_t totalMass = vcmSum[3];
vcm[0] = vcmSum[0]/totalMass;
vcm[1] = vcmSum[1]/totalMass;
vcm[2] = vcmSum[2]/totalMass;
}
|
generator_spgemm_csr_asparse.c | /******************************************************************************
** Copyright (c) 2015-2017, Intel Corporation **
** All rights reserved. **
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. Neither the name of the copyright holder nor the names of its **
** contributors may be used to endorse or promote products derived **
** from this software without specific prior written permission. **
** **
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include "generator_spgemm_csr_asparse.h"
#include "generator_common.h"
#include <libxsmm_macros.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
LIBXSMM_INTERNAL_API_DEFINITION
void libxsmm_generator_spgemm_csr_asparse( libxsmm_generated_code* io_generated_code,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const char* i_arch,
const unsigned int* i_row_idx,
const unsigned int* i_column_idx,
const double* i_values ) {
unsigned int l_m;
unsigned int l_z;
unsigned int l_row_elements;
unsigned int l_flop_count = 0;
char l_new_code[512];
int l_max_code_length = 511;
int l_code_length = 0;
LIBXSMM_UNUSED(i_values);
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " unsigned int l_n = 0;\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
/* reset C if beta is zero */
if ( i_xgemm_desc->beta == 0 ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " unsigned int l_m = 0;\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_m = 0; l_m < %u; l_m++) {\n", (unsigned int)i_xgemm_desc->m);
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
if ( i_xgemm_desc->m > 1 ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma vector aligned\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
if ( (LIBXSMM_GEMM_FLAG_F32PREC & i_xgemm_desc->flags) == 0 ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_n = 0; l_n < %u; l_n++) { C[(l_m*%u)+l_n] = 0.0; }\n", (unsigned int)i_xgemm_desc->ldc, (unsigned int)i_xgemm_desc->ldc);
} else {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_n = 0; l_n < %u; l_n++) { C[(l_m*%u)+l_n] = 0.0f; }\n", (unsigned int)i_xgemm_desc->ldc, (unsigned int)i_xgemm_desc->ldc);
}
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " }\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
/* determine the correct simd pragma for each architecture */
if ( ( strcmp( i_arch, "noarch" ) == 0 ) ||
( strcmp( i_arch, "wsm" ) == 0 ) ||
( strcmp( i_arch, "snb" ) == 0 ) ||
( strcmp( i_arch, "hsw" ) == 0 ) ) {
if ( i_xgemm_desc->n > 7 ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd vectorlength(8)\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
} else if ( i_xgemm_desc->n > 3 ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd vectorlength(4)\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
} else if ( i_xgemm_desc->n > 1 ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd vectorlength(2)\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
} else {}
} else if ( ( strcmp( i_arch, "knc" ) == 0 ) ||
( strcmp( i_arch, "knl" ) == 0 ) ||
( strcmp( i_arch, "skx" ) == 0 ) ) {
if ( (i_xgemm_desc->n > 1) ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd vectorlength(16)\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
} else {
libxsmm_handle_error( io_generated_code, LIBXSMM_ERR_ARCH );
return;
}
if ( (i_xgemm_desc->n > 1) &&
((LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0) &&
((LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0) ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma vector aligned\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
/* generate the actuel kernel */
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_n = 0; l_n < %u; l_n++) {\n", (unsigned int)i_xgemm_desc->n);
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
for ( l_m = 0; l_m < (unsigned int)i_xgemm_desc->m; l_m++ ) {
l_row_elements = i_row_idx[l_m+1] - i_row_idx[l_m];
for ( l_z = 0; l_z < l_row_elements; l_z++ ) {
/* check k such that we just use columns which actually need to be multiplied */
if ( i_column_idx[i_row_idx[l_m] + l_z] < (unsigned int)i_xgemm_desc->k ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " C[%u+l_n] += A[%u] * B[%u+l_n];\n", l_m * i_xgemm_desc->ldc, i_row_idx[l_m] + l_z, i_column_idx[i_row_idx[l_m] + l_z]*i_xgemm_desc->ldb );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_flop_count += 2;
}
}
}
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " }\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
/* add flop counter */
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "\n#ifndef NDEBUG\n#ifdef _OPENMP\n#pragma omp atomic\n#endif\nlibxsmm_num_total_flops += %u;\n#endif\n", l_flop_count * (unsigned int)i_xgemm_desc->m);
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
|
GB_unaryop__ainv_uint64_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint64_int16
// op(A') function: GB_tran__ainv_uint64_int16
// C type: uint64_t
// A type: int16_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
uint64_t z = (uint64_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT64 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_uint64_int16
(
uint64_t *Cx, // Cx and Ax may be aliased
int16_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_uint64_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
cryptsha512_fmt_plug.c | /*
* This file is part of John the Ripper password cracker,
* based on rawSHA256_fmt.c code and Drepper's spec at
* http://www.akkadia.org/drepper/SHA-crypt.txt
*
* This software is Copyright (c) 2012 magnum, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*
* See code/comments in cryptsha256 for how and why this is being done. NOTE,
* we could limit ourselves to 15 byte password, and then only need 1 limb
* SHA512 SIMD logic. If we allow 2 limb logic then 79 byte passwords are max.
* this is better than cryptsha256, where if we only allowed 1 limb, then only
* 3 btye passwords would have been max, and even at 2 limbs, 35 byte passwords
* are the longest we can do.
*
* Porting to SSE2, May 2015, JimF. A little harder than some, since we have to
* group and rearrange passwords based upon length. We must only run passwords
* of a specific block group size in 1 SSE_COEF_SHA512 bundle. If we later do
* PARA_SHA512, then each bundle of SSE_COEF_SHA512*PARA_SHA512 will have to be
* made up of passwords of same block group size.
*
* Here are the block sizes per password length. To be equal group size, all
* numbers for 2 passwords must be equal all the way across. So, password
* lengths of 0, 1, ... 15 are 1 group. 16..23 are another group. 24..31 are
* yet another, etc. There are 5 'groups' of lengths.
*
* Here is the raw block length data. Only first and last length for the group has been kept.
Len: cp pspc cspp ppc cpp psc csp pc
0 : 1 1 1 1 1 1 1 1
15 : 1 1 1 1 1 1 1 1
16 : 1 2 2 1 1 1 1 1
23 : 1 2 2 1 1 1 1 1
24 : 1 2 2 2 2 1 1 1
31 : 1 2 2 2 2 1 1 1
32 : 1 2 2 2 2 2 2 1
47 : 1 2 2 2 2 2 2 1
48 : 2 2 2 2 2 2 2 2
79 : 2 2 2 2 2 2 2 2
Source to make above table (made up to 90,but over 79 is 3 limbs)
#include <stdio.h>
int c=64, s=16;
int S(int sz) {
if (sz<=111) return 1;
else if (sz <= 111+128) return 2;
else return 3;
}
void proc(int p) {
int cp=p+c;
printf("%-2d : %d %d %d %d %d %d %d %d\n",
p,S(cp),S(cp+s+p),S(cp+s+p),S(cp+p),S(cp+p),S(cp+s),S(cp+s),S(cp));
}
void main(int argc, char **argv) {
int i;
if (argc==2) s=atoi(argv[1]);
printf ("Len: cp pspc cspp ppc cpp psc csp pc (saltlen=%d)\n",s);
for (i = 0; i < 90; ++i)
proc(i);
}
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_cryptsha512;
#elif FMT_REGISTERS_H
john_register_one(&fmt_cryptsha512);
#else
#include "arch.h"
//#undef SIMD_COEF_64
#include "sha2.h"
#define _GNU_SOURCE 1
#include <string.h>
#include "params.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#ifdef _OPENMP
#ifndef OMP_SCALE
#define OMP_SCALE 16
#endif
#include <omp.h>
#endif
#include "memdbg.h"
// NOTE, in SSE mode, even if NOT in OMP, we may need to scale, quite a bit, due to needing
// to 'group' passwords differently, so that we have lengths which 'share' the same number
// of crypt block counts for each 'type'. We may want to scale as much as 128 or so, just
// to try to have better saturation. If we only had 8 passwords given to us, and they were
// one each of these lengths: 3 7 8 12 13 14 15 21, in theory, we could do this
// with only 2 SSE calls (SIMD_COEF_32==4 for SHA256). However, length 3 has to to run by itself,
// length 7 by itself, 8 by itself, and the rest can run together, but there are 5 of them,
// so it takes to runs. So, instead of 2 runs, we have to do 5 runs. Not very efficient.
// however, if we have a lot more passwords to work with, we can re-arrange them, to run
// them in groups that all 'fit' together, and do so until we exhaust all from a given length
// range, then do all in the next range. Thus, until we get to the last set within a length
// range, we are doing a fully packed SSE run, and having a LOT less wasted space. This will
// get even more interesting, when we start doing OMP, but it should just be the same principal,
// preload more passwords, and group them, then run the OMP threads over a single length, then
// go to the next length, until done, trying to keep each thread running, and keeping each block
// of SSE data full, until the last in a range. We probably can simply build all the rearrangments,
// then let the threads go on ALL data, without caring about the length, since each thread will only
// be working on passwords in a single MMX buffer that all match, at any given moment.
#ifdef SIMD_COEF_64
#ifdef _OPENMP
#define SIMD_COEF_SCALE (32/SIMD_COEF_64)
#else
#define SIMD_COEF_SCALE (64/SIMD_COEF_64)
#endif
#else
#define SIMD_COEF_SCALE 1
#endif
#define FORMAT_LABEL "sha512crypt"
#ifdef SIMD_COEF_64
#define ALGORITHM_NAME SHA512_ALGORITHM_NAME
#else
#if ARCH_BITS >= 64
#define ALGORITHM_NAME "64/" ARCH_BITS_STR " " SHA2_LIB
#else
#define ALGORITHM_NAME "32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#endif
// 79 is max length we can do in 2 SIMD limbs, so just make it 79 always.
#define PLAINTEXT_LENGTH 79
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct saltstruct)
#define SALT_ALIGN 4
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
// these MUST be defined prior to loading cryptsha512_valid.h
#define BINARY_SIZE 64
#define SALT_LENGTH 16
#define CIPHERTEXT_LENGTH 86
#define __CRYPTSHA512_CREATE_PROPER_TESTS_ARRAY__
#include "cryptsha512_common.h"
#define BLKS MAX_KEYS_PER_CRYPT
/* This structure is 'pre-loaded' with the keyspace of all possible crypts which */
/* will be performed WITHIN the inner loop. There are 8 possible buffers that */
/* are used. They are cp, pspc, cspp, ppc, cpp, psc, csp, and pc, where p stands */
/* for the 'hash' built from the password (and it is the same length as the */
/* password), s stands for the hash built from the salt (same size as salt), and */
/* c stands for the crypt results from the prior loop. There are 8 possible */
/* buffer layouts listed, but they fall into a pattern that is 42 long (2*3*7) */
/* this structure encapsulates this. we build this buffer, after computing the */
/* s hash, the p hash, and the starting c values. Then, within the inner loop, */
/* we simply spin through this structure, calling the SHA512 code to do the work. */
/* NOTE, most of the time, there will be 1 block and 2 block crypts. As the */
/* the password length grows, the more 2 block crypts there are, thus slower */
/**/
/* for SSE only, but 'could' be done for sha2.c code (jtr sha2) */
/* This keyspace was changed, to be put into BE at the start, and then we never */
/* do any swapping, but keep it in BE format from that point on. To do this, we */
/* changed the pointers to be a pointer to the start of the block, AND an offset */
/* for SSE, we need a pointer to the start of the block[0], and the offset. The */
/* index needed will be known in the crypt_all. This means we need something */
/* similar to out GET_POS macros, but also for oSSL formats. */
/* To do this, we have to use the JtR sha2.c functions, since there is this func: */
/* sha512_hash_block(&CTX, data, int perform_endian_swap). So if we set the last */
/* param to 0, we can call this function, and it will avoid the byte swapping */
typedef struct cryptloopstruct_t {
unsigned char buf[8*2*128*BLKS]; // will allocate to hold 42 2 block buffers (42 * 2 * 128) Reduced to only requiring 8*2*128
// now, the cryptstructs are on the stack within the crypt for loop, so we avoid allocation.
// and to avoid the single static variable, or a static array.
unsigned char *bufs[BLKS][42]; // points to the start of each 2 block buffer.
#ifdef SIMD_COEF_64
int offs[BLKS][42];
#endif
unsigned char *cptr[BLKS][42]; // points to where we copy the crypt pointer for next round.
// Round 0 points to somewhere in round 1's buffer, etc.
int datlen[42]; // if 1, then this is a small, only 1 block crypt. Some rounds for shorter passwords take only 1 crypt block.
// NOTE, datlen could be changed to a number, and then we could do > 2 block crypts. Would take a little
// more memory (and longer PW's certainly DO take more time), but it should work fine. It may be an issue
// especially when doing OMP, that the memory footprint of this 'hot' inner loop simply gets too big, and
// things slow down. For now, we are limiting ourselves to 35 byte password, which fits into 2 SHA512 buffers
} cryptloopstruct;
static int (*saved_len);
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
/* these 2 values are used in setup of the cryptloopstruct, AND to do our SHA512_Init() calls, in the inner loop */
static const unsigned char padding[256] = { 0x80, 0 /* 0,0,0,0.... */ };
#if !defined(JTR_INC_COMMON_CRYPTO_SHA2) && !defined (SIMD_COEF_64)
static const uint64_t ctx_init[8] =
{0x6A09E667F3BCC908ULL,0xBB67AE8584CAA73BULL,0x3C6EF372FE94F82BULL,0xA54FF53A5F1D36F1ULL,0x510E527FADE682D1ULL,0x9B05688C2B3E6C1FULL,0x1F83D9ABFB41BD6BULL,0x5BE0CD19137E2179ULL};
#endif
static struct saltstruct {
unsigned int len;
unsigned int rounds;
unsigned char salt[SALT_LENGTH];
} *cur_salt;
static void init(struct fmt_main *self)
{
int omp_t = 1;
int max_crypts;
#ifdef _OPENMP
omp_t = omp_get_max_threads();
omp_t *= OMP_SCALE;
#endif
max_crypts = SIMD_COEF_SCALE * omp_t * MAX_KEYS_PER_CRYPT;
self->params.max_keys_per_crypt = max_crypts;
// we allocate 1 more than needed, and use that 'extra' value as a zero
// length PW to fill in the tail groups in MMX mode.
saved_len = mem_calloc(1 + max_crypts, sizeof(*saved_len));
saved_key = mem_calloc(1 + max_crypts, sizeof(*saved_key));
crypt_out = mem_calloc(1 + max_crypts, sizeof(*crypt_out));
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
MEM_FREE(saved_len);
}
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
static void set_key(char *key, int index)
{
int len = strlen(key);
saved_len[index] = len;
if (len > PLAINTEXT_LENGTH)
len = saved_len[index] = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, len);
saved_key[index][len] = 0;
}
static char *get_key(int index)
{
saved_key[index][saved_len[index]] = 0;
return saved_key[index];
}
/*
These are the 8 types of buffers this algorithm uses:
cp
pspc
cspp
ppc
cpp
psc
csp
pc
*/
static void LoadCryptStruct(cryptloopstruct *crypt_struct, int index, int idx, char *p_bytes, char *s_bytes) {
unsigned len_pc, len_ppsc, len_ppc, len_psc; // length of 'data'
unsigned tot_pc, tot_ppsc, tot_ppc, tot_psc; // length of entire block to crypt (128 or 256)
unsigned off_pc, off_pspc, off_ppc, off_psc; // offset to the crypt ptr for these 4 'types'.
unsigned dlen_pc, dlen_ppsc, dlen_ppc, dlen_psc; // is this 1 or 2 block (or actual len for CommonCrypto, since it uses SHA512_Final()
unsigned plen=saved_len[index];
unsigned char *cp = crypt_struct->buf;
cryptloopstruct *pstr = crypt_struct;
#ifdef SIMD_COEF_64
// in SSE mode, we FORCE every buffer to be 2 blocks, even if it COULD fit into 1.
// Then we simply use the 2 block SSE code.
unsigned char *next_cp;
cp += idx*2*128;
#endif
len_pc = plen + BINARY_SIZE;
len_ppsc = (plen<<1) + cur_salt->len + BINARY_SIZE;
len_ppc = (plen<<1) + BINARY_SIZE;
len_psc = plen + cur_salt->len + BINARY_SIZE;
#ifdef JTR_INC_COMMON_CRYPTO_SHA2
if (len_pc <=111) tot_pc =128; else tot_pc =256;
if (len_ppsc<=111) tot_ppsc=128; else tot_ppsc=256;
if (len_ppc <=111) tot_ppc =128; else tot_ppc =256;
if (len_psc <=111) tot_psc =128; else tot_psc =256;
dlen_pc =len_pc;
dlen_ppsc=len_ppsc;
dlen_ppc =len_ppc;
dlen_psc =len_psc;
#else
if (len_pc <=111) {tot_pc =128; dlen_pc =128;}else{tot_pc =256; dlen_pc =256; }
if (len_ppsc<=111) {tot_ppsc=128; dlen_ppsc=128;}else{tot_ppsc=256; dlen_ppsc=256; }
if (len_ppc <=111) {tot_ppc =128; dlen_ppc =128;}else{tot_ppc =256; dlen_ppc =256; }
if (len_psc <=111) {tot_psc =128; dlen_psc =128;}else{tot_psc =256; dlen_psc =256; }
#endif
off_pc = len_pc - BINARY_SIZE;
off_pspc = len_ppsc - BINARY_SIZE;
off_ppc = len_ppc - BINARY_SIZE;
off_psc = len_psc - BINARY_SIZE;
// Adjust cp for idx;
#ifdef SIMD_COEF_64
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[0] is a cp (First of this type)
pstr->bufs[idx][0] = pstr->cptr[idx][41] = cp;
// For fist element only, we DO copy in the c value.
memcpy(cp, crypt_out[index], BINARY_SIZE); cp += BINARY_SIZE;
memcpy(cp, p_bytes, plen); cp += plen;
if (!idx) pstr->datlen[0] = dlen_pc;
memcpy(cp, padding, tot_pc-2-len_pc); cp += (tot_pc-len_pc);
pstr->bufs[idx][0][tot_pc-2] = (len_pc<<3)>>8;
pstr->bufs[idx][0][tot_pc-1] = (len_pc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[1] is a pspc (First of this type)
pstr->bufs[idx][1] = cp;
pstr->cptr[idx][0] = cp + off_pspc;
memcpy(cp, p_bytes, plen); cp += plen;
memcpy(cp, s_bytes, cur_salt->len); cp += cur_salt->len;
memcpy(cp, p_bytes, plen); cp += (plen+BINARY_SIZE);
if (!idx) pstr->datlen[1] = dlen_ppsc;
memcpy(cp, padding, tot_ppsc-2-len_ppsc); cp += (tot_ppsc-len_ppsc);
pstr->bufs[idx][1][tot_ppsc-2] = (len_ppsc<<3)>>8;
pstr->bufs[idx][1][tot_ppsc-1] = (len_ppsc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[2] is a cspp (First of this type)
pstr->bufs[idx][2] = pstr->cptr[idx][1] = cp;
cp += BINARY_SIZE;
memcpy(cp, s_bytes, cur_salt->len); cp += cur_salt->len;
memcpy(cp, p_bytes, plen); cp += plen;
memcpy(cp, p_bytes, plen); cp += plen;
if (!idx) pstr->datlen[2] = dlen_ppsc;
memcpy(cp, padding, tot_ppsc-2-len_ppsc); cp += (tot_ppsc-len_ppsc);
pstr->bufs[idx][2][tot_ppsc-2] = (len_ppsc<<3)>>8;
pstr->bufs[idx][2][tot_ppsc-1] = (len_ppsc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[3] is a ppc (First of this type)
pstr->bufs[idx][3] = cp;
pstr->cptr[idx][2] = cp + off_ppc;
memcpy(cp, p_bytes, plen); cp += plen;
memcpy(cp, p_bytes, plen); cp +=(plen+BINARY_SIZE);
if (!idx) pstr->datlen[3] = dlen_ppc;
memcpy(cp, padding, tot_ppc-2-len_ppc); cp += (tot_ppc-len_ppc);
pstr->bufs[idx][3][tot_ppc-2] = (len_ppc<<3)>>8;
pstr->bufs[idx][3][tot_ppc-1] = (len_ppc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[4] is a cspp (from 2)
pstr->bufs[idx][4] = pstr->cptr[idx][3] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[4] = dlen_ppsc;
// pstr->buf[5] is a pspc (from [1])
pstr->bufs[idx][5] = pstr->bufs[idx][1]; pstr->cptr[idx][4] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[5] = dlen_ppsc;
// pstr->buf[6] is a cpp (First of this type)
pstr->bufs[idx][6] = pstr->cptr[idx][5] = cp;
cp += BINARY_SIZE;
memcpy(cp, p_bytes, plen); cp += plen;
memcpy(cp, p_bytes, plen); cp += plen;
if (!idx) pstr->datlen[6] = dlen_ppc;
memcpy(cp, padding, tot_ppc-2-len_ppc); cp += (tot_ppc-len_ppc);
pstr->bufs[idx][6][tot_ppc-2] = (len_ppc<<3)>>8;
pstr->bufs[idx][6][tot_ppc-1] = (len_ppc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[07] psc (First of this type)
pstr->bufs[idx][7] = cp;
pstr->cptr[idx][6] = cp + off_psc;
memcpy(cp, p_bytes, plen); cp += plen;
memcpy(cp, s_bytes, cur_salt->len); cp += (cur_salt->len+BINARY_SIZE);
if (!idx) pstr->datlen[7] = dlen_psc;
memcpy(cp, padding, tot_psc-2-len_psc); cp += (tot_psc-len_psc);
pstr->bufs[idx][7][tot_psc-2] = (len_psc<<3)>>8;
pstr->bufs[idx][7][tot_psc-1] = (len_psc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[08] cspp (from 2)
pstr->bufs[idx][8] = pstr->cptr[idx][7] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[8] = dlen_ppsc;
// pstr->buf[09] ppc (from 3)
pstr->bufs[idx][9] = pstr->bufs[idx][3]; pstr->cptr[idx][8] = pstr->cptr[idx][2];
if (!idx) pstr->datlen[9] = dlen_ppc;
// pstr->buf[10] cspp (from 2)
pstr->bufs[idx][10] = pstr->cptr[idx][9] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[10] = dlen_ppsc;
// pstr->buf[11] pspc (from 1)
pstr->bufs[idx][11] = pstr->bufs[idx][1]; pstr->cptr[idx][10] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[11] = dlen_ppsc;
// pstr->buf[12] cpp (from 6)
pstr->bufs[idx][12] = pstr->cptr[idx][11] = pstr->bufs[idx][6];
if (!idx) pstr->datlen[12] = dlen_ppc;
// pstr->buf[13] pspc (from 1)
pstr->bufs[idx][13] = pstr->bufs[idx][1]; pstr->cptr[idx][12] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[13] = dlen_ppsc;
// pstr->buf[14] csp (First of this type)
pstr->bufs[idx][14] = pstr->cptr[idx][13] = cp;
cp += BINARY_SIZE;
memcpy(cp, s_bytes, cur_salt->len); cp += cur_salt->len;
memcpy(cp, p_bytes, plen); cp += plen;
if (!idx) pstr->datlen[14] = dlen_psc;
memcpy(cp, padding, tot_psc-2-len_psc); cp += (tot_psc-len_psc);
pstr->bufs[idx][14][tot_psc-2] = (len_psc<<3)>>8;
pstr->bufs[idx][14][tot_psc-1] = (len_psc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[15] ppc (from 3)
pstr->bufs[idx][15] = pstr->bufs[idx][3]; pstr->cptr[idx][14] = pstr->cptr[idx][2];
if (!idx) pstr->datlen[15] = dlen_ppc;
// pstr->buf[16] cspp (from 2)
pstr->bufs[idx][16] = pstr->cptr[idx][15] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[16] = dlen_ppsc;
// pstr->buf[17] pspc (from 1)
pstr->bufs[idx][17] = pstr->bufs[idx][1]; pstr->cptr[idx][16] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[17] = dlen_ppsc;
// pstr->buf[18] cpp (from 6)
pstr->bufs[idx][18] = pstr->cptr[idx][17] = pstr->bufs[idx][6];
if (!idx) pstr->datlen[18] = dlen_ppc;
// pstr->buf[19] pspc (from 1)
pstr->bufs[idx][19] = pstr->bufs[idx][1]; pstr->cptr[idx][18] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[19] = dlen_ppsc;
// pstr->buf[20] cspp (from 2)
pstr->bufs[idx][20] = pstr->cptr[idx][19] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[20] = dlen_ppsc;
// pstr->buf[21] pc (First of this type)
pstr->bufs[idx][21] = cp;
pstr->cptr[idx][20] = cp + off_pc;
memcpy(cp, p_bytes, plen); cp += (plen+BINARY_SIZE);
if (!idx) pstr->datlen[21] = dlen_pc;
memcpy(cp, padding, tot_psc-2-len_pc);
pstr->bufs[idx][21][tot_pc-2] = (len_pc<<3)>>8;
pstr->bufs[idx][21][tot_pc-1] = (len_pc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[22] cspp (from 2)
pstr->bufs[idx][22] = pstr->cptr[idx][21] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[22] = dlen_ppsc;
// pstr->buf[23] pspc (from 1)
pstr->bufs[idx][23] = pstr->bufs[idx][1]; pstr->cptr[idx][22] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[23] = dlen_ppsc;
// pstr->buf[24] cpp (from 6)
pstr->bufs[idx][24] = pstr->cptr[idx][23] = pstr->bufs[idx][6];
if (!idx) pstr->datlen[24] = dlen_ppc;
// pstr->buf[25] pspc (from 1)
pstr->bufs[idx][25] = pstr->bufs[idx][1]; pstr->cptr[idx][24] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[25] = dlen_ppsc;
// pstr->buf[26] cspp (from 2)
pstr->bufs[idx][26] = pstr->cptr[idx][25] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[26] = dlen_ppsc;
// pstr->buf[27] ppc (from 3)
pstr->bufs[idx][27] = pstr->bufs[idx][3]; pstr->cptr[idx][26] = pstr->cptr[idx][2];
if (!idx) pstr->datlen[27] = dlen_ppc;
// pstr->buf[28] csp (from 14)
pstr->bufs[idx][28] = pstr->cptr[idx][27] = pstr->bufs[idx][14];
if (!idx) pstr->datlen[28] = dlen_psc;
// pstr->buf[29] pspc (from 1)
pstr->bufs[idx][29] = pstr->bufs[idx][1]; pstr->cptr[idx][28] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[29] = dlen_ppsc;
// pstr->buf[30] cpp (from 6)
pstr->bufs[idx][30] = pstr->cptr[idx][29] = pstr->bufs[idx][6];
if (!idx) pstr->datlen[30] = dlen_ppc;
// pstr->buf[31] pspc (from 1)
pstr->bufs[idx][31] = pstr->bufs[idx][1]; pstr->cptr[idx][30] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[31] = dlen_ppsc;
// pstr->buf[32] cspp (from 2)
pstr->bufs[idx][32] = pstr->cptr[idx][31] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[32] = dlen_ppsc;
// pstr->buf[33] ppc (from 3)
pstr->bufs[idx][33] = pstr->bufs[idx][3]; pstr->cptr[idx][32] = pstr->cptr[idx][2];
if (!idx) pstr->datlen[33] = dlen_ppc;
// pstr->buf[34] cspp (from 2)
pstr->bufs[idx][34] = pstr->cptr[idx][33] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[34] = dlen_ppsc;
// pstr->buf[35] psc (from 7)
pstr->bufs[idx][35] = pstr->bufs[idx][7]; pstr->cptr[idx][34] = pstr->cptr[idx][6];
if (!idx) pstr->datlen[35] = dlen_psc;
// pstr->buf[36] cpp (from 6)
pstr->bufs[idx][36] = pstr->cptr[idx][35] = pstr->bufs[idx][6];
if (!idx) pstr->datlen[36] = dlen_ppc;
// pstr->buf[37] pspc (from 1)
pstr->bufs[idx][37] = pstr->bufs[idx][1]; pstr->cptr[idx][36] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[37] = dlen_ppsc;
// pstr->buf[38] cspp (from 2)
pstr->bufs[idx][38] = pstr->cptr[idx][37] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[38] = dlen_ppsc;
// pstr->buf[39] ppc (from 3)
pstr->bufs[idx][39] = pstr->bufs[idx][3]; pstr->cptr[idx][38] = pstr->cptr[idx][2];
if (!idx) pstr->datlen[39] = dlen_ppc;
// pstr->buf[40] cspp (from 2)
pstr->bufs[idx][40] = pstr->cptr[idx][39] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[40] = dlen_ppsc;
// pstr->buf[41] pspc (from 1)
pstr->bufs[idx][41] = pstr->bufs[idx][1]; pstr->cptr[idx][40] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[41] = dlen_ppsc;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
int *MixOrder, tot_todo;
#ifdef SIMD_COEF_64
// group based upon size splits.
MixOrder = mem_calloc((count+6*MAX_KEYS_PER_CRYPT), sizeof(int));
{
static const int lens[17][6] = {
{0,24,48,88,89,90}, // 0 byte salt
{0,24,48,88,89,90}, // 1 byte salt
{0,23,24,46,48,87}, // 2 byte salt
{0,23,24,45,48,87}, // 3 byte salt
{0,22,24,44,48,86}, // 4 byte salt
{0,22,24,43,48,86}, // 5 byte salt
{0,21,24,42,48,85}, // 6 byte salt
{0,21,24,41,48,85}, // 7 byte salt
{0,20,24,40,48,84}, // 8 byte salt
{0,20,24,39,48,84}, // 9 byte salt
{0,19,24,38,48,83}, // 10 byte salt
{0,19,24,37,48,83}, // 11 byte salt
{0,18,24,36,48,82}, // 12 byte salt
{0,18,24,35,48,82}, // 13 byte salt
{0,17,24,34,48,81}, // 14 byte salt
{0,17,24,33,48,81}, // 15 byte salt
{0,16,24,32,48,80} };
int j;
tot_todo = 0;
saved_len[count] = 0; // point all 'tail' MMX buffer elements to this location.
for (j = 0; j < 5; ++j) {
for (index = 0; index < count; ++index) {
if (saved_len[index] >= lens[cur_salt->len][j] && saved_len[index] < lens[cur_salt->len][j+1])
MixOrder[tot_todo++] = index;
}
while (tot_todo % MAX_KEYS_PER_CRYPT)
MixOrder[tot_todo++] = count;
}
}
#else
// no need to mix. just run them one after the next, in any order.
MixOrder = mem_calloc(count, sizeof(int));
for (index = 0; index < count; ++index)
MixOrder[index] = index;
tot_todo = count;
#endif
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < tot_todo; index += MAX_KEYS_PER_CRYPT)
{
// portably align temp_result char * pointer machine word size.
union xx {
unsigned char c[BINARY_SIZE];
ARCH_WORD a[BINARY_SIZE/sizeof(ARCH_WORD)];
} u;
unsigned char *temp_result = u.c;
SHA512_CTX ctx;
SHA512_CTX alt_ctx;
size_t cnt;
int idx;
char *cp;
char p_bytes[PLAINTEXT_LENGTH+1];
char s_bytes[PLAINTEXT_LENGTH+1];
char tmp_cls[sizeof(cryptloopstruct)+MEM_ALIGN_SIMD];
cryptloopstruct *crypt_struct;
#ifdef SIMD_COEF_64
char tmp_sse_out[8*MAX_KEYS_PER_CRYPT*8+MEM_ALIGN_SIMD];
ARCH_WORD_64 *sse_out;
sse_out = (ARCH_WORD_64 *)mem_align(tmp_sse_out, MEM_ALIGN_SIMD);
#endif
crypt_struct = (cryptloopstruct *)mem_align(tmp_cls,MEM_ALIGN_SIMD);
for (idx = 0; idx < MAX_KEYS_PER_CRYPT; ++idx)
{
/* Prepare for the real work. */
SHA512_Init(&ctx);
/* Add the key string. */
SHA512_Update(&ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);
/* The last part is the salt string. This must be at most 16
characters and it ends at the first `$' character (for
compatibility with existing implementations). */
SHA512_Update(&ctx, cur_salt->salt, cur_salt->len);
/* Compute alternate SHA512 sum with input KEY, SALT, and KEY. The
final result will be added to the first context. */
SHA512_Init(&alt_ctx);
/* Add key. */
SHA512_Update(&alt_ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);
/* Add salt. */
SHA512_Update(&alt_ctx, cur_salt->salt, cur_salt->len);
/* Add key again. */
SHA512_Update(&alt_ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);
/* Now get result of this (64 bytes) and add it to the other
context. */
SHA512_Final((unsigned char*)crypt_out[MixOrder[index+idx]], &alt_ctx);
/* Add for any character in the key one byte of the alternate sum. */
for (cnt = saved_len[MixOrder[index+idx]]; cnt > BINARY_SIZE; cnt -= BINARY_SIZE)
SHA512_Update(&ctx, (unsigned char*)crypt_out[MixOrder[index+idx]], BINARY_SIZE);
SHA512_Update(&ctx, (unsigned char*)crypt_out[MixOrder[index+idx]], cnt);
/* Take the binary representation of the length of the key and for every
1 add the alternate sum, for every 0 the key. */
for (cnt = saved_len[MixOrder[index+idx]]; cnt > 0; cnt >>= 1)
if ((cnt & 1) != 0)
SHA512_Update(&ctx, (unsigned char*)crypt_out[MixOrder[index+idx]], BINARY_SIZE);
else
SHA512_Update(&ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);
/* Create intermediate result. */
SHA512_Final((unsigned char*)crypt_out[MixOrder[index+idx]], &ctx);
/* Start computation of P byte sequence. */
SHA512_Init(&alt_ctx);
/* For every character in the password add the entire password. */
for (cnt = 0; cnt < saved_len[MixOrder[index+idx]]; ++cnt)
SHA512_Update(&alt_ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);
/* Finish the digest. */
SHA512_Final(temp_result, &alt_ctx);
/* Create byte sequence P. */
cp = p_bytes;
for (cnt = saved_len[MixOrder[index+idx]]; cnt >= BINARY_SIZE; cnt -= BINARY_SIZE)
cp = (char *) memcpy (cp, temp_result, BINARY_SIZE) + BINARY_SIZE;
memcpy (cp, temp_result, cnt);
/* Start computation of S byte sequence. */
SHA512_Init(&alt_ctx);
/* repeat the following 16+A[0] times, where A[0] represents the
first byte in digest A interpreted as an 8-bit unsigned value */
for (cnt = 0; cnt < 16 + ((unsigned char*)crypt_out[MixOrder[index+idx]])[0]; ++cnt)
SHA512_Update(&alt_ctx, cur_salt->salt, cur_salt->len);
/* Finish the digest. */
SHA512_Final(temp_result, &alt_ctx);
/* Create byte sequence S. */
cp = s_bytes;
for (cnt = cur_salt->len; cnt >= BINARY_SIZE; cnt -= BINARY_SIZE)
cp = (char *) memcpy (cp, temp_result, BINARY_SIZE) + BINARY_SIZE;
memcpy (cp, temp_result, cnt);
/* Repeatedly run the collected hash value through SHA512 to
burn CPU cycles. */
LoadCryptStruct(crypt_struct, MixOrder[index+idx], idx, p_bytes, s_bytes);
}
idx = 0;
#ifdef SIMD_COEF_64
for (cnt = 1; ; ++cnt) {
if (crypt_struct->datlen[idx]==256) {
unsigned char *cp = crypt_struct->bufs[0][idx];
SIMDSHA512body((__m128i *)cp, sse_out, NULL, SSEi_FLAT_IN|SSEi_2BUF_INPUT_FIRST_BLK);
SIMDSHA512body((__m128i *)&cp[128], sse_out, sse_out, SSEi_FLAT_IN|SSEi_2BUF_INPUT_FIRST_BLK|SSEi_RELOAD);
} else {
unsigned char *cp = crypt_struct->bufs[0][idx];
SIMDSHA512body((__m128i *)cp, sse_out, NULL, SSEi_FLAT_IN|SSEi_2BUF_INPUT_FIRST_BLK);
}
if (cnt == cur_salt->rounds)
break;
{
int j, k;
for (k = 0; k < MAX_KEYS_PER_CRYPT; ++k) {
ARCH_WORD_64 *o = (ARCH_WORD_64 *)crypt_struct->cptr[k][idx];
for (j = 0; j < 8; ++j)
*o++ = JOHNSWAP64(sse_out[j*SIMD_COEF_64+(k&(SIMD_COEF_64-1))+k/SIMD_COEF_64*8*SIMD_COEF_64]);
}
}
if (++idx == 42)
idx = 0;
}
{
int j, k;
for (k = 0; k < MAX_KEYS_PER_CRYPT; ++k) {
ARCH_WORD_64 *o = (ARCH_WORD_64 *)crypt_out[MixOrder[index+k]];
for (j = 0; j < 8; ++j)
*o++ = JOHNSWAP64(sse_out[j*SIMD_COEF_64+(k&(SIMD_COEF_64-1))+k/SIMD_COEF_64*8*SIMD_COEF_64]);
}
}
#else
SHA512_Init(&ctx);
for (cnt = 1; ; ++cnt) {
// calling with 128 byte, or 256 byte always, will force the update to properly crypt the data.
// NOTE the data is fully formed. It ends in a 0x80, is padded with nulls, AND has bit appended.
SHA512_Update(&ctx, crypt_struct->bufs[0][idx], crypt_struct->datlen[idx]);
if (cnt == cur_salt->rounds)
break;
#ifdef JTR_INC_COMMON_CRYPTO_SHA2
SHA512_Final(crypt_struct->cptr[0][idx], &ctx);
#else // !defined JTR_INC_COMMON_CRYPTO_SHA2, so it is oSSL, or generic
#if ARCH_LITTLE_ENDIAN
{
int j;
ARCH_WORD_64 *o = (ARCH_WORD_64 *)crypt_struct->cptr[0][idx];
for (j = 0; j < 8; ++j)
*o++ = JOHNSWAP64(ctx.h[j]);
}
#else
memcpy(crypt_struct->cptr[0][idx], ctx.h, BINARY_SIZE);
#endif
#endif
if (++idx == 42)
idx = 0;
#ifdef JTR_INC_COMMON_CRYPTO_SHA2
SHA512_Init(&ctx);
#else
// this memcpy is 'good enough', used instead of SHA512_Init()
memcpy(ctx.h, ctx_init, sizeof(ctx_init));
#endif
}
#ifdef JTR_INC_COMMON_CRYPTO_SHA2
SHA512_Final((unsigned char*)crypt_out[MixOrder[index]], &ctx);
#else
#if ARCH_LITTLE_ENDIAN
{
int j;
ARCH_WORD_64 *o = (ARCH_WORD_64 *)crypt_out[MixOrder[index]];
for (j = 0; j < 8; ++j)
*o++ = JOHNSWAP64(ctx.h[j]);
}
#else
memcpy(crypt_out[MixOrder[index]], ctx.h, BINARY_SIZE);
#endif
#endif
#endif
}
MEM_FREE(MixOrder);
return count;
}
static void set_salt(void *salt)
{
cur_salt = salt;
}
static void *get_salt(char *ciphertext)
{
static struct saltstruct out;
int len;
memset(&out, 0, sizeof(out));
out.rounds = ROUNDS_DEFAULT;
ciphertext += 3;
if (!strncmp(ciphertext, ROUNDS_PREFIX,
sizeof(ROUNDS_PREFIX) - 1)) {
const char *num = ciphertext + sizeof(ROUNDS_PREFIX) - 1;
char *endp;
unsigned long int srounds = strtoul(num, &endp, 10);
if (*endp == '$')
{
ciphertext = endp + 1;
srounds = srounds < ROUNDS_MIN ?
ROUNDS_MIN : srounds;
out.rounds = srounds > ROUNDS_MAX ?
ROUNDS_MAX : srounds;
}
}
for (len = 0; ciphertext[len] != '$'; len++);
if (len > SALT_LENGTH)
len = SALT_LENGTH;
memcpy(out.salt, ciphertext, len);
out.len = len;
return &out;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
for (; index < count; index++)
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static unsigned int sha512crypt_iterations(void *salt)
{
struct saltstruct *sha512crypt_salt;
sha512crypt_salt = salt;
return (unsigned int)sha512crypt_salt->rounds;
}
// Public domain hash function by DJ Bernstein
// We are hashing the entire struct
static int salt_hash(void *salt)
{
unsigned char *s = salt;
unsigned int hash = 5381;
unsigned int i;
for (i = 0; i < SALT_SIZE; i++)
hash = ((hash << 5) + hash) ^ s[i];
return hash & (SALT_HASH_SIZE - 1);
}
struct fmt_main fmt_cryptsha512 = {
{
FORMAT_LABEL,
FORMAT_NAME,
"SHA512 " ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{
"iteration count",
},
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{
sha512crypt_iterations,
},
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
pr24455.c | /* { dg-do run } */
/* { dg-additional-sources pr24455-1.c } */
/* { dg-require-effective-target tls_runtime } */
extern void abort (void);
extern int i;
#pragma omp threadprivate(i)
int main()
{
i = 0;
#pragma omp parallel default(none) num_threads(10)
{
i++;
#pragma omp barrier
if (i != 1)
abort ();
}
return 0;
}
|
ex_particle_OPENMP_seq.ref.c | #include <sys/time.h>
#include <time.h>
#include <stdio.h>
static unsigned long long current_time_ns() {
#ifdef __MACH__
clock_serv_t cclock;
mach_timespec_t mts;
host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock);
clock_get_time(cclock, &mts);
mach_port_deallocate(mach_task_self(), cclock);
unsigned long long s = 1000000000ULL * (unsigned long long)mts.tv_sec;
return (unsigned long long)mts.tv_nsec + s;
#else
struct timespec t ={0,0};
clock_gettime(CLOCK_MONOTONIC, &t);
unsigned long long s = 1000000000ULL * (unsigned long long)t.tv_sec;
return (((unsigned long long)t.tv_nsec)) + s;
#endif
}
/**
* @file ex_particle_OPENMP_seq.c
* @author Michael Trotter & Matt Goodrum
* @brief Particle filter implementation in C/OpenMP
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>
#include <limits.h>
#include <time.h>
#include <string.h>
#define PI 3.1415926535897932
/**
@var M value for Linear Congruential Generator (LCG); use GCC's value
*/
long M = INT_MAX;
/**
@var A value for LCG
*/
int A = 1103515245;
/**
@var C value for LCG
*/
int C = 12345;
/*****************************
*GET_TIME
*returns a long int representing the time
*****************************/
long long get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec * 1000000) + tv.tv_usec;
}
// Returns the number of seconds elapsed between the two specified times
float elapsed_time(long long start_time, long long end_time) {
return (float) (end_time - start_time) / (1000 * 1000);
}
/**
* Takes in a double and returns an integer that approximates to that double
* @return if the mantissa < .5 => return value < input value; else return value > input value
*/
double roundDouble(double value){
int newValue = (int)(value);
if(value - newValue < .5)
return newValue;
else
return newValue++;
}
/**
* Set values of the 3D array to a newValue if that value is equal to the testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
void setIf(int testValue, int newValue, int * array3D, int * dimX, int * dimY, int * dimZ){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
if(array3D[x * *dimY * *dimZ+y * *dimZ + z] == testValue)
array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
}
}
}
}
/**
* Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
* @see http://en.wikipedia.org/wiki/Linear_congruential_generator
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a uniformly distributed number [0, 1)
*/
double randu(int * seed, int index)
{
int num = A*seed[index] + C;
seed[index] = num % M;
return fabs(seed[index]/((double) M));
}
/**
* Generates a normally distributed random number using the Box-Muller transformation
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a double representing random number generated using the Box-Muller algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
*/
double randn(int * seed, int index){
/*Box-Muller algorithm*/
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2*PI*v);
double rt = -2*log(u);
return sqrt(rt)*cosine;
}
/**
* Sets values of 3D matrix using randomly generated numbers from a normal distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param seed The seed array
*/
void addNoise(int * array3D, int * dimX, int * dimY, int * dimZ, int * seed){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (int)(5*randn(seed, 0));
}
}
}
}
/**
* Fills a radius x radius matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
void strelDisk(int * disk, int radius)
{
int diameter = radius*2 - 1;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
double distance = sqrt(pow((double)(x-radius+1),2) + pow((double)(y-radius+1),2));
if(distance < radius)
disk[x*diameter + y] = 1;
}
}
}
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param poxZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
void dilate_matrix(int * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error)
{
int startX = posX - error;
while(startX < 0)
startX++;
int startY = posY - error;
while(startY < 0)
startY++;
int endX = posX + error;
while(endX > dimX)
endX--;
int endY = posY + error;
while(endY > dimY)
endY--;
int x,y;
for(x = startX; x < endX; x++){
for(y = startY; y < endY; y++){
double distance = sqrt( pow((double)(x-posX),2) + pow((double)(y-posY),2) );
if(distance < error)
matrix[x*dimY*dimZ + y*dimZ + posZ] = 1;
}
}
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
void imdilate_disk(int * matrix, int dimX, int dimY, int dimZ, int error, int * newMatrix)
{
int x, y, z;
for(z = 0; z < dimZ; z++){
for(x = 0; x < dimX; x++){
for(y = 0; y < dimY; y++){
if(matrix[x*dimY*dimZ + y*dimZ + z] == 1){
dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
}
}
}
}
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
void getneighbors(int * se, int numOnes, double * neighbors, int radius){
int x, y;
int neighY = 0;
int center = radius - 1;
int diameter = radius*2 -1;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(se[x*diameter + y]){
neighbors[neighY*2] = (int)(y - center);
neighbors[neighY*2 + 1] = (int)(x - center);
neighY++;
}
}
}
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
* the foreground intensity and the backgrounf intensity is known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
void videoSequence(int * I, int IszX, int IszY, int Nfr, int * seed){
int k;
int max_size = IszX*IszY*Nfr;
/*get object centers*/
int x0 = (int)roundDouble(IszY/2.0);
int y0 = (int)roundDouble(IszX/2.0);
I[x0 *IszY *Nfr + y0 * Nfr + 0] = 1;
/*move point*/
int xk, yk, pos;
for(k = 1; k < Nfr; k++){
xk = abs(x0 + (k-1));
yk = abs(y0 - 2*(k-1));
pos = yk * IszY * Nfr + xk *Nfr + k;
if(pos >= max_size)
pos = 0;
I[pos] = 1;
}
/*dilate matrix*/
int * newMatrix = (int *)malloc(sizeof(int)*IszX*IszY*Nfr);
imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
int x, y;
for(x = 0; x < IszX; x++){
for(y = 0; y < IszY; y++){
for(k = 0; k < Nfr; k++){
I[x*IszY*Nfr + y*Nfr + k] = newMatrix[x*IszY*Nfr + y*Nfr + k];
}
}
}
free(newMatrix);
/*define background, add noise*/
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
/*add noise*/
addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
* Determines the likelihood sum based on the formula: SUM( (IK[IND] - 100)^2 - (IK[IND] - 228)^2)/ 100
* @param I The 3D matrix
* @param ind The current ind array
* @param numOnes The length of ind array
* @return A double representing the sum
*/
double calcLikelihoodSum(int * I, int * ind, int numOnes){
double likelihoodSum = 0.0;
int y;
for(y = 0; y < numOnes; y++)
likelihoodSum += (pow((I[ind[y]] - 100),2) - pow((I[ind[y]]-228),2))/50.0;
return likelihoodSum;
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses sequential search
* @param CDF The CDF
* @param lengthCDF The length of CDF
* @param value The value to be found
* @return The index of value in the CDF; if value is never found, returns the last index
*/
int findIndex(double * CDF, int lengthCDF, double value){
int index = -1;
int x;
for(x = 0; x < lengthCDF; x++){
if(CDF[x] >= value){
index = x;
break;
}
}
if(index == -1){
return lengthCDF-1;
}
return index;
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses binary search before switching to sequential search
* @param CDF The CDF
* @param beginIndex The index to start searching from
* @param endIndex The index to stop searching
* @param value The value to find
* @return The index of value in the CDF; if value is never found, returns the last index
* @warning Use at your own risk; not fully tested
*/
int findIndexBin(double * CDF, int beginIndex, int endIndex, double value){
if(endIndex < beginIndex)
return -1;
int middleIndex = beginIndex + ((endIndex - beginIndex)/2);
/*check the value*/
if(CDF[middleIndex] >= value)
{
/*check that it's good*/
if(middleIndex == 0)
return middleIndex;
else if(CDF[middleIndex-1] < value)
return middleIndex;
else if(CDF[middleIndex-1] == value)
{
while(middleIndex > 0 && CDF[middleIndex-1] == value)
middleIndex--;
return middleIndex;
}
}
if(CDF[middleIndex] > value)
return findIndexBin(CDF, beginIndex, middleIndex+1, value);
return findIndexBin(CDF, middleIndex-1, endIndex, value);
}
/**
* The implementation of the particle filter using OpenMP for many frames
* @see http://openmp.org/wp/
* @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
void particleFilter(int * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles){
int max_size = IszX*IszY*Nfr;
long long start = get_time();
//original particle centroid
double xe = roundDouble(IszY/2.0);
double ye = roundDouble(IszX/2.0);
//expected object locations, compared to center
int radius = 5;
int diameter = radius*2 - 1;
int * disk = (int *)malloc(diameter*diameter*sizeof(int));
strelDisk(disk, radius);
int countOnes = 0;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(disk[x*diameter + y] == 1)
countOnes++;
}
}
double * objxy = (double *)malloc(countOnes*2*sizeof(double));
getneighbors(disk, countOnes, objxy, radius);
long long get_neighbors = get_time();
printf("TIME TO GET NEIGHBORS TOOK: %f\n", elapsed_time(start, get_neighbors));
//initial weights are all equal (1/Nparticles)
double * weights = (double *)malloc(sizeof(double)*Nparticles);
{ const unsigned long long parallel_for_start = current_time_ns();
#pragma omp parallel for shared(weights, Nparticles) private(x)
for(x = 0; x < Nparticles; x++){
weights[x] = 1/((double)(Nparticles));
} ;
const unsigned long long parallel_for_end = current_time_ns();
printf("pragma373_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start); }
long long get_weights = get_time();
printf("TIME TO GET WEIGHTSTOOK: %f\n", elapsed_time(get_neighbors, get_weights));
//initial likelihood to 0.0
double * likelihood = (double *)malloc(sizeof(double)*Nparticles);
double * arrayX = (double *)malloc(sizeof(double)*Nparticles);
double * arrayY = (double *)malloc(sizeof(double)*Nparticles);
double * xj = (double *)malloc(sizeof(double)*Nparticles);
double * yj = (double *)malloc(sizeof(double)*Nparticles);
double * CDF = (double *)malloc(sizeof(double)*Nparticles);
double * u = (double *)malloc(sizeof(double)*Nparticles);
int * ind = (int*)malloc(sizeof(int)*countOnes*Nparticles);
{ const unsigned long long parallel_for_start = current_time_ns();
#pragma omp parallel for shared(arrayX, arrayY, xe, ye) private(x)
for(x = 0; x < Nparticles; x++){
arrayX[x] = xe;
arrayY[x] = ye;
} ;
const unsigned long long parallel_for_end = current_time_ns();
printf("pragma388_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start); }
int k;
printf("TIME TO SET ARRAYS TOOK: %f\n", elapsed_time(get_weights, get_time()));
int indX, indY;
for(k = 1; k < Nfr; k++){
long long set_arrays = get_time();
//apply motion model
//draws sample from motion model (random walk). The only prior information
//is that the object moves 2x as fast as in the y direction
{ const unsigned long long parallel_for_start = current_time_ns();
#pragma omp parallel for shared(arrayX, arrayY, Nparticles, seed) private(x)
for(x = 0; x < Nparticles; x++){
arrayX[x] += 1 + 5*randn(seed, x);
arrayY[x] += -2 + 2*randn(seed, x);
} ;
const unsigned long long parallel_for_end = current_time_ns();
printf("pragma402_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start); }
long long error = get_time();
printf("TIME TO SET ERROR TOOK: %f\n", elapsed_time(set_arrays, error));
//particle filter likelihood
{ const unsigned long long parallel_for_start = current_time_ns();
#pragma omp parallel for shared(likelihood, I, arrayX, arrayY, objxy, ind) private(x, y, indX, indY)
for(x = 0; x < Nparticles; x++){
//compute the likelihood: remember our assumption is that you know
// foreground and the background image intensity distribution.
// Notice that we consider here a likelihood ratio, instead of
// p(z|x). It is possible in this case. why? a hometask for you.
//calc ind
for(y = 0; y < countOnes; y++){
indX = roundDouble(arrayX[x]) + objxy[y*2 + 1];
indY = roundDouble(arrayY[x]) + objxy[y*2];
ind[x*countOnes + y] = fabs((double)(indX*IszY*Nfr + indY*Nfr + k));
if(ind[x*countOnes + y] >= max_size)
ind[x*countOnes + y] = 0;
}
likelihood[x] = 0;
for(y = 0; y < countOnes; y++)
likelihood[x] += (pow((I[ind[x*countOnes + y]] - 100),2) - pow((I[ind[x*countOnes + y]]-228),2))/50.0;
likelihood[x] = likelihood[x]/((double) countOnes);
} ;
const unsigned long long parallel_for_end = current_time_ns();
printf("pragma410_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start); }
long long likelihood_time = get_time();
printf("TIME TO GET LIKELIHOODS TOOK: %f\n", elapsed_time(error, likelihood_time));
// update & normalize weights
// using equation (63) of Arulampalam Tutorial
{ const unsigned long long parallel_for_start = current_time_ns();
#pragma omp parallel for shared(Nparticles, weights, likelihood) private(x)
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x] * exp(likelihood[x]);
} ;
const unsigned long long parallel_for_end = current_time_ns();
printf("pragma433_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start); }
long long exponential = get_time();
printf("TIME TO GET EXP TOOK: %f\n", elapsed_time(likelihood_time, exponential));
double sumWeights = 0;
{ const unsigned long long parallel_for_start = current_time_ns();
#pragma omp parallel for private(x) reduction(+:sumWeights)
for(x = 0; x < Nparticles; x++){
sumWeights += weights[x];
} ;
const unsigned long long parallel_for_end = current_time_ns();
printf("pragma440_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start); }
long long sum_time = get_time();
printf("TIME TO SUM WEIGHTS TOOK: %f\n", elapsed_time(exponential, sum_time));
{ const unsigned long long parallel_for_start = current_time_ns();
#pragma omp parallel for shared(sumWeights, weights) private(x)
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x]/sumWeights;
} ;
const unsigned long long parallel_for_end = current_time_ns();
printf("pragma446_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start); }
long long normalize = get_time();
printf("TIME TO NORMALIZE WEIGHTS TOOK: %f\n", elapsed_time(sum_time, normalize));
xe = 0;
ye = 0;
// estimate the object location by expected values
{ const unsigned long long parallel_for_start = current_time_ns();
#pragma omp parallel for private(x) reduction(+:xe, ye)
for(x = 0; x < Nparticles; x++){
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
} ;
const unsigned long long parallel_for_end = current_time_ns();
printf("pragma455_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start); }
long long move_time = get_time();
printf("TIME TO MOVE OBJECT TOOK: %f\n", elapsed_time(normalize, move_time));
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
double distance = sqrt( pow((double)(xe-(int)roundDouble(IszY/2.0)),2) + pow((double)(ye-(int)roundDouble(IszX/2.0)),2) );
printf("%lf\n", distance);
//display(hold off for now)
//pause(hold off for now)
//resampling
CDF[0] = weights[0];
for(x = 1; x < Nparticles; x++){
CDF[x] = weights[x] + CDF[x-1];
}
long long cum_sum = get_time();
printf("TIME TO CALC CUM SUM TOOK: %f\n", elapsed_time(move_time, cum_sum));
double u1 = (1/((double)(Nparticles)))*randu(seed, 0);
{ const unsigned long long parallel_for_start = current_time_ns();
#pragma omp parallel for shared(u, u1, Nparticles) private(x)
for(x = 0; x < Nparticles; x++){
u[x] = u1 + x/((double)(Nparticles));
} ;
const unsigned long long parallel_for_end = current_time_ns();
printf("pragma480_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start); }
long long u_time = get_time();
printf("TIME TO CALC U TOOK: %f\n", elapsed_time(cum_sum, u_time));
int j, i;
{ const unsigned long long parallel_for_start = current_time_ns();
#pragma omp parallel for shared(CDF, Nparticles, xj, yj, u, arrayX, arrayY) private(i, j)
for(j = 0; j < Nparticles; j++){
i = findIndex(CDF, Nparticles, u[j]);
if(i == -1)
i = Nparticles-1;
xj[j] = arrayX[i];
yj[j] = arrayY[i];
} ;
const unsigned long long parallel_for_end = current_time_ns();
printf("pragma488_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start); }
long long xyj_time = get_time();
printf("TIME TO CALC NEW ARRAY X AND Y TOOK: %f\n", elapsed_time(u_time, xyj_time));
//#pragma omp parallel for shared(weights, Nparticles) private(x)
for(x = 0; x < Nparticles; x++){
//reassign arrayX and arrayY
arrayX[x] = xj[x];
arrayY[x] = yj[x];
weights[x] = 1/((double)(Nparticles));
}
long long reset = get_time();
printf("TIME TO RESET WEIGHTS TOOK: %f\n", elapsed_time(xyj_time, reset));
}
free(disk);
free(objxy);
free(weights);
free(likelihood);
free(xj);
free(yj);
free(arrayX);
free(arrayY);
free(CDF);
free(u);
free(ind);
}
int main(int argc, char * argv[]){
char* usage = "openmp.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>";
//check number of arguments
if(argc != 9)
{
printf("%s\n", usage);
return 0;
}
//check args deliminators
if( strcmp( argv[1], "-x" ) || strcmp( argv[3], "-y" ) || strcmp( argv[5], "-z" ) || strcmp( argv[7], "-np" ) ) {
printf( "%s\n",usage );
return 0;
}
int IszX, IszY, Nfr, Nparticles;
//converting a string to a integer
if( sscanf( argv[2], "%d", &IszX ) == EOF ) {
printf("ERROR: dimX input is incorrect");
return 0;
}
if( IszX <= 0 ) {
printf("dimX must be > 0\n");
return 0;
}
//converting a string to a integer
if( sscanf( argv[4], "%d", &IszY ) == EOF ) {
printf("ERROR: dimY input is incorrect");
return 0;
}
if( IszY <= 0 ) {
printf("dimY must be > 0\n");
return 0;
}
//converting a string to a integer
if( sscanf( argv[6], "%d", &Nfr ) == EOF ) {
printf("ERROR: Number of frames input is incorrect");
return 0;
}
if( Nfr <= 0 ) {
printf("number of frames must be > 0\n");
return 0;
}
//converting a string to a integer
if( sscanf( argv[8], "%d", &Nparticles ) == EOF ) {
printf("ERROR: Number of particles input is incorrect");
return 0;
}
if( Nparticles <= 0 ) {
printf("Number of particles must be > 0\n");
return 0;
}
//establish seed
int * seed = (int *)malloc(sizeof(int)*Nparticles);
int i;
for(i = 0; i < Nparticles; i++)
seed[i] = time(0)*i;
//malloc matrix
int * I = (int *)malloc(sizeof(int)*IszX*IszY*Nfr);
long long start = get_time();
//call video sequence
videoSequence(I, IszX, IszY, Nfr, seed);
long long endVideoSequence = get_time();
printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence));
//call particle filter
const unsigned long long full_program_start = current_time_ns();
particleFilter(I, IszX, IszY, Nfr, seed, Nparticles) ;
const unsigned long long full_program_end = current_time_ns();
printf("full_program %llu ns\n", full_program_end - full_program_start);
;
long long endParticleFilter = get_time();
printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter));
printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter));
free(seed);
free(I);
return 0;
}
|
softmax-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file softmax-inl.h
* \brief
*/
#ifndef MXNET_OPERATOR_NN_SOFTMAX_INL_H_
#define MXNET_OPERATOR_NN_SOFTMAX_INL_H_
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include <type_traits>
#include "../mxnet_op.h"
#include "../operator_common.h"
#include "../tensor/broadcast_reduce_op.h"
using mshadow::red::limits::MinValue;
namespace mxnet {
namespace op {
namespace mxnet_op {
struct softmax_fwd {
template <typename AType>
MSHADOW_XINLINE static AType Map(float a, AType b) {
return AType(expf(a) / b);
}
template <typename AType>
MSHADOW_XINLINE static AType Map(double a, AType b) {
return AType(exp(a) / b);
}
};
struct log_softmax_fwd {
template <typename DType>
MSHADOW_XINLINE static float Map(DType a, float b) {
return a - logf(b);
}
template <typename DType>
MSHADOW_XINLINE static double Map(DType a, double b) {
return a - log(b);
}
};
template <typename OP,
bool negate,
typename AType,
typename DType,
typename OType,
typename IType,
int ndim>
inline void Softmax(Stream<cpu>* s,
DType* in,
OType* out,
IType* length,
Shape<ndim> shape,
int axis,
const DType temperature) {
index_t M = shape[axis];
if (M == 0)
return;
index_t N = shape.Size() / M;
Shape<ndim> stride = calc_stride(shape);
Shape<ndim> sshape = shape;
sshape[axis] = 1;
index_t sa = stride[axis];
if (length == nullptr) {
#pragma omp parallel for
for (index_t i = 0; i < N; ++i) {
index_t base = unravel_dot(i, sshape, stride);
DType mmax = negate ? -in[base] : in[base];
DType val;
for (index_t j = 1; j < M; ++j) {
val = negate ? -in[base + j * sa] : in[base + j * sa];
if (mmax < val)
mmax = val;
}
AType sum = AType(0);
DType in_val;
// By default temperature is 1.0.
// Adding a branch here to save the CPU 'divide-by-1' computation at runtime
if (temperature == 1.0) {
for (index_t j = 0; j < M; ++j) {
in_val = negate ? -in[base + j * sa] : in[base + j * sa];
sum += std::exp(in_val - mmax);
}
for (index_t j = 0; j < M; ++j) {
in_val = negate ? -in[base + j * sa] : in[base + j * sa];
out[base + j * sa] = OP::Map(in_val - mmax, sum);
}
} else {
for (index_t j = 0; j < M; ++j) {
in_val = negate ? -in[base + j * sa] : in[base + j * sa];
sum += std::exp((in_val - mmax) / temperature);
}
for (index_t j = 0; j < M; ++j) {
in_val = negate ? -in[base + j * sa] : in[base + j * sa];
out[base + j * sa] = OP::Map((in_val - mmax) / temperature, sum);
}
}
}
} else {
#pragma omp parallel for
for (index_t i = 0; i < N; ++i) {
index_t len = static_cast<index_t>(length[i]);
index_t base = unravel_dot(i, sshape, stride);
DType mmax = negate ? -in[base] : in[base];
DType val;
for (index_t j = 1; j < len; ++j) {
val = negate ? -in[base + j * sa] : in[base + j * sa];
if (mmax < val)
mmax = val;
}
for (index_t j = len; j < M; ++j) {
out[base + j * sa] = OType(0.0f);
}
AType sum = AType(0);
DType in_val;
// By default temperature is 1.0.
// Adding a branch here to save the CPU 'divide-by-1' computation at runtime
if (temperature == 1.0) {
for (index_t j = 0; j < len; ++j) {
in_val = negate ? -in[base + j * sa] : in[base + j * sa];
sum += std::exp(in_val - mmax);
}
for (index_t j = 0; j < len; ++j) {
in_val = negate ? -in[base + j * sa] : in[base + j * sa];
out[base + j * sa] = OP::Map(in_val - mmax, sum);
}
} else {
for (index_t j = 0; j < len; ++j) {
in_val = negate ? -in[base + j * sa] : in[base + j * sa];
sum += std::exp((in_val - mmax) / temperature);
}
for (index_t j = 0; j < len; ++j) {
in_val = negate ? -in[base + j * sa] : in[base + j * sa];
out[base + j * sa] = OP::Map((in_val - mmax) / temperature, sum);
}
}
}
}
}
struct masked_softmax_where {
template <typename DType, int ndim>
MSHADOW_XINLINE static void Map(index_t id,
DType* out,
const bool* cond,
const DType* x,
const double y,
Shape<ndim> data_shape,
Shape<ndim> mask_shape) {
index_t mask_pos = 0;
index_t stride = 1;
for (index_t i = ndim - 1, j = id; i >= 0; --i) {
auto tmp = j / data_shape[i];
if (mask_shape[i] != 1) {
mask_pos += (j - tmp * mask_shape[i]) * stride;
}
stride *= mask_shape[i];
j = tmp;
}
KERNEL_ASSIGN(out[id], kWriteTo, (cond[mask_pos] ? x[id] : static_cast<DType>(y)));
}
};
template <typename OP, bool masked_neg_inf, bool negate, typename AType, typename DType, int ndim>
inline void MaskedSoftmax(Stream<cpu>* s,
DType* in,
DType* out,
bool* mask,
Shape<ndim> data_shape,
Shape<ndim> mask_shape,
int axis,
const double temperature,
bool normalize,
const OpContext& ctx) {
Tensor<cpu, 1, DType> workspace =
ctx.requested[0].get_space_typed<cpu, 1, DType>(Shape1(data_shape.Size()), s);
DType* masked_input = TBlob(workspace).dptr<DType>();
double neg = MinValue<DType>();
Kernel<masked_softmax_where, cpu>::Launch(
s, data_shape.Size(), masked_input, mask, in, neg, data_shape, mask_shape);
int* max_lenghts = nullptr;
double masked_value = 0.0;
if (masked_neg_inf)
masked_value = -INFINITY;
Softmax<OP, negate, AType, DType>(
s, masked_input, out, max_lenghts, data_shape, axis, temperature);
Kernel<masked_softmax_where, cpu>::Launch(
s, data_shape.Size(), out, mask, out, masked_value, data_shape, mask_shape);
}
struct softmax_bwd {
template <typename DType, typename AType>
MSHADOW_XINLINE static AType Map(DType ograd, DType out, AType sum) {
return AType(out * (ograd - sum));
}
};
struct log_softmax_bwd {
template <typename AType>
MSHADOW_XINLINE static AType Map(float ograd, float out, AType sum) {
return AType(ograd - expf(out) * sum);
}
template <typename AType>
MSHADOW_XINLINE static AType Map(double ograd, double out, AType sum) {
return AType(ograd - exp(out) * sum);
}
};
template <typename OP1,
typename OP2,
int Req,
bool negate,
typename AType,
typename DType,
typename OType,
typename IType,
int ndim>
inline void SoftmaxGrad(Stream<cpu>* s,
OType* out,
OType* ograd,
DType* igrad,
IType* length,
Shape<ndim> shape,
int axis,
const DType temperature) {
index_t M = shape[axis];
if (M == 0)
return;
index_t N = shape.Size() / M;
Shape<ndim> stride = calc_stride(shape);
Shape<ndim> sshape = shape;
sshape[axis] = 1;
index_t sa = stride[axis];
if (length != nullptr) {
#pragma omp parallel for
for (index_t i = 0; i < N; ++i) {
index_t base = unravel_dot(i, sshape, stride);
index_t len = static_cast<index_t>(length[i]);
AType sum = AType(0);
for (index_t j = 0; j < len; ++j) {
sum += OP1::Map(ograd[base + j * sa], out[base + j * sa]);
}
// By default temperature is 1.0.
// Adding a branch here to save the CPU 'divide-by-1' computation at runtime
DType final_result;
if (temperature == 1.0) {
for (index_t j = 0; j < M; ++j) {
final_result = negate ? -OP2::Map(ograd[base + j * sa], out[base + j * sa], sum)
: OP2::Map(ograd[base + j * sa], out[base + j * sa], sum);
final_result = (j < len) ? final_result : DType(0.0f);
KERNEL_ASSIGN(igrad[base + j * sa], Req, final_result);
}
} else {
for (index_t j = 0; j < M; ++j) {
final_result =
negate ? -OP2::Map(ograd[base + j * sa], out[base + j * sa], sum) / temperature
: OP2::Map(ograd[base + j * sa], out[base + j * sa], sum) / temperature;
final_result = (j < len) ? final_result : DType(0.0f);
KERNEL_ASSIGN(igrad[base + j * sa], Req, final_result);
}
}
}
} else {
#pragma omp parallel for
for (index_t i = 0; i < N; ++i) {
index_t base = unravel_dot(i, sshape, stride);
AType sum = AType(0);
for (index_t j = 0; j < M; ++j) {
sum += OP1::Map(ograd[base + j * sa], out[base + j * sa]);
}
// By default temperature is 1.0.
// Adding a branch here to save the CPU 'divide-by-1' computation at runtime
DType final_result;
if (temperature == 1.0) {
for (index_t j = 0; j < M; ++j) {
final_result = negate ? -OP2::Map(ograd[base + j * sa], out[base + j * sa], sum)
: OP2::Map(ograd[base + j * sa], out[base + j * sa], sum);
KERNEL_ASSIGN(igrad[base + j * sa], Req, final_result);
}
} else {
for (index_t j = 0; j < M; ++j) {
final_result =
negate ? -OP2::Map(ograd[base + j * sa], out[base + j * sa], sum) / temperature
: OP2::Map(ograd[base + j * sa], out[base + j * sa], sum) / temperature;
KERNEL_ASSIGN(igrad[base + j * sa], Req, final_result);
}
}
}
}
}
template <typename OP1,
typename OP2,
int Req,
bool negate,
typename AType,
int ndim,
typename DType>
inline void MaskedSoftmaxGrad(Stream<cpu>* s,
DType* out,
DType* ograd,
DType* igrad,
bool* mask,
Shape<ndim> data_shape,
Shape<ndim> mask_shape,
int axis,
const double temperature,
const OpContext& ctx) {
Tensor<cpu, 1, DType> workspace =
ctx.requested[0].get_space_typed<cpu, 1, DType>(Shape1(data_shape.Size()), s);
DType* masked_ograd = TBlob(workspace).dptr<DType>();
Kernel<masked_softmax_where, cpu>::Launch(
s, data_shape.Size(), masked_ograd, mask, ograd, 0.0, data_shape, mask_shape);
int* max_lenghts = nullptr;
SoftmaxGrad<OP1, OP2, Req, negate, AType, DType, DType, int, ndim>(
s, out, masked_ograd, igrad, max_lenghts, data_shape, axis, temperature);
Kernel<masked_softmax_where, cpu>::Launch(
s, data_shape.Size(), igrad, mask, igrad, 0.0, data_shape, mask_shape);
}
#ifdef __CUDACC__
const int softmax_threads_per_block = 512;
template <int ndim>
MSHADOW_XINLINE index_t get_mask_position(const index_t idx,
const Shape<ndim>& data_shape,
const Shape<ndim>& mask_shape,
int axis,
index_t* stride_axis) {
index_t ret = 0;
index_t stride = 1;
*stride_axis = 1;
#pragma unroll
for (index_t i = ndim - 1, j = idx; i >= 0; --i) {
auto tmp = j / data_shape[i];
if (i != axis && mask_shape[i] != 1) {
ret += (j - tmp * mask_shape[i]) * stride;
if (i > axis)
*stride_axis *= mask_shape[i];
}
stride *= mask_shape[i];
j = tmp;
}
return ret;
}
template <bool normalize,
int x_bits,
typename OP,
bool masked_neg_inf,
bool negate,
typename AType,
int ndim,
typename DType>
__global__ void masked_softmax_kernel(DType* in,
DType* out,
bool* in_mask,
index_t M,
int axis,
Shape<ndim> sshape,
Shape<ndim> stride,
Shape<ndim> mask_shape,
const double temperature) {
extern __shared__ double shared[];
AType* smem = reinterpret_cast<AType*>(shared); // x_size
const unsigned x_size = 1 << x_bits;
index_t sa = stride[axis];
index_t base = unravel_dot(blockIdx.x, sshape, stride);
index_t sa_mask = 0;
index_t base_mask = get_mask_position(blockIdx.x, sshape, mask_shape, axis, &sa_mask);
bool bcst_mask_axis = (mask_shape[axis] == 1);
index_t x = threadIdx.x;
DType smax = 0.0;
if (normalize) {
red::maximum::SetInitValue(smem[x]);
for (index_t i = x; i < M; i += x_size) {
bool mask_value = bcst_mask_axis ? in_mask[base_mask] : in_mask[base_mask + i * sa_mask];
if (mask_value)
smem[x] = ::max(smem[x], negate ? -in[base + i * sa] : in[base + i * sa]);
}
__syncthreads();
cuda::Reduce1D<red::maximum, x_bits>(smem);
__syncthreads();
smax = smem[0];
__syncthreads();
}
red::sum::SetInitValue(smem[x]);
DType val;
for (index_t i = x; i < M; i += x_size) {
bool mask_value = bcst_mask_axis ? in_mask[base_mask] : in_mask[base_mask + i * sa_mask];
if (mask_value) {
val = (negate ? -in[base + i * sa] : in[base + i * sa]);
smem[x] += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature)));
}
}
__syncthreads();
cuda::Reduce1D<red::sum, x_bits>(smem);
__syncthreads();
AType ssum = smem[0];
__syncthreads();
double masked_value = 0.0;
if (masked_neg_inf)
masked_value = -INFINITY;
for (index_t i = x; i < M; i += x_size) {
val = (negate ? -in[base + i * sa] : in[base + i * sa]);
bool mask_value = bcst_mask_axis ? in_mask[base_mask] : in_mask[base_mask + i * sa_mask];
out[base + i * sa] = mask_value
? DType(OP::Map((val - smax) / static_cast<DType>(temperature), ssum))
: DType(masked_value);
}
}
template <bool normalize,
typename OP,
bool masked_neg_inf,
bool negate,
typename AType,
typename LType,
typename LTypeMask,
typename DType,
int ndim>
__global__ void masked_softmax_stride1_kernel(const DType* in,
DType* out,
bool* in_mask,
const index_t M,
int axis,
Shape<ndim> sshape,
Shape<ndim> mask_shape,
const double temperature,
const int rows_per_block,
const index_t total_rows,
const size_t size_input_shared,
const size_t size_mask_shared) {
const int entries_per_load = sizeof(LType) / sizeof(DType);
const int entries_per_load_mask = sizeof(LTypeMask) / sizeof(bool);
const int row_length = entries_per_load > 0 ? M / entries_per_load : 0;
const int row_length_mask = entries_per_load_mask > 0 ? M / entries_per_load_mask : 0;
extern __shared__ double shared[];
LType* persistent_storage = reinterpret_cast<LType*>(shared);
// rows_per_block * M (DType), aligned to double
LTypeMask* mask_shared = reinterpret_cast<LTypeMask*>(&shared[size_input_shared]);
// rows_per_block * M (bool), aligned to double
AType* scratch = reinterpret_cast<AType*>(&shared[size_input_shared + size_mask_shared]);
// softmax_threads_per_block
const int warp_size = 32;
const int threads_per_row = softmax_threads_per_block / rows_per_block;
const int my_local_row = threadIdx.x / threads_per_row;
const int my_row = blockIdx.x * rows_per_block + my_local_row;
if (my_row >= total_rows)
return;
const int my_id = threadIdx.x % threads_per_row;
size_t base = my_row * row_length;
index_t pos_mask = 0;
index_t stride = mask_shape[axis];
#pragma unroll
for (index_t i = axis - 1, j = my_row; i >= 0; --i) {
auto tmp = j / sshape[i];
if (mask_shape[i] != 1) {
pos_mask += (j - tmp * mask_shape[i]) * stride;
stride *= mask_shape[i];
}
j = tmp;
}
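// Stage this block's rows in shared memory using wide LType loads: a single
// LType transaction moves sizeof(LType)/sizeof(DType) elements (e.g. one
// 16-byte load carries four floats), which is what makes this stride-1 path
// profitable.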
const LType* in_aligned = reinterpret_cast<const LType*>(in);
for (index_t i = my_id; i < row_length; i += threads_per_row) {
persistent_storage[my_local_row * row_length + i] = in_aligned[base + i];
}
const LTypeMask* in_mask_aligned = reinterpret_cast<const LTypeMask*>(&in_mask[pos_mask]);
for (index_t i = my_id; i < row_length_mask; i += threads_per_row) {
mask_shared[my_local_row * row_length_mask + i] =
(mask_shape[axis] > 1) ? in_mask_aligned[i] : in_mask_aligned[0];
}
DType* row = reinterpret_cast<DType*>(persistent_storage + my_local_row * row_length);
bool* row_mask = reinterpret_cast<bool*>(mask_shared + my_local_row * row_length_mask);
__syncthreads();
DType smax = 0.0;
if (normalize) {
DType my_max_value;
red::maximum::SetInitValue(my_max_value);
for (index_t i = my_id; i < M; i += threads_per_row) {
if (row_mask[i])
my_max_value = ::max(my_max_value, negate ? -row[i] : row[i]);
}
scratch[threadIdx.x] = my_max_value;
__syncthreads();
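// Two-stage reduction: fold per-thread maxima in shared memory down to warp
// granularity, then finish with a shuffle-based warp_reduce in one warp.
// This assumes threads_per_row is a power of two no smaller than warp_size.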
for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
if (my_id < size) {
scratch[threadIdx.x] = ::max(scratch[threadIdx.x], scratch[threadIdx.x + size]);
}
__syncthreads();
}
if (my_id < warp_size) {
AType my_value = common::cuda::warp_reduce(scratch[threadIdx.x],
[](AType x, AType y) { return ::max(x, y); });
scratch[threadIdx.x] = my_value;
}
__syncthreads();
smax = scratch[threadIdx.x - threadIdx.x % threads_per_row];
__syncthreads();
}
AType my_sum;
red::sum::SetInitValue(my_sum);
for (index_t i = my_id; i < M; i += threads_per_row) {
if (row_mask[i]) {
const DType val = (negate ? -row[i] : row[i]);
my_sum += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature)));
}
}
scratch[threadIdx.x] = my_sum;
__syncthreads();
for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
if (my_id < size) {
scratch[threadIdx.x] += scratch[threadIdx.x + size];
}
__syncthreads();
}
if (my_id < warp_size) {
AType my_value =
common::cuda::warp_reduce(scratch[threadIdx.x], [](AType x, AType y) { return x + y; });
scratch[threadIdx.x] = my_value;
}
__syncthreads();
AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row];
__syncthreads();
double masked_value = 0.0;
if (masked_neg_inf)
masked_value = -INFINITY;
for (index_t i = my_id; i < M; i += threads_per_row) {
const DType val = (negate ? -row[i] : row[i]);
row[i] = row_mask[i] ? DType(OP::Map((val - smax) / static_cast<DType>(temperature), ssum))
: DType(masked_value);
}
__syncthreads();
LType* out_aligned = reinterpret_cast<LType*>(out);
for (index_t i = my_id; i < row_length; i += threads_per_row) {
out_aligned[base + i] = persistent_storage[my_local_row * row_length + i];
}
}
template <typename OP,
bool masked_neg_inf,
bool negate,
typename AType,
typename DType,
typename OType,
int ndim>
inline void MaskedSoftmax(Stream<gpu>* s,
DType* in,
OType* out,
bool* mask,
Shape<ndim> data_shape,
Shape<ndim> mask_shape,
int axis,
const double temperature,
bool normalize,
const OpContext& ctx) {
const int x_bits = 7;
const int x_size = 1 << x_bits;
index_t M = data_shape[axis];
if (M == 0 || data_shape.Size() == 0)
return;
index_t N = data_shape.Size() / M;
Shape<ndim> stride = calc_stride(data_shape);
Shape<ndim> sshape = data_shape;
sshape[axis] = 1;
const size_t DSize = sizeof(DType);
// Using max of 20 kB of shared memory for InputData in the optimized case
const size_t max_opt_M = 20 * 1024 / DSize;
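// e.g. max_opt_M is 5120 elements for 4-byte float32 and 10240 for float16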
if (stride[axis] == 1 && static_cast<size_t>(M) <= max_opt_M &&
std::is_same<DType, OType>::value) {
int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType));
int ltype_mask = mxnet::common::cuda::get_load_type(mask_shape[axis] * sizeof(bool));
MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
CHECK_LE(sizeof(DType), sizeof(LType));
MXNET_LOAD_TYPE_SWITCH(ltype_mask, LTypeMask, {
CHECK_LE(sizeof(bool), sizeof(LTypeMask));
int rows_per_block = mxnet::common::cuda::get_rows_per_block(
M * sizeof(DType) / sizeof(LType), softmax_threads_per_block);
// calculate amount shared memory (slots aligned to double)
int entries_per_load = sizeof(LType) / sizeof(DType);
int entries_per_load_mask = sizeof(LTypeMask) / sizeof(bool);
size_t size_input_shared = entries_per_load > 0 ? rows_per_block * M / entries_per_load : 0;
size_t size_mask_shared =
entries_per_load_mask > 0 ? rows_per_block * M / entries_per_load_mask : 0;
size_input_shared =
((size_input_shared * sizeof(LType) + sizeof(double) - 1) / sizeof(double));
size_mask_shared =
((size_mask_shared * sizeof(LTypeMask) + sizeof(double) - 1) / sizeof(double));
size_t amount_shared = size_input_shared * sizeof(double) +
size_mask_shared * sizeof(double) +
softmax_threads_per_block * sizeof(AType);
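// Resulting dynamic shared-memory layout (each region double-aligned):
// [ input rows | mask rows | per-thread AType scratch ]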
int nblocks = (N + rows_per_block - 1) / rows_per_block;
if (normalize) {
masked_softmax_stride1_kernel<true, OP, masked_neg_inf, negate, AType, LType, LTypeMask>
<<<nblocks,
softmax_threads_per_block,
amount_shared,
mshadow::Stream<gpu>::GetStream(s)>>>(in,
out,
mask,
M,
axis,
sshape,
mask_shape,
temperature,
rows_per_block,
N,
size_input_shared,
size_mask_shared);
} else {
masked_softmax_stride1_kernel<false, OP, masked_neg_inf, negate, AType, LType, LTypeMask>
<<<nblocks,
softmax_threads_per_block,
amount_shared,
mshadow::Stream<gpu>::GetStream(s)>>>(in,
out,
mask,
M,
axis,
sshape,
mask_shape,
temperature,
rows_per_block,
N,
size_input_shared,
size_mask_shared);
}
});
});
MSHADOW_CUDA_POST_KERNEL_CHECK(masked_softmax_stride1_kernel);
} else {
size_t amount_shared = x_size * sizeof(AType);
if (normalize) {
masked_softmax_kernel<true, x_bits, OP, masked_neg_inf, negate, AType, ndim>
<<<N, x_size, amount_shared, mshadow::Stream<gpu>::GetStream(s)>>>(
in, out, mask, M, axis, sshape, stride, mask_shape, temperature);
} else {
masked_softmax_kernel<false, x_bits, OP, masked_neg_inf, negate, AType, ndim>
<<<N, x_size, amount_shared, mshadow::Stream<gpu>::GetStream(s)>>>(
in, out, mask, M, axis, sshape, stride, mask_shape, temperature);
}
MSHADOW_CUDA_POST_KERNEL_CHECK(masked_softmax_kernel);
}
}
template <typename OP1,
typename OP2,
int Req,
bool negate,
typename AType,
typename LType,
typename LTypeMask,
typename DType,
typename OType,
int ndim>
__global__ void masked_softmax_stride1_grad_kernel(const OType* out,
const OType* ograd,
DType* igrad,
const bool* in_mask,
const index_t M,
int axis,
Shape<ndim> sshape,
Shape<ndim> mask_shape,
const double temperature,
const int rows_per_block,
const index_t total_rows,
const size_t size_input_shared,
const size_t size_mask_shared) {
const int entries_per_load = sizeof(LType) / sizeof(DType);
const int entries_per_load_mask = sizeof(LTypeMask) / sizeof(bool);
const int row_length = entries_per_load > 0 ? M / entries_per_load : 0;
const int row_length_mask = entries_per_load_mask > 0 ? M / entries_per_load_mask : 0;
extern __shared__ double shared[];
LType* persistent_storage = reinterpret_cast<LType*>(shared);
// 2 * rows_per_block * M (DType), aligned to double
LTypeMask* mask_shared = reinterpret_cast<LTypeMask*>(&shared[size_input_shared]);
// rows_per_block * M (bool), aligned to double
AType* scratch = reinterpret_cast<AType*>(&shared[size_input_shared + size_mask_shared]);
// softmax_threads_per_block
const int warp_size = 32;
const int threads_per_row = softmax_threads_per_block / rows_per_block;
const int my_local_row = threadIdx.x / threads_per_row;
const int my_row = blockIdx.x * rows_per_block + my_local_row;
if (my_row >= total_rows)
return;
const int my_id = threadIdx.x % threads_per_row;
size_t base = my_row * row_length;
index_t pos_mask = 0;
index_t stride = mask_shape[axis];
#pragma unroll
for (index_t i = axis - 1, j = my_row; i >= 0; --i) {
auto tmp = j / sshape[i];
if (mask_shape[i] != 1) {
pos_mask += (j - tmp * mask_shape[i]) * stride;
stride *= mask_shape[i];
}
j = tmp;
}
const LType* out_aligned = reinterpret_cast<const LType*>(out);
const LType* ograd_aligned = reinterpret_cast<const LType*>(ograd);
for (index_t i = my_id; i < row_length; i += threads_per_row) {
persistent_storage[my_local_row * row_length * 2 + i] = out_aligned[base + i];
persistent_storage[my_local_row * row_length * 2 + row_length + i] = ograd_aligned[base + i];
}
const LTypeMask* in_mask_aligned = reinterpret_cast<const LTypeMask*>(&in_mask[pos_mask]);
for (index_t i = my_id; i < row_length_mask; i += threads_per_row) {
mask_shared[my_local_row * row_length_mask + i] =
(mask_shape[axis] > 1) ? in_mask_aligned[i] : in_mask_aligned[0];
}
DType* row = reinterpret_cast<DType*>(persistent_storage + my_local_row * row_length * 2);
bool* row_mask = reinterpret_cast<bool*>(mask_shared + my_local_row * row_length_mask);
__syncthreads();
AType my_sum_value;
red::sum::SetInitValue(my_sum_value);
for (index_t i = my_id; i < M; i += threads_per_row) {
if (row_mask[i])
my_sum_value += OP1::Map(row[i + M], row[i]);
}
scratch[threadIdx.x] = my_sum_value;
__syncthreads();
for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
if (my_id < size) {
scratch[threadIdx.x] = scratch[threadIdx.x] + scratch[threadIdx.x + size];
}
__syncthreads();
}
if (my_id < warp_size) {
AType my_value =
common::cuda::warp_reduce(scratch[threadIdx.x], [](AType x, AType y) { return x + y; });
scratch[threadIdx.x] = my_value;
}
__syncthreads();
AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row];
__syncthreads();
for (index_t i = my_id; i < M; i += threads_per_row) {
const DType val =
negate ? -OP2::Map(row[i + M], row[i], ssum) : OP2::Map(row[i + M], row[i], ssum);
row[i] = row_mask[i] ? DType(val / static_cast<DType>(temperature)) : DType(0.0f);
if (Req == kAddTo) {
row[i] += igrad[my_row * M + i];
}
}
__syncthreads();
LType* igrad_aligned = reinterpret_cast<LType*>(igrad);
for (index_t i = my_id; i < row_length; i += threads_per_row) {
igrad_aligned[base + i] = persistent_storage[my_local_row * row_length * 2 + i];
}
}
template <int x_bits,
typename OP1,
typename OP2,
int Req,
bool negate,
typename AType,
int ndim,
typename DType,
typename OType>
__global__ void masked_softmax_grad_kernel(OType* out,
OType* ograd,
DType* igrad,
const bool* in_mask,
index_t M,
int axis,
Shape<ndim> sshape,
Shape<ndim> stride,
Shape<ndim> mask_shape,
const double temperature) {
const unsigned x_size = 1 << x_bits;
__shared__ AType smem[x_size];
index_t sa = stride[axis];
index_t base = unravel_dot(blockIdx.x, sshape, stride);
index_t sa_mask = 0;
index_t base_mask = get_mask_position(blockIdx.x, sshape, mask_shape, axis, &sa_mask);
bool bcst_mask_axis = (mask_shape[axis] == 1);
index_t x = threadIdx.x;
red::sum::SetInitValue(smem[x]);
for (index_t i = x; i < M; i += x_size) {
bool mask_value = bcst_mask_axis ? in_mask[base_mask] : in_mask[base_mask + i * sa_mask];
if (mask_value)
smem[x] += OP1::Map(ograd[base + i * sa], out[base + i * sa]);
}
__syncthreads();
cuda::Reduce1D<red::sum, x_bits>(smem);
__syncthreads();
AType ssum = smem[0];
__syncthreads();
DType final_result;
for (index_t i = x; i < M; i += x_size) {
bool mask_value = bcst_mask_axis ? in_mask[base_mask] : in_mask[base_mask + i * sa_mask];
final_result = negate ? -OP2::Map(ograd[base + i * sa], out[base + i * sa], ssum)
: OP2::Map(ograd[base + i * sa], out[base + i * sa], ssum);
final_result = mask_value ? final_result / static_cast<DType>(temperature) : DType(0.0f);
KERNEL_ASSIGN(igrad[base + i * sa], Req, final_result);
}
}
template <typename OP1,
typename OP2,
int Req,
bool negate,
typename AType,
int ndim,
typename DType,
typename OType>
inline void MaskedSoftmaxGrad(Stream<gpu>* s,
OType* out,
OType* ograd,
DType* igrad,
bool* mask,
Shape<ndim> data_shape,
Shape<ndim> mask_shape,
int axis,
const double temperature,
const OpContext& ctx) {
const int x_bits = 7;
const int x_size = 1 << x_bits;
index_t M = data_shape[axis];
if (M == 0 || data_shape.Size() == 0)
return;
index_t N = data_shape.Size() / M;
Shape<ndim> stride = calc_stride(data_shape);
Shape<ndim> sshape = data_shape;
sshape[axis] = 1;
const size_t DSize = sizeof(DType);
// Using max of 20 kB of shared memory for InputData in the optimized case
const size_t max_opt_M = 20 * 1024 / DSize;
if (stride[axis] == 1 && static_cast<size_t>(M) <= max_opt_M &&
std::is_same<DType, OType>::value) {
int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType));
int ltype_mask = mxnet::common::cuda::get_load_type(mask_shape[axis] * sizeof(bool));
MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
CHECK_LE(sizeof(DType), sizeof(LType));
MXNET_LOAD_TYPE_SWITCH(ltype_mask, LTypeMask, {
CHECK_LE(sizeof(bool), sizeof(LTypeMask));
int rows_per_block = mxnet::common::cuda::get_rows_per_block(
M * sizeof(DType) / sizeof(LType), softmax_threads_per_block);
// calculate amount shared memory (slots aligned to double)
int entries_per_load = sizeof(LType) / sizeof(DType);
int entries_per_load_mask = sizeof(LTypeMask) / sizeof(bool);
size_t size_input_shared = entries_per_load > 0 ? rows_per_block * M / entries_per_load : 0;
size_t size_mask_shared =
entries_per_load_mask > 0 ? rows_per_block * M / entries_per_load_mask : 0;
size_input_shared =
((2 * size_input_shared * sizeof(LType) + sizeof(double) - 1) / sizeof(double));
size_mask_shared =
((size_mask_shared * sizeof(LTypeMask) + sizeof(double) - 1) / sizeof(double));
size_t amount_shared = size_input_shared * sizeof(double) +
size_mask_shared * sizeof(double) +
softmax_threads_per_block * sizeof(AType);
int nblocks = (N + rows_per_block - 1) / rows_per_block;
masked_softmax_stride1_grad_kernel<OP1, OP2, Req, negate, AType, LType, LTypeMask>
<<<nblocks,
softmax_threads_per_block,
amount_shared,
mshadow::Stream<gpu>::GetStream(s)>>>(out,
ograd,
igrad,
mask,
M,
axis,
sshape,
mask_shape,
temperature,
rows_per_block,
N,
size_input_shared,
size_mask_shared);
});
});
MSHADOW_CUDA_POST_KERNEL_CHECK(masked_softmax_stride1_grad_kernel);
} else {
masked_softmax_grad_kernel<x_bits, OP1, OP2, Req, negate, AType, ndim>
<<<N, x_size, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
out, ograd, igrad, mask, M, axis, sshape, stride, mask_shape, temperature);
MSHADOW_CUDA_POST_KERNEL_CHECK(masked_softmax_grad_kernel);
}
}
#endif
} // namespace mxnet_op
struct SoftmaxParam : public dmlc::Parameter<SoftmaxParam> {
int axis;
dmlc::optional<double> temperature;
dmlc::optional<int> dtype;
dmlc::optional<bool> use_length;
DMLC_DECLARE_PARAMETER(SoftmaxParam) {
DMLC_DECLARE_FIELD(axis).set_default(-1).describe("The axis along which to compute softmax.");
DMLC_DECLARE_FIELD(temperature)
.set_default(dmlc::optional<double>())
.describe("Temperature parameter in softmax");
DMLC_DECLARE_FIELD(dtype)
.add_enum("float16", mshadow::kFloat16)
.add_enum("float32", mshadow::kFloat32)
.add_enum("float64", mshadow::kFloat64)
.set_default(dmlc::optional<int>())
.describe(
"DType of the output in case this can't be inferred. "
"Defaults to the same as input's dtype if not defined (dtype=None).");
DMLC_DECLARE_FIELD(use_length)
.set_default(dmlc::optional<bool>(false))
.describe("Whether to use the length input as a mask over the data input.");
}
bool operator==(const SoftmaxParam& other) const {
return this->axis == other.axis && this->temperature == other.temperature &&
this->dtype == other.dtype && this->use_length == other.use_length;
}
void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
std::ostringstream axis_s, temperature_s, dtype_s, use_length_s;
axis_s << axis;
temperature_s << temperature;
dtype_s << dtype;
use_length_s << use_length;
(*dict)["axis"] = axis_s.str();
(*dict)["temperature"] = temperature_s.str();
if (dtype.has_value()) {
(*dict)["dtype"] = MXNetTypeWithBool2String(dtype.value());
} else {
(*dict)["dtype"] = dtype_s.str();
}
(*dict)["use_length"] = use_length_s.str();
}
};
struct MaskedSoftmaxParam : public dmlc::Parameter<MaskedSoftmaxParam> {
int axis;
dmlc::optional<double> temperature;
dmlc::optional<bool> normalize;
DMLC_DECLARE_PARAMETER(MaskedSoftmaxParam) {
DMLC_DECLARE_FIELD(axis).set_default(-1).describe("The axis along which to compute softmax.");
DMLC_DECLARE_FIELD(temperature)
.set_default(dmlc::optional<double>())
.describe("Temperature parameter in softmax");
DMLC_DECLARE_FIELD(normalize)
.set_default(dmlc::optional<bool>(true))
.describe("Whether to normalize input data x: x = x - max(x)");
}
void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
std::ostringstream axis_s, temperature_s, normalize_s;
axis_s << axis;
temperature_s << temperature;
normalize_s << normalize;
(*dict)["axis"] = axis_s.str();
(*dict)["temperature"] = temperature_s.str();
(*dict)["normalize"] = normalize_s.str();
}
};
static inline bool softmax_has_dtype_override(const nnvm::NodeAttrs& attrs) {
const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
return param.dtype.has_value() && param.dtype.value() != -1;
}
static inline bool softmax_use_length(const nnvm::NodeAttrs& attrs) {
const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
return param.use_length.value();
}
static inline bool SoftmaxOpType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(out_attrs->size(), 1);
const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 2U : 1U);
if (softmax_has_dtype_override(attrs)) {
TYPE_ASSIGN_CHECK(*out_attrs, 0, param.dtype.value());
type_assign(&(*in_attrs)[0], (*out_attrs)[0]);
return true;
} else {
std::vector<int> tmp = {in_attrs->at(0)};
return ElemwiseType<1, 1>(attrs, &tmp, out_attrs);
}
}
static inline bool SoftmaxOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
CHECK_EQ(out_attrs->size(), 1U);
const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), param.use_length.value() ? 2U : 1U);
if (param.use_length.value()) {
mxnet::TShape& dshape = in_attrs->at(0);
mxnet::TShape tmp_shape((dshape.ndim() == 1) ? 1U : dshape.ndim() - 1, 1);
int j = 0;
int axis = param.axis != -1 ? param.axis : dshape.ndim() - 1;
for (int i = 0; i < dshape.ndim(); ++i) {
if (i != axis) {
tmp_shape[j++] = dshape[i];
}
}
SHAPE_ASSIGN_CHECK(*in_attrs, 1, tmp_shape);
}
mxnet::ShapeVector tmp = {in_attrs->at(0)};
return ElemwiseShape<1, 1>(attrs, &tmp, out_attrs);
}
static inline bool SoftmaxGradOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
if (softmax_use_length(attrs)) {
mxnet::ShapeVector ins = {in_attrs->at(0), in_attrs->at(1), in_attrs->at(3)};
mxnet::ShapeVector dgrad = {out_attrs->at(0)};
bool res = ElemwiseShape<3, 1>(attrs, &ins, &dgrad);
SHAPE_ASSIGN_CHECK(*in_attrs, 0, ins[0]);
SHAPE_ASSIGN_CHECK(*in_attrs, 1, ins[1]);
SHAPE_ASSIGN_CHECK(*in_attrs, 3, ins[2]);
SHAPE_ASSIGN_CHECK(*out_attrs, 0, dgrad[0]);
mxnet::ShapeVector length = {in_attrs->at(2)};
mxnet::ShapeVector lgrad = {out_attrs->at(1)};
res = (res && ElemwiseShape<1, 1>(attrs, &length, &lgrad));
SHAPE_ASSIGN_CHECK(*in_attrs, 2, length[0]);
SHAPE_ASSIGN_CHECK(*out_attrs, 1, lgrad[0]);
return res;
} else {
return ElemwiseShape<3, 1>(attrs, in_attrs, out_attrs);
}
} else {
return ElemwiseShape<2, 1>(attrs, in_attrs, out_attrs);
}
}
static inline bool SoftmaxGradOpType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(out_attrs->size(), softmax_use_length(attrs) ? 2U : 1U);
if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 4U : 3U);
int in_dtype = (*in_attrs)[1];
int out_dtype = (*in_attrs)[softmax_use_length(attrs) ? 3 : 2];
TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype);
TYPE_ASSIGN_CHECK(*out_attrs, 0, in_dtype);
if (softmax_use_length(attrs)) {
TYPE_ASSIGN_CHECK(*out_attrs, 1, in_attrs->at(2));
}
return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1 &&
(!softmax_use_length(attrs) || ((*out_attrs)[1] != -1 && (*in_attrs)[1] != -1));
} else {
CHECK_EQ(in_attrs->size(), 2U);
int out_dtype = (*in_attrs)[1];
TYPE_ASSIGN_CHECK(*out_attrs, 0, out_dtype);
TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype);
return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1;
}
}
static inline std::vector<std::pair<int, int>> SoftmaxGradOpInplaceOption(
const nnvm::NodeAttrs& attrs) {
if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
if (softmax_use_length(attrs)) {
return std::vector<std::pair<int, int>>{{0, 0}, {1, 0}, {2, 1}, {3, 0}};
} else {
return std::vector<std::pair<int, int>>{{0, 0}, {1, 0}, {2, 0}};
}
} else {
return std::vector<std::pair<int, int>>{{0, 0}, {1, 0}};
}
}
static inline uint32_t SoftmaxGradOpNumInputs(const nnvm::NodeAttrs& attrs) {
if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
return softmax_use_length(attrs) ? 4 : 3;
}
return 2;
}
static inline std::vector<std::string> SoftmaxGradOpInputNames(const nnvm::NodeAttrs& attrs) {
if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
if (softmax_use_length(attrs)) {
return std::vector<std::string>{"ograd", "data", "length", "output"};
} else {
return std::vector<std::string>{"ograd", "data", "output"};
}
} else {
return std::vector<std::string>{"ograd", "output"};
}
}
struct SoftmaxFGradient {
const char* op_name;
std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n,
const std::vector<nnvm::NodeEntry>& ograds) const {
if (softmax_has_dtype_override(n->attrs) || softmax_use_length(n->attrs)) {
return ElemwiseGradUseInOut{op_name}(n, ograds); // NOLINT
} else {
return ElemwiseGradUseOut{op_name}(n, ograds); // NOLINT
}
}
};
static inline bool MaskedSoftmaxOpType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(out_attrs->size(), 1);
CHECK_EQ(in_attrs->size(), 2U);
std::vector<int> tmp = {in_attrs->at(0)};
return ElemwiseType<1, 1>(attrs, &tmp, out_attrs);
}
static inline bool MaskedSoftmaxOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_shape,
mxnet::ShapeVector* out_shape) {
CHECK_EQ(out_shape->size(), 1U);
CHECK_EQ(in_shape->size(), 2U);
mxnet::TShape& data_shape = (*in_shape)[0];
mxnet::TShape& mask_shape = (*in_shape)[1];
if (!mxnet::ndim_is_known(data_shape) || !mxnet::ndim_is_known(mask_shape)) {
return false;
}
CHECK(data_shape.ndim() == mask_shape.ndim())
<< "Number of dimensions in data and mask does not match";
CHECK(data_shape.ndim() > 0) << "Empty tuple is not allowed";
for (int i = 0; i < data_shape.ndim(); ++i) {
CHECK(data_shape[i] == mask_shape[i] || mask_shape[i] == 1)
<< "Mask cannot be broadcasted from " << mask_shape << " to " << data_shape;
}
SHAPE_ASSIGN_CHECK(*out_shape, 0, in_shape->at(0));
SHAPE_ASSIGN_CHECK(*in_shape, 0, out_shape->at(0));
return true;
}
static inline bool MaskedSoftmaxGradOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_shape,
mxnet::ShapeVector* out_shape) {
CHECK_EQ(out_shape->size(), 1U);
CHECK_EQ(in_shape->size(), 3U);
mxnet::TShape& ograd_shape = (*in_shape)[0];
mxnet::TShape& mask_shape = (*in_shape)[1];
if (!mxnet::ndim_is_known(ograd_shape) || !mxnet::ndim_is_known(mask_shape)) {
return false;
}
CHECK(ograd_shape.ndim() == mask_shape.ndim())
<< "Number of dimensions in data and mask does not match";
CHECK(ograd_shape.ndim() > 0) << "Empty tuple is not allowed";
for (int i = 0; i < ograd_shape.ndim(); ++i) {
CHECK(ograd_shape[i] == mask_shape[i] || mask_shape[i] == 1)
<< "Mask cannot be broadcasted from " << mask_shape << " to " << ograd_shape;
}
SHAPE_ASSIGN_CHECK(*out_shape, 0, in_shape->at(0));
SHAPE_ASSIGN_CHECK(*out_shape, 0, in_shape->at(2));
SHAPE_ASSIGN_CHECK(*in_shape, 0, out_shape->at(0));
SHAPE_ASSIGN_CHECK(*in_shape, 2, out_shape->at(0));
return true;
}
static inline bool MaskedSoftmaxGradOpType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(out_attrs->size(), 1U);
CHECK_EQ(in_attrs->size(), 3U);
int data_dtype = (*in_attrs)[0];
TYPE_ASSIGN_CHECK(*in_attrs, 2, data_dtype);
TYPE_ASSIGN_CHECK(*out_attrs, 0, data_dtype);
data_dtype = (*out_attrs)[0];
TYPE_ASSIGN_CHECK(*in_attrs, 0, data_dtype);
return true;
}
static inline std::vector<std::pair<int, int>> MaskedSoftmaxGradOpInplaceOption(
const nnvm::NodeAttrs& attrs) {
return std::vector<std::pair<int, int>>{{0, 0}, {1, 0}, {2, 1}, {3, 0}};
}
template <typename xpu, typename OP, bool negate = false>
void SoftmaxCompute(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mxnet_op;
if (req[0] == kNullOp || inputs[0].Size() == 0U)
return;
CHECK_NE(req[0], kAddTo);
const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
int axis = CheckAxis(param.axis, inputs[0].ndim());
const double temperature = param.temperature.has_value() ? param.temperature.value() : 1.0;
mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true);
bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", true);
if (!safe_acc && inputs[0].type_flag_ == mshadow::kFloat16) {
common::LogOnce(
"MXNET_SAFE_ACCUMULATION=1 is recommended for softmax with float16 inputs. "
"See https://mxnet.apache.org/api/faq/env_var "
"for more details.");
}
MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, DType, AType, {
MSHADOW_REAL_TYPE_SWITCH(
outputs[0].type_flag_, OType, {
int type = kInt32;
if (param.use_length.value()) {
CHECK(inputs.size() > 1)
<< "Mask needs to be provided when using softmax with use_length=True.";
type = inputs[1].type_flag_;
}
MXNET_INT32_INT64_TYPE_SWITCH(type, IType, {
IType* mask_ptr = nullptr;
if (param.use_length.value()) {
mask_ptr = inputs[1].dptr<IType>();
}
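// safe_acc picks the accumulation type: the wider AType (e.g. float for
// float16 inputs) when MXNET_SAFE_ACCUMULATION is enabled, otherwise the
// input's own DType.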
if (safe_acc) {
if (shape.ndim() == 2) {
Softmax<OP, negate, AType>(ctx.get_stream<xpu>(),
inputs[0].dptr<DType>(),
outputs[0].dptr<OType>(),
mask_ptr,
shape.get<2>(),
axis,
static_cast<DType>(temperature));
} else {
Softmax<OP, negate, AType>(ctx.get_stream<xpu>(),
inputs[0].dptr<DType>(),
outputs[0].dptr<OType>(),
mask_ptr,
shape.get<3>(),
axis,
static_cast<DType>(temperature));
}
} else {
if (shape.ndim() == 2) {
Softmax<OP, negate, DType>(ctx.get_stream<xpu>(),
inputs[0].dptr<DType>(),
outputs[0].dptr<OType>(),
mask_ptr,
shape.get<2>(),
axis,
static_cast<DType>(temperature));
} else {
Softmax<OP, negate, DType>(ctx.get_stream<xpu>(),
inputs[0].dptr<DType>(),
outputs[0].dptr<OType>(),
mask_ptr,
shape.get<3>(),
axis,
static_cast<DType>(temperature));
}
}
});
});
});
}
template <typename xpu, typename OP, bool masked_neg_inf, bool negate = false>
void MaskedSoftmaxCompute(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mxnet_op;
if (req[0] == kNullOp || inputs[0].Size() == 0U)
return;
CHECK_NE(req[0], kAddTo);
const MaskedSoftmaxParam& param = nnvm::get<MaskedSoftmaxParam>(attrs.parsed);
int axis = CheckAxis(param.axis, inputs[0].ndim());
const double temperature = param.temperature.has_value() ? param.temperature.value() : 1.0;
bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", true);
if (!safe_acc && inputs[0].type_flag_ == mshadow::kFloat16) {
common::LogOnce(
"MXNET_SAFE_ACCUMULATION=1 is recommended for masked_softmax with "
"float16 inputs. "
"See https://mxnet.apache.org/api/faq/env_var "
"for more details.");
}
MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, DType, AType, {
MXNET_NDIM_SWITCH(inputs[0].ndim(), ndim, {
bool* mask_ptr = inputs[1].dptr<bool>();
if (safe_acc) {
MaskedSoftmax<OP, masked_neg_inf, negate, AType>(ctx.get_stream<xpu>(),
inputs[0].dptr<DType>(),
outputs[0].dptr<DType>(),
mask_ptr,
inputs[0].shape_.get<ndim>(),
inputs[1].shape_.get<ndim>(),
axis,
temperature,
param.normalize.value(),
ctx);
} else {
MaskedSoftmax<OP, masked_neg_inf, negate, DType>(ctx.get_stream<xpu>(),
inputs[0].dptr<DType>(),
outputs[0].dptr<DType>(),
mask_ptr,
inputs[0].shape_.get<ndim>(),
inputs[1].shape_.get<ndim>(),
axis,
temperature,
param.normalize.value(),
ctx);
}
});
});
}
#if MXNET_USE_CUDA
struct SoftmaxRTCCompute {
std::string OP;
bool negate = false;
void operator()(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs);
};
struct SoftmaxRTCGradCompute {
std::string OP1;
std::string OP2;
bool negate = false;
void operator()(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs);
};
#endif
template <typename xpu, typename OP1, typename OP2, bool negate = false>
void SoftmaxGradCompute(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mxnet_op;
if (softmax_use_length(attrs)) {
MXNET_INT32_INT64_TYPE_SWITCH(inputs[2].type_flag_, IType, {
if (req[1] != kNullOp) {
mxnet_op::Kernel<mxnet_op::set_zero, xpu>::Launch(
ctx.get_stream<xpu>(), outputs[1].Size(), outputs[1].dptr<IType>());
}
});
}
if (req[0] == kNullOp)
return;
const int itype = softmax_use_length(attrs) ? inputs[2].type_flag_ : kInt32;
const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
int axis = CheckAxis(param.axis, inputs[0].ndim());
const double temperature = param.temperature.has_value() ? param.temperature.value() : 1.0;
mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true);
int out_idx = softmax_has_dtype_override(attrs) ? 2 : 1;
out_idx = softmax_use_length(attrs) ? 3 : out_idx;
bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", true);
MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, OType, AType, {
MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MXNET_INT32_INT64_TYPE_SWITCH(itype, IType, {
IType* length_ptr = nullptr;
if (softmax_use_length(attrs)) {
length_ptr = inputs[2].dptr<IType>();
}
if (safe_acc) {
if (shape.ndim() == 2) {
SoftmaxGrad<OP1, OP2, Req, negate, AType>(ctx.get_stream<xpu>(),
inputs[out_idx].dptr<OType>(),
inputs[0].dptr<OType>(),
outputs[0].dptr<DType>(),
length_ptr,
shape.get<2>(),
axis,
static_cast<DType>(temperature));
} else {
SoftmaxGrad<OP1, OP2, Req, negate, AType>(ctx.get_stream<xpu>(),
inputs[out_idx].dptr<OType>(),
inputs[0].dptr<OType>(),
outputs[0].dptr<DType>(),
length_ptr,
shape.get<3>(),
axis,
static_cast<DType>(temperature));
}
} else {
if (shape.ndim() == 2) {
SoftmaxGrad<OP1, OP2, Req, negate, DType>(ctx.get_stream<xpu>(),
inputs[out_idx].dptr<OType>(),
inputs[0].dptr<OType>(),
outputs[0].dptr<DType>(),
length_ptr,
shape.get<2>(),
axis,
static_cast<DType>(temperature));
} else {
SoftmaxGrad<OP1, OP2, Req, negate, DType>(ctx.get_stream<xpu>(),
inputs[out_idx].dptr<OType>(),
inputs[0].dptr<OType>(),
outputs[0].dptr<DType>(),
length_ptr,
shape.get<3>(),
axis,
static_cast<DType>(temperature));
}
}
});
});
});
});
}
template <typename xpu, typename OP1, typename OP2, bool negate = false>
void MaskedSoftmaxGradCompute(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mxnet_op;
if (req[0] == kNullOp)
return;
const MaskedSoftmaxParam& param = nnvm::get<MaskedSoftmaxParam>(attrs.parsed);
int axis = CheckAxis(param.axis, inputs[0].ndim());
const double temperature = param.temperature.has_value() ? param.temperature.value() : 1.0;
bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", true);
MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, DType, AType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MXNET_NDIM_SWITCH(inputs[0].ndim(), ndim, {
DType* ograd_ptr = inputs[0].dptr<DType>();
DType* out_ptr = inputs[2].dptr<DType>();
bool* mask_ptr = inputs[1].dptr<bool>();
DType* grad_data = outputs[0].dptr<DType>();
if (safe_acc) {
MaskedSoftmaxGrad<OP1, OP2, Req, negate, AType>(ctx.get_stream<xpu>(),
out_ptr,
ograd_ptr,
grad_data,
mask_ptr,
inputs[0].shape_.get<ndim>(),
inputs[1].shape_.get<ndim>(),
axis,
static_cast<DType>(temperature),
ctx);
} else {
MaskedSoftmaxGrad<OP1, OP2, Req, negate, DType>(ctx.get_stream<xpu>(),
out_ptr,
ograd_ptr,
grad_data,
mask_ptr,
inputs[0].shape_.get<ndim>(),
inputs[1].shape_.get<ndim>(),
axis,
static_cast<DType>(temperature),
ctx);
}
});
});
});
}
} // namespace op
} // namespace mxnet
namespace std {
template <>
struct hash<mxnet::op::SoftmaxParam> {
size_t operator()(const mxnet::op::SoftmaxParam& val) {
size_t ret = 0;
ret = dmlc::HashCombine(ret, val.axis);
ret = dmlc::HashCombine(ret, val.temperature);
ret = dmlc::HashCombine(ret, val.dtype);
ret = dmlc::HashCombine(ret, val.use_length);
return ret;
}
};
} // namespace std
#endif // MXNET_OPERATOR_NN_SOFTMAX_INL_H_
|
getrf_cmpt.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include "mkl.h"
#include "sys/time.h"
#define idx2(i, j, ldi) (((j) * (ldi)) + (i))
#define min(X,Y) (((X) < (Y)) ? (X) : (Y))
#define max(X,Y) (((X) > (Y)) ? (X) : (Y))
#define GETRF dgetrf
#define GETRF_COMPUTE_BATCH LAPACKE_dgetrf_compute_batch
#define fabst fabs
#define FPTYPE double
#define VECTOR_LENGTH 8
#define REPEAT 1000
#define MEM_ALIGNMENT 64
#define GRP_COUNT 1
int main(int argc, char *argv[])
{
int i, j, ii, nthr;
int g, i_matrix, num_matrices;
#pragma omp parallel
{
nthr = omp_get_num_threads();
}
double op_count = 0.0;
double startt, stopt, mint, mints;
mint = 1e5;
mints = 1e5;
int group_count = GRP_COUNT;
int pack_length = VECTOR_LENGTH;
int m_init, grp_init;
if (argc > 1) m_init = atoi(argv[1]);
else m_init = 5;
if (argc > 2) grp_init = atoi(argv[2]);
else grp_init = 512;
MKL_INT m[GRP_COUNT] = {m_init};
MKL_INT n[GRP_COUNT] = {m_init};
MKL_INT lda[GRP_COUNT] = {m_init};
MKL_INT size_per_grp[GRP_COUNT] = {grp_init};
int num_pointers = 0;
for (i = 0; i < GRP_COUNT; i++) num_pointers += size_per_grp[i];
int a_total = 0;
for (i = 0; i < GRP_COUNT; i++) a_total += m[i] * n[i] * size_per_grp[i];
FPTYPE *a, *d;
a = (FPTYPE *)_mm_malloc( a_total * sizeof(FPTYPE), MEM_ALIGNMENT );
d = (FPTYPE *)_mm_malloc( a_total * sizeof(FPTYPE), MEM_ALIGNMENT );
MKL_INT *ipiv, *info;
ipiv = (MKL_INT *)_mm_malloc( a_total * sizeof(MKL_INT), MEM_ALIGNMENT );
info = (MKL_INT *)_mm_malloc( num_pointers * sizeof(MKL_INT), MEM_ALIGNMENT );
for (i = 0; i < a_total; i++) a[i] = rand() / (FPTYPE) RAND_MAX + .5;
for (i = 0; i < grp_init; i++)
for (j = 0; j < m_init * m_init; j+= m_init+1)
a[i*(m_init*m_init) + j] += 10.0;
for (i = 0; i < a_total; i++) d[i] = a[i];
for (i = 0; i < a_total; i++) ipiv[i] = 0;
FPTYPE *a_array[num_pointers], *d_array[num_pointers];
MKL_INT *ipiv_array[num_pointers];
int a_idx = 0;
int p_num = 0;
for (i = 0; i < GRP_COUNT; i++) {
for (j = 0; j < size_per_grp[i]; j++) {
a_array[p_num] = &a[ a_idx ];
d_array[p_num] = &d[ a_idx ];
ipiv_array[p_num] = &ipiv[ a_idx ];
p_num++;
a_idx += m[i] * n[i];
op_count += (2.0 * m[i] * m[i] * m[i]) / 3.0;
}
}
// setup packed arrays
int grp_idx = 0;
int A_p_idx = 0;
int i_grp, i_format, format_tail;
FPTYPE *a_packed;
a_packed = (FPTYPE *)_mm_malloc( a_total * sizeof(FPTYPE), MEM_ALIGNMENT );
// setup compact_t structs
compact_t A_p;
A_p.layout = CblasColMajor;
A_p.rows = m;
A_p.cols = n;
A_p.stride = lda;
A_p.group_count = GRP_COUNT;
A_p.size_per_group = size_per_grp;
A_p.format = VECTOR_LENGTH;
A_p.mat = a_packed;
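// Pack into the compact layout consumed by the compute-batch kernel: within
// each group of VECTOR_LENGTH matrices, element (i,j) of every matrix in the
// group is stored contiguously, so each SIMD lane operates on its own matrix.
// Illustrative order for VECTOR_LENGTH=2 and column-major 2x2 matrices A, B:
// a11 b11 a21 b21 a12 b12 a22 b22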
for (i_grp=0; i_grp<GRP_COUNT; i_grp++) {
for (i_format=0; i_format<(size_per_grp[i_grp]/VECTOR_LENGTH)*VECTOR_LENGTH; i_format+=VECTOR_LENGTH) {
for (j=0; j<A_p.cols[i_grp]; j++) {
for (i=0; i<A_p.rows[i_grp]; i++) {
for (ii=0; ii<VECTOR_LENGTH; ii++) {
a_packed[ii + A_p_idx] = a_array[ii + grp_idx + i_format][A_p.rows[i_grp]*j + i];
}
A_p_idx+=VECTOR_LENGTH;
}
}
}
// tail handling
format_tail = size_per_grp[i_grp] - i_format;
i_format = (size_per_grp[i_grp]/VECTOR_LENGTH)*VECTOR_LENGTH;
if (format_tail > 0) {
for (j=0; j<A_p.cols[i_grp]; j++) {
for (i=0; i<A_p.rows[i_grp]; i++) {
for (ii=0; ii<format_tail; ii++) {
a_packed[ii + A_p_idx] = a_array[ii + grp_idx + i_format][A_p.rows[i_grp]*j + i];
}
A_p_idx+=format_tail;
}
}
}
grp_idx += size_per_grp[i_grp];
}
printf("\n THREADS --- m = k = n --- GETRF_BATCH --- GETRF_BATCH_COMPUTE\n");
for (i = 0; i < REPEAT; i++) {
num_matrices = 0;
startt = omp_get_wtime();
for (g=0; g < GRP_COUNT; g++) {
#pragma omp parallel for shared(m, n, d_array, lda, ipiv_array, info, g) private(i_matrix) schedule(static,1)
for (i_matrix=num_matrices; i_matrix<size_per_grp[g] + num_matrices; i_matrix++) {
GETRF( &m[g], &n[g], d_array[i_matrix], &lda[g], ipiv_array[i_matrix], &info[i_matrix] );
}
num_matrices += size_per_grp[g];
}
stopt = omp_get_wtime();
mint = min(mint, (stopt - startt));
}
for (i = 0; i < REPEAT; i++) {
startt = omp_get_wtime();
GETRF_COMPUTE_BATCH( &A_p );
stopt = omp_get_wtime();
mints = min(mints, (stopt - startt));
}
printf(" %d --- %d --- %5.2f --- %5.2f\n", nthr, m_init, (double)op_count/(mint*1e9), (double)op_count/(mints*1e9));
_mm_free(a_packed);
_mm_free(a);
_mm_free(d);
_mm_free(ipiv);
_mm_free(info);
return 0;
}
|
GB_unaryop__minv_fp32_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_fp32_int64
// op(A') function: GB_tran__minv_fp32_int64
// C type: float
// A type: int64_t
// cast: float cij = (float) aij
// unaryop: cij = (1.0F)/aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = (1.0F)/x ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
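// Illustrative expansion of GB_CAST_OP (p, p):
// { int64_t aij = Ax [p] ; float x = (float) aij ; Cx [p] = (1.0F)/x ; }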
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_FP32 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_fp32_int64
(
float *restrict Cx,
const int64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_fp32_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp-maxof-elements-lock.c | /***********************************************************************************
 Example 3.2 : omp-maxof-elements-lock.c
 Objective : Write an OpenMP program to print the largest element
 in an array.
 This example demonstrates the use of
 omp_init_lock(), omp_set_lock(), omp_unset_lock(), and
 omp_destroy_lock(), which are known as LOCK functions.
 Input : Number of threads
 Number of elements of the input array
 Output : Each thread scans its share of the iterations; finally the
 master thread prints the maximum value in the array, the
 time taken to find it, and the number of threads used.
 Created : Aug 2011
 Author : RarchK
 ****************************************************************************/
#include <stdio.h>
#include <omp.h>
#include <stdlib.h>
#include <sys/time.h>
#define MINUS_INFINITY -9999
#define MAXIMUM_VALUE 65535
/* Main Program */
int main(int argc, char **argv)
{
int *array, i, Noofelements, cur_max, current_value,Noofthreads;
struct timeval TimeValue_Start;
struct timezone TimeZone_Start;
struct timeval TimeValue_Final;
struct timezone TimeZone_Final;
long time_start, time_end;
double time_overhead;
omp_lock_t MAXLOCK;
printf("\n\t\t---------------------------------------------------------------------------");
printf("\n\t\t Email : RarchK");
printf("\n\t\t---------------------------------------------------------------------------");
printf("\n\t\t Objective : Finding Maximum element of an Array ");
printf("\n\t\t This example demonstrates the use of OpenMP Lock routines such as : omp_init_lock()");
printf("\n\t\t omp_set_lock(),omp_unset_lock(), omp_destroy_lock(). ");
printf("\n\t\t..........................................................................\n");
/* Checking for command line arguments */
if( argc != 3 ){
printf("\t\t Very Few Arguments\n ");
printf("\t\t Syntax : exec <Threads> <No. of elements> \n");
exit(-1);
}
Noofthreads=atoi(argv[1]);
if ((Noofthreads!=1) && (Noofthreads!=2) && (Noofthreads!=4) && (Noofthreads!=8) && (Noofthreads!= 16) ) {
printf("\n Number of threads should be 1,2,4,8 or 16 for the execution of program. \n\n");
exit(-1);
}
Noofelements=atoi(argv[2]);
/* printf("\n\t\t Enter the number of elements\n");
scanf("%d", &Noofelements);*/
printf("\n\t\t Threads : %d",Noofthreads);
printf("\n\t\t Number of elements in Array : %d \n ",Noofelements);
if (Noofelements <= 0) {
printf("\n\t\t The array elements cannot be stored\n");
exit(1);
}
/* Dynamic Memory Allocation */
array = (int *) malloc(sizeof(int) * Noofelements);
/* Allocating Random Number To Array Elements */
srand(MAXIMUM_VALUE);
for (i = 0; i < Noofelements; i++)
array[i] = rand();
if (Noofelements == 1) {
printf("\n\t\t The Largest Element In The Array Is %d", array[0]);
exit(1);
}
gettimeofday(&TimeValue_Start, &TimeZone_Start);
/* set the no. of threads */
omp_set_num_threads(Noofthreads);
/* Initialize the lock variable */
omp_init_lock(&MAXLOCK);
cur_max = MINUS_INFINITY;
/* OpenMP Parallel For Directive And Lock Functions : Fork the team of threads */
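/* Double-checked locking: the unsynchronized outer test skips the lock on
 the common path; the inner test must repeat after omp_set_lock() because
 another thread may have raised cur_max in the meantime. */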
#pragma omp parallel for
for (i = 0; i < Noofelements; i = i + 1) {
if (array[i] > cur_max) {
omp_set_lock(&MAXLOCK);
if (array[i] > cur_max)
cur_max = array[i];
omp_unset_lock(&MAXLOCK);
}
} /* End of the parallel section */
/* Destroying The Lock */
omp_destroy_lock(&MAXLOCK);
gettimeofday(&TimeValue_Final, &TimeZone_Final);
/* calculate the timing for the computation */
time_start = TimeValue_Start.tv_sec * 1000000 + TimeValue_Start.tv_usec;
time_end = TimeValue_Final.tv_sec * 1000000 + TimeValue_Final.tv_usec;
time_overhead = (time_end - time_start)/1000000.0;
/* Serial Calculation */
current_value = array[0];
for (i = 1; i < Noofelements; i++)
if (array[i] > current_value)
current_value = array[i];
/* Checking For Output Validity */
if (current_value == cur_max)
printf("\n\n\t\t The Max Value Is Same For Serial And Using Parallel OpenMP Directive\n");
else {
printf("\n\n\t\t The Max Value Is Not Same In Serial And Using Parallel OpenMP Directive\n");
exit(-1);
}
/* Freeing Allocated Memory */
free(array);
printf("\n\t\t The Largest Number Of The Array Is %d\n", cur_max);
printf("\n\t\t Time in Seconds (T) : %lf Seconds \n",time_overhead);
printf("\n\t\t..........................................................................\n");
}
|
forces.c | #include <omp.h>
/*
* Compute forces and accumulate the virial and the potential
*/
extern double epot, vir;
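/*
 * Note: this routine uses orphaned OpenMP directives (single, for), so it is
 * expected to be called from inside an enclosing parallel region, with ftemp
 * supplying one zero-initialized per-thread force array.
 */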
void
forces(int npart, double x[], double f[], double side, double rcoff, double **ftemp, int nthreads){
int i, myid;
#pragma omp single
{
vir = 0.0;
epot = 0.0;
}
myid = omp_get_thread_num();
#pragma omp for reduction(+:epot,vir) schedule(static,32)
for (i=0; i<npart*3; i+=3) {
// zero force components on particle i
double fxi = 0.0;
double fyi = 0.0;
double fzi = 0.0;
int j;
// loop over all particles with index > i
for (j=i+3; j<npart*3; j+=3) {
// compute distance between particles i and j allowing for wraparound
double xx = x[i]-x[j];
double yy = x[i+1]-x[j+1];
double zz = x[i+2]-x[j+2];
if (xx< (-0.5*side) ) xx += side;
if (xx> (0.5*side) ) xx -= side;
if (yy< (-0.5*side) ) yy += side;
if (yy> (0.5*side) ) yy -= side;
if (zz< (-0.5*side) ) zz += side;
if (zz> (0.5*side) ) zz -= side;
double rd = xx*xx+yy*yy+zz*zz;
// if distance is inside cutoff radius compute forces
// and contributions to pot. energy and virial
if (rd<=rcoff*rcoff) {
double rrd = 1.0/rd;
double rrd3 = rrd*rrd*rrd;
double rrd4 = rrd3*rrd;
double r148 = rrd4*(rrd3 - 0.5);
epot += rrd3*(rrd3-1.0);
vir -= rd*r148;
fxi += xx*r148;
fyi += yy*r148;
fzi += zz*r148;
ftemp[myid][j] -= xx*r148;
ftemp[myid][j+1] -= yy*r148;
ftemp[myid][j+2] -= zz*r148;
}
}
ftemp[myid][i] += fxi;
ftemp[myid][i+1] += fyi;
ftemp[myid][i+2] += fzi;
}
/* accumulate per-thread copies of forces into f
and zero ftemp for the next time round */
#pragma omp for
for (i = 0; i < npart*3; i++) {
int id;
for (id = 0; id < nthreads; id++){
f[i] += ftemp[id][i];
ftemp[id][i] = 0.0;
}
}
}
|
Stmt.h | //===--- Stmt.h - Classes for representing statements -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Stmt interface and subclasses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMT_H
#define LLVM_CLANG_AST_STMT_H
#include "clang/AST/DeclGroup.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <string>
namespace llvm {
class FoldingSetNodeID;
}
namespace clang {
class ASTContext;
class Attr;
class CapturedDecl;
class Decl;
class Expr;
class IdentifierInfo;
class LabelDecl;
class ParmVarDecl;
class PrinterHelper;
struct PrintingPolicy;
class QualType;
class RecordDecl;
class SourceManager;
class StringLiteral;
class SwitchStmt;
class Token;
class VarDecl;
//===----------------------------------------------------------------------===//
// AST classes for statements.
//===----------------------------------------------------------------------===//
/// Stmt - This represents one statement.
///
class LLVM_ALIGNAS(LLVM_PTR_SIZE) Stmt {
public:
enum StmtClass {
NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
};
// Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
void *operator new(size_t bytes) LLVM_NOEXCEPT {
llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
}
void operator delete(void *data) LLVM_NOEXCEPT {
llvm_unreachable("Stmts cannot be released with regular 'delete'.");
}
class StmtBitfields {
friend class Stmt;
/// \brief The statement class.
unsigned sClass : 8;
};
enum { NumStmtBits = 8 };
class CompoundStmtBitfields {
friend class CompoundStmt;
unsigned : NumStmtBits;
unsigned NumStmts : 32 - NumStmtBits;
};
class ExprBitfields {
friend class Expr;
friend class DeclRefExpr; // computeDependence
friend class InitListExpr; // ctor
friend class DesignatedInitExpr; // ctor
friend class BlockDeclRefExpr; // ctor
friend class ASTStmtReader; // deserialization
friend class CXXNewExpr; // ctor
friend class DependentScopeDeclRefExpr; // ctor
friend class CXXConstructExpr; // ctor
friend class CallExpr; // ctor
friend class OffsetOfExpr; // ctor
friend class ObjCMessageExpr; // ctor
friend class ObjCArrayLiteral; // ctor
friend class ObjCDictionaryLiteral; // ctor
friend class ShuffleVectorExpr; // ctor
friend class ParenListExpr; // ctor
friend class CXXUnresolvedConstructExpr; // ctor
friend class CXXDependentScopeMemberExpr; // ctor
friend class OverloadExpr; // ctor
friend class PseudoObjectExpr; // ctor
friend class AtomicExpr; // ctor
friend class OpaqueValueExpr; // ctor
unsigned : NumStmtBits;
unsigned ValueKind : 2;
unsigned ObjectKind : 2;
unsigned TypeDependent : 1;
unsigned ValueDependent : 1;
unsigned InstantiationDependent : 1;
unsigned ContainsUnexpandedParameterPack : 1;
};
enum { NumExprBits = 16 };
class CharacterLiteralBitfields {
friend class CharacterLiteral;
unsigned : NumExprBits;
unsigned Kind : 3;
};
enum APFloatSemantics {
IEEEhalf,
IEEEsingle,
IEEEdouble,
x87DoubleExtended,
IEEEquad,
PPCDoubleDouble
};
class FloatingLiteralBitfields {
friend class FloatingLiteral;
unsigned : NumExprBits;
unsigned Semantics : 3; // Provides semantics for APFloat construction
unsigned IsExact : 1;
};
class UnaryExprOrTypeTraitExprBitfields {
friend class UnaryExprOrTypeTraitExpr;
unsigned : NumExprBits;
unsigned Kind : 2;
unsigned IsType : 1; // true if operand is a type, false if an expression.
};
class DeclRefExprBitfields {
friend class DeclRefExpr;
friend class ASTStmtReader; // deserialization
unsigned : NumExprBits;
unsigned HasQualifier : 1;
unsigned HasTemplateKWAndArgsInfo : 1;
unsigned HasFoundDecl : 1;
unsigned HadMultipleCandidates : 1;
unsigned RefersToEnclosingVariableOrCapture : 1;
};
class CastExprBitfields {
friend class CastExpr;
unsigned : NumExprBits;
unsigned Kind : 6;
unsigned BasePathSize : 32 - 6 - NumExprBits;
};
class CallExprBitfields {
friend class CallExpr;
unsigned : NumExprBits;
unsigned NumPreArgs : 1;
};
class ExprWithCleanupsBitfields {
friend class ExprWithCleanups;
friend class ASTStmtReader; // deserialization
unsigned : NumExprBits;
unsigned NumObjects : 32 - NumExprBits;
};
class PseudoObjectExprBitfields {
friend class PseudoObjectExpr;
friend class ASTStmtReader; // deserialization
unsigned : NumExprBits;
// These don't need to be particularly wide, because they're
// strictly limited by the forms of expressions we permit.
unsigned NumSubExprs : 8;
unsigned ResultIndex : 32 - 8 - NumExprBits;
};
class ObjCIndirectCopyRestoreExprBitfields {
friend class ObjCIndirectCopyRestoreExpr;
unsigned : NumExprBits;
unsigned ShouldCopy : 1;
};
class InitListExprBitfields {
friend class InitListExpr;
unsigned : NumExprBits;
/// Whether this initializer list originally had a GNU array-range
/// designator in it. This is a temporary marker used by CodeGen.
unsigned HadArrayRangeDesignator : 1;
};
class TypeTraitExprBitfields {
friend class TypeTraitExpr;
friend class ASTStmtReader;
friend class ASTStmtWriter;
unsigned : NumExprBits;
/// \brief The kind of type trait, which is a value of a TypeTrait enumerator.
unsigned Kind : 8;
/// \brief If this expression is not value-dependent, this indicates whether
/// the trait evaluated true or false.
unsigned Value : 1;
/// \brief The number of arguments to this type trait.
unsigned NumArgs : 32 - 8 - 1 - NumExprBits;
};
union {
StmtBitfields StmtBits;
CompoundStmtBitfields CompoundStmtBits;
ExprBitfields ExprBits;
CharacterLiteralBitfields CharacterLiteralBits;
FloatingLiteralBitfields FloatingLiteralBits;
UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
DeclRefExprBitfields DeclRefExprBits;
CastExprBitfields CastExprBits;
CallExprBitfields CallExprBits;
ExprWithCleanupsBitfields ExprWithCleanupsBits;
PseudoObjectExprBitfields PseudoObjectExprBits;
ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;
InitListExprBitfields InitListExprBits;
TypeTraitExprBitfields TypeTraitExprBits;
};
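// All of the *Bitfields classes above overlay the same storage through this
// union; each starts with an anonymous 'unsigned : NumStmtBits' (or
// NumExprBits) pad so its fields sit behind the bits owned by the base class.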
friend class ASTStmtReader;
friend class ASTStmtWriter;
public:
// Only allow allocation of Stmts using the allocator in ASTContext
// or by doing a placement new.
void* operator new(size_t bytes, const ASTContext& C,
unsigned alignment = 8);
void* operator new(size_t bytes, const ASTContext* C,
unsigned alignment = 8) {
return operator new(bytes, *C, alignment);
}
void *operator new(size_t bytes, void *mem) LLVM_NOEXCEPT { return mem; }
void operator delete(void *, const ASTContext &, unsigned) LLVM_NOEXCEPT {}
void operator delete(void *, const ASTContext *, unsigned) LLVM_NOEXCEPT {}
void operator delete(void *, size_t) LLVM_NOEXCEPT {}
void operator delete(void *, void *) LLVM_NOEXCEPT {}
public:
/// \brief A placeholder type used to construct an empty shell of a
/// type, that will be filled in later (e.g., by some
/// de-serialization).
struct EmptyShell { };
protected:
/// Iterator for iterating over Stmt * arrays that contain only Expr *
///
/// This is needed because AST nodes use Stmt* arrays to store
/// references to children (to be compatible with StmtIterator).
struct ExprIterator
: llvm::iterator_adaptor_base<ExprIterator, Stmt **,
std::random_access_iterator_tag, Expr *> {
ExprIterator() : iterator_adaptor_base(nullptr) {}
ExprIterator(Stmt **I) : iterator_adaptor_base(I) {}
reference operator*() const {
assert((*I)->getStmtClass() >= firstExprConstant &&
(*I)->getStmtClass() <= lastExprConstant);
return *reinterpret_cast<Expr **>(I);
}
};
/// Const iterator for iterating over Stmt * arrays that contain only Expr *
struct ConstExprIterator
: llvm::iterator_adaptor_base<ConstExprIterator, const Stmt *const *,
std::random_access_iterator_tag,
const Expr *const> {
ConstExprIterator() : iterator_adaptor_base(nullptr) {}
ConstExprIterator(const Stmt *const *I) : iterator_adaptor_base(I) {}
reference operator*() const {
assert((*I)->getStmtClass() >= firstExprConstant &&
(*I)->getStmtClass() <= lastExprConstant);
return *reinterpret_cast<const Expr *const *>(I);
}
};
private:
/// \brief Whether statistic collection is enabled.
static bool StatisticsEnabled;
protected:
/// \brief Construct an empty statement.
explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
Stmt(StmtClass SC) {
static_assert(sizeof(*this) % llvm::AlignOf<void *>::Alignment == 0,
"Insufficient alignment!");
StmtBits.sClass = SC;
if (StatisticsEnabled) Stmt::addStmtClass(SC);
}
StmtClass getStmtClass() const {
return static_cast<StmtClass>(StmtBits.sClass);
}
const char *getStmtClassName() const;
/// SourceLocation tokens are not useful in isolation - they are low level
/// value objects created/interpreted by SourceManager. We assume AST
/// clients will have a pointer to the respective SourceManager.
SourceRange getSourceRange() const LLVM_READONLY;
SourceLocation getLocStart() const LLVM_READONLY;
SourceLocation getLocEnd() const LLVM_READONLY;
// global temp stats (until we have a per-module visitor)
static void addStmtClass(const StmtClass s);
static void EnableStatistics();
static void PrintStats();
/// \brief Dumps the specified AST fragment and all subtrees to
/// \c llvm::errs().
void dump() const;
void dump(SourceManager &SM) const;
void dump(raw_ostream &OS, SourceManager &SM) const;
void dump(raw_ostream &OS) const;
/// dumpColor - same as dump(), but forces color highlighting.
void dumpColor() const;
/// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
/// back to its original source language syntax.
void dumpPretty(const ASTContext &Context) const;
void printPretty(raw_ostream &OS, PrinterHelper *Helper,
const PrintingPolicy &Policy,
unsigned Indentation = 0) const;
/// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
/// works on systems with GraphViz (Mac OS X) or dot+gv installed.
void viewAST() const;
/// Skip past any implicit AST nodes which might surround this
/// statement, such as ExprWithCleanups or ImplicitCastExpr nodes.
Stmt *IgnoreImplicit();
/// \brief Skip no-op (attributed, compound) container stmts and skip captured
/// stmt at the top, if \a IgnoreCaptured is true.
Stmt *IgnoreContainers(bool IgnoreCaptured = false);
const Stmt *stripLabelLikeStatements() const;
Stmt *stripLabelLikeStatements() {
return const_cast<Stmt*>(
const_cast<const Stmt*>(this)->stripLabelLikeStatements());
}
/// Child Iterators: All subclasses must implement 'children'
/// to permit easy iteration over the substatements/subexpessions of an
/// AST node. This permits easy iteration over all nodes in the AST.
typedef StmtIterator child_iterator;
typedef ConstStmtIterator const_child_iterator;
typedef llvm::iterator_range<child_iterator> child_range;
typedef llvm::iterator_range<const_child_iterator> const_child_range;
child_range children();
const_child_range children() const {
auto Children = const_cast<Stmt *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_iterator child_begin() { return children().begin(); }
child_iterator child_end() { return children().end(); }
const_child_iterator child_begin() const { return children().begin(); }
const_child_iterator child_end() const { return children().end(); }
/// \brief Produce a unique representation of the given statement.
///
/// \param ID once the profiling operation is complete, will contain
/// the unique representation of the given statement.
///
/// \param Context the AST context in which the statement resides
///
/// \param Canonical whether the profile should be based on the canonical
/// representation of this statement (e.g., where non-type template
/// parameters are identified by index/level rather than their
/// declaration pointers) or the exact representation of the statement as
/// written in the source.
void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
bool Canonical) const;
};
/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
///
class DeclStmt : public Stmt {
DeclGroupRef DG;
SourceLocation StartLoc, EndLoc;
public:
DeclStmt(DeclGroupRef dg, SourceLocation startLoc,
SourceLocation endLoc) : Stmt(DeclStmtClass), DG(dg),
StartLoc(startLoc), EndLoc(endLoc) {}
/// \brief Build an empty declaration statement.
explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) { }
/// isSingleDecl - This method returns true if this DeclStmt refers
/// to a single Decl.
bool isSingleDecl() const {
return DG.isSingleDecl();
}
const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
Decl *getSingleDecl() { return DG.getSingleDecl(); }
const DeclGroupRef getDeclGroup() const { return DG; }
DeclGroupRef getDeclGroup() { return DG; }
void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }
SourceLocation getStartLoc() const { return StartLoc; }
void setStartLoc(SourceLocation L) { StartLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return StartLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == DeclStmtClass;
}
// Iterators over subexpressions.
child_range children() {
return child_range(child_iterator(DG.begin(), DG.end()),
child_iterator(DG.end(), DG.end()));
}
typedef DeclGroupRef::iterator decl_iterator;
typedef DeclGroupRef::const_iterator const_decl_iterator;
typedef llvm::iterator_range<decl_iterator> decl_range;
typedef llvm::iterator_range<const_decl_iterator> decl_const_range;
decl_range decls() { return decl_range(decl_begin(), decl_end()); }
decl_const_range decls() const {
return decl_const_range(decl_begin(), decl_end());
}
decl_iterator decl_begin() { return DG.begin(); }
decl_iterator decl_end() { return DG.end(); }
const_decl_iterator decl_begin() const { return DG.begin(); }
const_decl_iterator decl_end() const { return DG.end(); }
typedef std::reverse_iterator<decl_iterator> reverse_decl_iterator;
reverse_decl_iterator decl_rbegin() {
return reverse_decl_iterator(decl_end());
}
reverse_decl_iterator decl_rend() {
return reverse_decl_iterator(decl_begin());
}
};
/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
SourceLocation SemiLoc;
/// \brief True if the null statement was preceded by an empty macro, e.g:
/// @code
/// #define CALL(x)
/// CALL(0);
/// @endcode
bool HasLeadingEmptyMacro;
public:
NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
: Stmt(NullStmtClass), SemiLoc(L),
HasLeadingEmptyMacro(hasLeadingEmptyMacro) {}
/// \brief Build an empty null statement.
explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty),
HasLeadingEmptyMacro(false) { }
SourceLocation getSemiLoc() const { return SemiLoc; }
void setSemiLoc(SourceLocation L) { SemiLoc = L; }
bool hasLeadingEmptyMacro() const { return HasLeadingEmptyMacro; }
SourceLocation getLocStart() const LLVM_READONLY { return SemiLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return SemiLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == NullStmtClass;
}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
friend class ASTStmtReader;
friend class ASTStmtWriter;
};
/// CompoundStmt - This represents a group of statements like { stmt stmt }.
///
class CompoundStmt : public Stmt {
Stmt** Body;
SourceLocation LBraceLoc, RBraceLoc;
friend class ASTStmtReader;
public:
CompoundStmt(const ASTContext &C, ArrayRef<Stmt*> Stmts,
SourceLocation LB, SourceLocation RB);
// \brief Build an empty compound statement with a location.
explicit CompoundStmt(SourceLocation Loc)
: Stmt(CompoundStmtClass), Body(nullptr), LBraceLoc(Loc), RBraceLoc(Loc) {
CompoundStmtBits.NumStmts = 0;
}
// \brief Build an empty compound statement.
explicit CompoundStmt(EmptyShell Empty)
: Stmt(CompoundStmtClass, Empty), Body(nullptr) {
CompoundStmtBits.NumStmts = 0;
}
void setStmts(const ASTContext &C, ArrayRef<Stmt *> Stmts);
bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
unsigned size() const { return CompoundStmtBits.NumStmts; }
typedef Stmt** body_iterator;
typedef llvm::iterator_range<body_iterator> body_range;
body_range body() { return body_range(body_begin(), body_end()); }
body_iterator body_begin() { return Body; }
body_iterator body_end() { return Body + size(); }
Stmt *body_front() { return !body_empty() ? Body[0] : nullptr; }
Stmt *body_back() { return !body_empty() ? Body[size()-1] : nullptr; }
void setLastStmt(Stmt *S) {
assert(!body_empty() && "setLastStmt");
Body[size()-1] = S;
}
typedef Stmt* const * const_body_iterator;
typedef llvm::iterator_range<const_body_iterator> body_const_range;
body_const_range body() const {
return body_const_range(body_begin(), body_end());
}
const_body_iterator body_begin() const { return Body; }
const_body_iterator body_end() const { return Body + size(); }
const Stmt *body_front() const {
return !body_empty() ? Body[0] : nullptr;
}
const Stmt *body_back() const {
return !body_empty() ? Body[size() - 1] : nullptr;
}
typedef std::reverse_iterator<body_iterator> reverse_body_iterator;
reverse_body_iterator body_rbegin() {
return reverse_body_iterator(body_end());
}
reverse_body_iterator body_rend() {
return reverse_body_iterator(body_begin());
}
typedef std::reverse_iterator<const_body_iterator>
const_reverse_body_iterator;
const_reverse_body_iterator body_rbegin() const {
return const_reverse_body_iterator(body_end());
}
const_reverse_body_iterator body_rend() const {
return const_reverse_body_iterator(body_begin());
}
SourceLocation getLocStart() const LLVM_READONLY { return LBraceLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return RBraceLoc; }
SourceLocation getLBracLoc() const { return LBraceLoc; }
SourceLocation getRBracLoc() const { return RBraceLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == CompoundStmtClass;
}
// Iterators
child_range children() {
return child_range(Body, Body + CompoundStmtBits.NumStmts);
}
const_child_range children() const {
return const_child_range(child_iterator(Body),
child_iterator(Body + CompoundStmtBits.NumStmts));
}
};
// SwitchCase is the base class for CaseStmt and DefaultStmt,
class SwitchCase : public Stmt {
protected:
// A pointer to the following CaseStmt or DefaultStmt class,
// used by SwitchStmt.
SwitchCase *NextSwitchCase;
SourceLocation KeywordLoc;
SourceLocation ColonLoc;
SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
: Stmt(SC), NextSwitchCase(nullptr), KeywordLoc(KWLoc), ColonLoc(ColonLoc) {
}
SwitchCase(StmtClass SC, EmptyShell)
: Stmt(SC), NextSwitchCase(nullptr) {}
public:
const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }
SourceLocation getKeywordLoc() const { return KeywordLoc; }
void setKeywordLoc(SourceLocation L) { KeywordLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }
Stmt *getSubStmt();
const Stmt *getSubStmt() const {
return const_cast<SwitchCase*>(this)->getSubStmt();
}
SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
SourceLocation getLocEnd() const LLVM_READONLY;
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass ||
T->getStmtClass() == DefaultStmtClass;
}
};
class CaseStmt : public SwitchCase {
SourceLocation EllipsisLoc;
enum { LHS, RHS, SUBSTMT, END_EXPR };
Stmt* SubExprs[END_EXPR]; // The expression for the RHS is Non-null for
// GNU "case 1 ... 4" extension
public:
CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
SourceLocation ellipsisLoc, SourceLocation colonLoc)
: SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
SubExprs[SUBSTMT] = nullptr;
SubExprs[LHS] = reinterpret_cast<Stmt*>(lhs);
SubExprs[RHS] = reinterpret_cast<Stmt*>(rhs);
EllipsisLoc = ellipsisLoc;
}
/// \brief Build an empty switch case statement.
explicit CaseStmt(EmptyShell Empty) : SwitchCase(CaseStmtClass, Empty) { }
SourceLocation getCaseLoc() const { return KeywordLoc; }
void setCaseLoc(SourceLocation L) { KeywordLoc = L; }
SourceLocation getEllipsisLoc() const { return EllipsisLoc; }
void setEllipsisLoc(SourceLocation L) { EllipsisLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }
Expr *getLHS() { return reinterpret_cast<Expr*>(SubExprs[LHS]); }
Expr *getRHS() { return reinterpret_cast<Expr*>(SubExprs[RHS]); }
Stmt *getSubStmt() { return SubExprs[SUBSTMT]; }
const Expr *getLHS() const {
return reinterpret_cast<const Expr*>(SubExprs[LHS]);
}
const Expr *getRHS() const {
return reinterpret_cast<const Expr*>(SubExprs[RHS]);
}
const Stmt *getSubStmt() const { return SubExprs[SUBSTMT]; }
void setSubStmt(Stmt *S) { SubExprs[SUBSTMT] = S; }
void setLHS(Expr *Val) { SubExprs[LHS] = reinterpret_cast<Stmt*>(Val); }
void setRHS(Expr *Val) { SubExprs[RHS] = reinterpret_cast<Stmt*>(Val); }
SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
SourceLocation getLocEnd() const LLVM_READONLY {
// Handle deeply nested case statements with iteration instead of recursion.
const CaseStmt *CS = this;
while (const CaseStmt *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
CS = CS2;
return CS->getSubStmt()->getLocEnd();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[END_EXPR]);
}
};
class DefaultStmt : public SwitchCase {
Stmt* SubStmt;
public:
DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt) :
SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}
/// \brief Build an empty default statement.
explicit DefaultStmt(EmptyShell Empty)
: SwitchCase(DefaultStmtClass, Empty) { }
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setSubStmt(Stmt *S) { SubStmt = S; }
SourceLocation getDefaultLoc() const { return KeywordLoc; }
void setDefaultLoc(SourceLocation L) { KeywordLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();}
static bool classof(const Stmt *T) {
return T->getStmtClass() == DefaultStmtClass;
}
// Iterators
child_range children() { return child_range(&SubStmt, &SubStmt+1); }
};
inline SourceLocation SwitchCase::getLocEnd() const {
if (const CaseStmt *CS = dyn_cast<CaseStmt>(this))
return CS->getLocEnd();
return cast<DefaultStmt>(this)->getLocEnd();
}
/// LabelStmt - Represents a label, which has a substatement. For example:
/// foo: return;
///
class LabelStmt : public Stmt {
SourceLocation IdentLoc;
LabelDecl *TheDecl;
Stmt *SubStmt;
public:
LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
: Stmt(LabelStmtClass), IdentLoc(IL), TheDecl(D), SubStmt(substmt) {
static_assert(sizeof(LabelStmt) ==
2 * sizeof(SourceLocation) + 2 * sizeof(void *),
"LabelStmt too big");
}
// \brief Build an empty label statement.
explicit LabelStmt(EmptyShell Empty) : Stmt(LabelStmtClass, Empty) { }
SourceLocation getIdentLoc() const { return IdentLoc; }
LabelDecl *getDecl() const { return TheDecl; }
void setDecl(LabelDecl *D) { TheDecl = D; }
const char *getName() const;
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setIdentLoc(SourceLocation L) { IdentLoc = L; }
void setSubStmt(Stmt *SS) { SubStmt = SS; }
SourceLocation getLocStart() const LLVM_READONLY { return IdentLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();}
child_range children() { return child_range(&SubStmt, &SubStmt+1); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == LabelStmtClass;
}
};
/// \brief Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
/// [[omp::for(...)]] for (...) { ... }
///
class AttributedStmt : public Stmt {
Stmt *SubStmt;
SourceLocation AttrLoc;
unsigned NumAttrs;
friend class ASTStmtReader;
AttributedStmt(SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt)
: Stmt(AttributedStmtClass), SubStmt(SubStmt), AttrLoc(Loc),
NumAttrs(Attrs.size()) {
std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
}
explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
: Stmt(AttributedStmtClass, Empty), NumAttrs(NumAttrs) {
std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
}
const Attr *const *getAttrArrayPtr() const {
return reinterpret_cast<const Attr *const *>(this + 1);
}
const Attr **getAttrArrayPtr() {
return reinterpret_cast<const Attr **>(this + 1);
}
public:
static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
ArrayRef<const Attr*> Attrs, Stmt *SubStmt);
// \brief Build an empty attributed statement.
static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);
SourceLocation getAttrLoc() const { return AttrLoc; }
ArrayRef<const Attr*> getAttrs() const {
return llvm::makeArrayRef(getAttrArrayPtr(), NumAttrs);
}
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
SourceLocation getLocStart() const LLVM_READONLY { return AttrLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();}
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == AttributedStmtClass;
}
};
/// IfStmt - This represents an if/then/else.
///
class IfStmt : public Stmt {
enum { VAR, COND, THEN, ELSE, END_EXPR };
Stmt* SubExprs[END_EXPR];
SourceLocation IfLoc;
SourceLocation ElseLoc;
public:
IfStmt(const ASTContext &C, SourceLocation IL, VarDecl *var, Expr *cond,
Stmt *then, SourceLocation EL = SourceLocation(),
Stmt *elsev = nullptr);
/// \brief Build an empty if/then/else statement
explicit IfStmt(EmptyShell Empty) : Stmt(IfStmtClass, Empty) { }
/// \brief Retrieve the variable declared in this "if" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// if (int x = foo()) {
/// printf("x is %d", x);
/// }
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(const ASTContext &C, VarDecl *V);
/// If this IfStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
}
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
const Stmt *getThen() const { return SubExprs[THEN]; }
void setThen(Stmt *S) { SubExprs[THEN] = S; }
const Stmt *getElse() const { return SubExprs[ELSE]; }
void setElse(Stmt *S) { SubExprs[ELSE] = S; }
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
Stmt *getThen() { return SubExprs[THEN]; }
Stmt *getElse() { return SubExprs[ELSE]; }
SourceLocation getIfLoc() const { return IfLoc; }
void setIfLoc(SourceLocation L) { IfLoc = L; }
SourceLocation getElseLoc() const { return ElseLoc; }
void setElseLoc(SourceLocation L) { ElseLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return IfLoc; }
SourceLocation getLocEnd() const LLVM_READONLY {
if (SubExprs[ELSE])
return SubExprs[ELSE]->getLocEnd();
else
return SubExprs[THEN]->getLocEnd();
}
// Iterators over subexpressions. The iterators will include iterating
// over the initialization expression referenced by the condition variable.
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == IfStmtClass;
}
};
/// SwitchStmt - This represents a 'switch' stmt.
///
class SwitchStmt : public Stmt {
SourceLocation SwitchLoc;
enum { VAR, COND, BODY, END_EXPR };
Stmt* SubExprs[END_EXPR];
// This points to a linked list of case and default statements and, if the
// SwitchStmt is a switch on an enum value, records whether all the enum
// values were covered by CaseStmts. The coverage information value is meant
// to be a hint for possible clients.
llvm::PointerIntPair<SwitchCase *, 1, bool> FirstCase;
public:
SwitchStmt(const ASTContext &C, VarDecl *Var, Expr *cond);
/// \brief Build a empty switch statement.
explicit SwitchStmt(EmptyShell Empty) : Stmt(SwitchStmtClass, Empty) { }
/// \brief Retrieve the variable declared in this "switch" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// switch (int x = foo()) {
/// case 0: break;
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(const ASTContext &C, VarDecl *V);
/// If this SwitchStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
}
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
const Stmt *getBody() const { return SubExprs[BODY]; }
const SwitchCase *getSwitchCaseList() const { return FirstCase.getPointer(); }
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]);}
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
Stmt *getBody() { return SubExprs[BODY]; }
void setBody(Stmt *S) { SubExprs[BODY] = S; }
SwitchCase *getSwitchCaseList() { return FirstCase.getPointer(); }
/// \brief Set the case list for this switch statement.
void setSwitchCaseList(SwitchCase *SC) { FirstCase.setPointer(SC); }
SourceLocation getSwitchLoc() const { return SwitchLoc; }
void setSwitchLoc(SourceLocation L) { SwitchLoc = L; }
void setBody(Stmt *S, SourceLocation SL) {
SubExprs[BODY] = S;
SwitchLoc = SL;
}
void addSwitchCase(SwitchCase *SC) {
assert(!SC->getNextSwitchCase()
&& "case/default already added to a switch");
SC->setNextSwitchCase(FirstCase.getPointer());
FirstCase.setPointer(SC);
}
/// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
/// switch over an enum value then all cases have been explicitly covered.
void setAllEnumCasesCovered() { FirstCase.setInt(true); }
/// Returns true if the SwitchStmt is a switch of an enum value and all cases
/// have been explicitly covered.
bool isAllEnumCasesCovered() const { return FirstCase.getInt(); }
SourceLocation getLocStart() const LLVM_READONLY { return SwitchLoc; }
SourceLocation getLocEnd() const LLVM_READONLY {
return SubExprs[BODY] ? SubExprs[BODY]->getLocEnd() : SubExprs[COND]->getLocEnd();
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SwitchStmtClass;
}
};
/// WhileStmt - This represents a 'while' stmt.
///
class WhileStmt : public Stmt {
SourceLocation WhileLoc;
enum { VAR, COND, BODY, END_EXPR };
Stmt* SubExprs[END_EXPR];
public:
WhileStmt(const ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body,
SourceLocation WL);
/// \brief Build an empty while statement.
explicit WhileStmt(EmptyShell Empty) : Stmt(WhileStmtClass, Empty) { }
/// \brief Retrieve the variable declared in this "while" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// while (int x = random()) {
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(const ASTContext &C, VarDecl *V);
/// If this WhileStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
}
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
Stmt *getBody() { return SubExprs[BODY]; }
const Stmt *getBody() const { return SubExprs[BODY]; }
void setBody(Stmt *S) { SubExprs[BODY] = S; }
SourceLocation getWhileLoc() const { return WhileLoc; }
void setWhileLoc(SourceLocation L) { WhileLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return WhileLoc; }
SourceLocation getLocEnd() const LLVM_READONLY {
return SubExprs[BODY]->getLocEnd();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == WhileStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
};
/// DoStmt - This represents a 'do/while' stmt.
///
class DoStmt : public Stmt {
SourceLocation DoLoc;
enum { BODY, COND, END_EXPR };
Stmt* SubExprs[END_EXPR];
SourceLocation WhileLoc;
SourceLocation RParenLoc; // Location of final ')' in do stmt condition.
public:
DoStmt(Stmt *body, Expr *cond, SourceLocation DL, SourceLocation WL,
SourceLocation RP)
: Stmt(DoStmtClass), DoLoc(DL), WhileLoc(WL), RParenLoc(RP) {
SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
SubExprs[BODY] = body;
}
/// \brief Build an empty do-while statement.
explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) { }
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
Stmt *getBody() { return SubExprs[BODY]; }
const Stmt *getBody() const { return SubExprs[BODY]; }
void setBody(Stmt *S) { SubExprs[BODY] = S; }
SourceLocation getDoLoc() const { return DoLoc; }
void setDoLoc(SourceLocation L) { DoLoc = L; }
SourceLocation getWhileLoc() const { return WhileLoc; }
void setWhileLoc(SourceLocation L) { WhileLoc = L; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return DoLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == DoStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
};
/// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
///
class ForStmt : public Stmt {
SourceLocation ForLoc;
enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
SourceLocation LParenLoc, RParenLoc;
public:
ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
SourceLocation RP);
/// \brief Build an empty for statement.
explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) { }
Stmt *getInit() { return SubExprs[INIT]; }
/// \brief Retrieve the variable declared in this "for" statement, if any.
///
/// In the following example, "y" is the condition variable.
/// \code
/// for (int x = random(); int y = mangle(x); ++x) {
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(const ASTContext &C, VarDecl *V);
/// If this ForStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
}
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); }
Stmt *getBody() { return SubExprs[BODY]; }
const Stmt *getInit() const { return SubExprs[INIT]; }
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
const Stmt *getBody() const { return SubExprs[BODY]; }
void setInit(Stmt *S) { SubExprs[INIT] = S; }
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
void setBody(Stmt *S) { SubExprs[BODY] = S; }
SourceLocation getForLoc() const { return ForLoc; }
void setForLoc(SourceLocation L) { ForLoc = L; }
SourceLocation getLParenLoc() const { return LParenLoc; }
void setLParenLoc(SourceLocation L) { LParenLoc = L; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return ForLoc; }
SourceLocation getLocEnd() const LLVM_READONLY {
return SubExprs[BODY]->getLocEnd();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == ForStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
};
/// GotoStmt - This represents a direct goto.
///
class GotoStmt : public Stmt {
LabelDecl *Label;
SourceLocation GotoLoc;
SourceLocation LabelLoc;
public:
GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
: Stmt(GotoStmtClass), Label(label), GotoLoc(GL), LabelLoc(LL) {}
/// \brief Build an empty goto statement.
explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) { }
LabelDecl *getLabel() const { return Label; }
void setLabel(LabelDecl *D) { Label = D; }
SourceLocation getGotoLoc() const { return GotoLoc; }
void setGotoLoc(SourceLocation L) { GotoLoc = L; }
SourceLocation getLabelLoc() const { return LabelLoc; }
void setLabelLoc(SourceLocation L) { LabelLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return LabelLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == GotoStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
};
/// IndirectGotoStmt - This represents an indirect goto.
///
class IndirectGotoStmt : public Stmt {
SourceLocation GotoLoc;
SourceLocation StarLoc;
Stmt *Target;
public:
IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc,
Expr *target)
: Stmt(IndirectGotoStmtClass), GotoLoc(gotoLoc), StarLoc(starLoc),
Target((Stmt*)target) {}
/// \brief Build an empty indirect goto statement.
explicit IndirectGotoStmt(EmptyShell Empty)
: Stmt(IndirectGotoStmtClass, Empty) { }
void setGotoLoc(SourceLocation L) { GotoLoc = L; }
SourceLocation getGotoLoc() const { return GotoLoc; }
void setStarLoc(SourceLocation L) { StarLoc = L; }
SourceLocation getStarLoc() const { return StarLoc; }
Expr *getTarget() { return reinterpret_cast<Expr*>(Target); }
const Expr *getTarget() const {return reinterpret_cast<const Expr*>(Target);}
void setTarget(Expr *E) { Target = reinterpret_cast<Stmt*>(E); }
/// getConstantTarget - Returns the fixed target of this indirect
/// goto, if one exists.
LabelDecl *getConstantTarget();
const LabelDecl *getConstantTarget() const {
return const_cast<IndirectGotoStmt*>(this)->getConstantTarget();
}
SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return Target->getLocEnd(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == IndirectGotoStmtClass;
}
// Iterators
child_range children() { return child_range(&Target, &Target+1); }
};
/// ContinueStmt - This represents a continue.
///
class ContinueStmt : public Stmt {
SourceLocation ContinueLoc;
public:
ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass), ContinueLoc(CL) {}
/// \brief Build an empty continue statement.
explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) { }
SourceLocation getContinueLoc() const { return ContinueLoc; }
void setContinueLoc(SourceLocation L) { ContinueLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return ContinueLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return ContinueLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == ContinueStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
};
/// BreakStmt - This represents a break.
///
class BreakStmt : public Stmt {
SourceLocation BreakLoc;
public:
BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass), BreakLoc(BL) {
static_assert(sizeof(BreakStmt) == 2 * sizeof(SourceLocation),
"BreakStmt too large");
}
/// \brief Build an empty break statement.
explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) { }
SourceLocation getBreakLoc() const { return BreakLoc; }
void setBreakLoc(SourceLocation L) { BreakLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return BreakLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return BreakLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == BreakStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
};
/// ReturnStmt - This represents a return, optionally of an expression:
/// return;
/// return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void. We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
///
class ReturnStmt : public Stmt {
SourceLocation RetLoc;
Stmt *RetExpr;
const VarDecl *NRVOCandidate;
public:
explicit ReturnStmt(SourceLocation RL) : ReturnStmt(RL, nullptr, nullptr) {}
ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate)
: Stmt(ReturnStmtClass), RetLoc(RL), RetExpr((Stmt *)E),
NRVOCandidate(NRVOCandidate) {}
/// \brief Build an empty return expression.
explicit ReturnStmt(EmptyShell Empty) : Stmt(ReturnStmtClass, Empty) { }
const Expr *getRetValue() const;
Expr *getRetValue();
void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt*>(E); }
SourceLocation getReturnLoc() const { return RetLoc; }
void setReturnLoc(SourceLocation L) { RetLoc = L; }
/// \brief Retrieve the variable that might be used for the named return
/// value optimization.
///
/// The optimization itself can only be performed if the variable is
/// also marked as an NRVO object.
const VarDecl *getNRVOCandidate() const { return NRVOCandidate; }
void setNRVOCandidate(const VarDecl *Var) { NRVOCandidate = Var; }
SourceLocation getLocStart() const LLVM_READONLY { return RetLoc; }
SourceLocation getLocEnd() const LLVM_READONLY {
return RetExpr ? RetExpr->getLocEnd() : RetLoc;
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == ReturnStmtClass;
}
// Iterators
child_range children() {
if (RetExpr) return child_range(&RetExpr, &RetExpr+1);
return child_range(child_iterator(), child_iterator());
}
};
/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
///
class AsmStmt : public Stmt {
protected:
SourceLocation AsmLoc;
/// \brief True if the assembly statement does not have any input or output
/// operands.
bool IsSimple;
/// \brief If true, treat this inline assembly as having side effects.
/// This assembly statement should not be optimized, deleted or moved.
bool IsVolatile;
unsigned NumOutputs;
unsigned NumInputs;
unsigned NumClobbers;
Stmt **Exprs;
AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
unsigned numoutputs, unsigned numinputs, unsigned numclobbers) :
Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
NumOutputs(numoutputs), NumInputs(numinputs), NumClobbers(numclobbers) { }
friend class ASTStmtReader;
public:
/// \brief Build an empty inline-assembly statement.
explicit AsmStmt(StmtClass SC, EmptyShell Empty) :
Stmt(SC, Empty), Exprs(nullptr) { }
SourceLocation getAsmLoc() const { return AsmLoc; }
void setAsmLoc(SourceLocation L) { AsmLoc = L; }
bool isSimple() const { return IsSimple; }
void setSimple(bool V) { IsSimple = V; }
bool isVolatile() const { return IsVolatile; }
void setVolatile(bool V) { IsVolatile = V; }
SourceLocation getLocStart() const LLVM_READONLY { return SourceLocation(); }
SourceLocation getLocEnd() const LLVM_READONLY { return SourceLocation(); }
//===--- Asm String Analysis ---===//
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
unsigned getNumOutputs() const { return NumOutputs; }
/// getOutputConstraint - Return the constraint string for the specified
/// output operand. All output constraints are known to be non-empty (either
/// '=' or '+').
StringRef getOutputConstraint(unsigned i) const;
/// isOutputPlusConstraint - Return true if the specified output constraint
/// is a "+" constraint (which is both an input and an output) or false if it
/// is an "=" constraint (just an output).
bool isOutputPlusConstraint(unsigned i) const {
return getOutputConstraint(i)[0] == '+';
}
const Expr *getOutputExpr(unsigned i) const;
/// getNumPlusOperands - Return the number of output operands that have a "+"
/// constraint.
unsigned getNumPlusOperands() const;
//===--- Input operands ---===//
unsigned getNumInputs() const { return NumInputs; }
/// getInputConstraint - Return the specified input constraint. Unlike output
/// constraints, these can be empty.
StringRef getInputConstraint(unsigned i) const;
const Expr *getInputExpr(unsigned i) const;
//===--- Other ---===//
unsigned getNumClobbers() const { return NumClobbers; }
StringRef getClobber(unsigned i) const;
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass ||
T->getStmtClass() == MSAsmStmtClass;
}
// Input expr iterators.
typedef ExprIterator inputs_iterator;
typedef ConstExprIterator const_inputs_iterator;
typedef llvm::iterator_range<inputs_iterator> inputs_range;
typedef llvm::iterator_range<const_inputs_iterator> inputs_const_range;
inputs_iterator begin_inputs() {
return &Exprs[0] + NumOutputs;
}
inputs_iterator end_inputs() {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }
const_inputs_iterator begin_inputs() const {
return &Exprs[0] + NumOutputs;
}
const_inputs_iterator end_inputs() const {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_const_range inputs() const {
return inputs_const_range(begin_inputs(), end_inputs());
}
// Output expr iterators.
typedef ExprIterator outputs_iterator;
typedef ConstExprIterator const_outputs_iterator;
typedef llvm::iterator_range<outputs_iterator> outputs_range;
typedef llvm::iterator_range<const_outputs_iterator> outputs_const_range;
outputs_iterator begin_outputs() {
return &Exprs[0];
}
outputs_iterator end_outputs() {
return &Exprs[0] + NumOutputs;
}
outputs_range outputs() {
return outputs_range(begin_outputs(), end_outputs());
}
const_outputs_iterator begin_outputs() const {
return &Exprs[0];
}
const_outputs_iterator end_outputs() const {
return &Exprs[0] + NumOutputs;
}
outputs_const_range outputs() const {
return outputs_const_range(begin_outputs(), end_outputs());
}
child_range children() {
return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
}
};
/// This represents a GCC inline-assembly statement extension.
///
class GCCAsmStmt : public AsmStmt {
SourceLocation RParenLoc;
StringLiteral *AsmStr;
// FIXME: If we wanted to, we could allocate all of these in one big array.
StringLiteral **Constraints;
StringLiteral **Clobbers;
IdentifierInfo **Names;
friend class ASTStmtReader;
public:
GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
bool isvolatile, unsigned numoutputs, unsigned numinputs,
IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
StringLiteral *asmstr, unsigned numclobbers,
StringLiteral **clobbers, SourceLocation rparenloc);
/// \brief Build an empty inline-assembly statement.
explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty),
Constraints(nullptr), Clobbers(nullptr), Names(nullptr) { }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
//===--- Asm String Analysis ---===//
const StringLiteral *getAsmString() const { return AsmStr; }
StringLiteral *getAsmString() { return AsmStr; }
void setAsmString(StringLiteral *E) { AsmStr = E; }
/// AsmStringPiece - this is part of a decomposed asm string specification
/// (for use with the AnalyzeAsmString function below). An asm string is
/// considered to be a concatenation of these parts.
class AsmStringPiece {
public:
enum Kind {
String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
Operand // Operand reference, with optional modifier %c4.
};
private:
Kind MyKind;
std::string Str;
unsigned OperandNo;
// Source range for operand references.
CharSourceRange Range;
public:
AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
SourceLocation End)
: MyKind(Operand), Str(S), OperandNo(OpNo),
Range(CharSourceRange::getCharRange(Begin, End)) {
}
bool isString() const { return MyKind == String; }
bool isOperand() const { return MyKind == Operand; }
const std::string &getString() const {
return Str;
}
unsigned getOperandNo() const {
assert(isOperand());
return OperandNo;
}
CharSourceRange getRange() const {
assert(isOperand() && "Range is currently used only for Operands.");
return Range;
}
/// getModifier - Get the modifier for this operand, if present. This
/// returns '\0' if there was no modifier.
char getModifier() const;
};
/// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
/// it into pieces. If the asm string is erroneous, emit errors and return
/// true, otherwise return false. This handles canonicalization and
/// translation of strings from GCC syntax to LLVM IR syntax, and handles
//// flattening of named references like %[foo] to Operand AsmStringPiece's.
unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
const ASTContext &C, unsigned &DiagOffs) const;
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
IdentifierInfo *getOutputIdentifier(unsigned i) const {
return Names[i];
}
StringRef getOutputName(unsigned i) const {
if (IdentifierInfo *II = getOutputIdentifier(i))
return II->getName();
return StringRef();
}
StringRef getOutputConstraint(unsigned i) const;
const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
return Constraints[i];
}
StringLiteral *getOutputConstraintLiteral(unsigned i) {
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
IdentifierInfo *getInputIdentifier(unsigned i) const {
return Names[i + NumOutputs];
}
StringRef getInputName(unsigned i) const {
if (IdentifierInfo *II = getInputIdentifier(i))
return II->getName();
return StringRef();
}
StringRef getInputConstraint(unsigned i) const;
const StringLiteral *getInputConstraintLiteral(unsigned i) const {
return Constraints[i + NumOutputs];
}
StringLiteral *getInputConstraintLiteral(unsigned i) {
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
}
private:
void setOutputsAndInputsAndClobbers(const ASTContext &C,
IdentifierInfo **Names,
StringLiteral **Constraints,
Stmt **Exprs,
unsigned NumOutputs,
unsigned NumInputs,
StringLiteral **Clobbers,
unsigned NumClobbers);
public:
//===--- Other ---===//
/// getNamedOperand - Given a symbolic operand reference like %[foo],
/// translate this into a numeric value needed to reference the same operand.
/// This returns -1 if the operand name is invalid.
int getNamedOperand(StringRef SymbolicName) const;
StringRef getClobber(unsigned i) const;
StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
const StringLiteral *getClobberStringLiteral(unsigned i) const {
return Clobbers[i];
}
SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass;
}
};
/// This represents a Microsoft inline-assembly statement extension.
///
class MSAsmStmt : public AsmStmt {
SourceLocation LBraceLoc, EndLoc;
StringRef AsmStr;
unsigned NumAsmToks;
Token *AsmToks;
StringRef *Constraints;
StringRef *Clobbers;
friend class ASTStmtReader;
public:
MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
SourceLocation lbraceloc, bool issimple, bool isvolatile,
ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
ArrayRef<StringRef> constraints,
ArrayRef<Expr*> exprs, StringRef asmstr,
ArrayRef<StringRef> clobbers, SourceLocation endloc);
/// \brief Build an empty MS-style inline-assembly statement.
explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty),
NumAsmToks(0), AsmToks(nullptr), Constraints(nullptr), Clobbers(nullptr) { }
SourceLocation getLBraceLoc() const { return LBraceLoc; }
void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
bool hasBraces() const { return LBraceLoc.isValid(); }
unsigned getNumAsmToks() { return NumAsmToks; }
Token *getAsmToks() { return AsmToks; }
//===--- Asm String Analysis ---===//
StringRef getAsmString() const { return AsmStr; }
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
StringRef getOutputConstraint(unsigned i) const {
assert(i < NumOutputs);
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
StringRef getInputConstraint(unsigned i) const {
assert(i < NumInputs);
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
}
//===--- Other ---===//
ArrayRef<StringRef> getAllConstraints() const {
return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
}
ArrayRef<StringRef> getClobbers() const {
return llvm::makeArrayRef(Clobbers, NumClobbers);
}
ArrayRef<Expr*> getAllExprs() const {
return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
NumInputs + NumOutputs);
}
StringRef getClobber(unsigned i) const { return getClobbers()[i]; }
private:
void initialize(const ASTContext &C, StringRef AsmString,
ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);
public:
SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == MSAsmStmtClass;
}
child_range children() {
return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
}
};
class SEHExceptStmt : public Stmt {
SourceLocation Loc;
Stmt *Children[2];
enum { FILTER_EXPR, BLOCK };
SEHExceptStmt(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
friend class ASTReader;
friend class ASTStmtReader;
explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) { }
public:
static SEHExceptStmt* Create(const ASTContext &C,
SourceLocation ExceptLoc,
Expr *FilterExpr,
Stmt *Block);
SourceLocation getLocStart() const LLVM_READONLY { return getExceptLoc(); }
SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }
SourceLocation getExceptLoc() const { return Loc; }
SourceLocation getEndLoc() const { return getBlock()->getLocEnd(); }
Expr *getFilterExpr() const {
return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
}
CompoundStmt *getBlock() const {
return cast<CompoundStmt>(Children[BLOCK]);
}
child_range children() {
return child_range(Children,Children+2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHExceptStmtClass;
}
};
class SEHFinallyStmt : public Stmt {
SourceLocation Loc;
Stmt *Block;
SEHFinallyStmt(SourceLocation Loc,
Stmt *Block);
friend class ASTReader;
friend class ASTStmtReader;
explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) { }
public:
static SEHFinallyStmt* Create(const ASTContext &C,
SourceLocation FinallyLoc,
Stmt *Block);
SourceLocation getLocStart() const LLVM_READONLY { return getFinallyLoc(); }
SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }
SourceLocation getFinallyLoc() const { return Loc; }
SourceLocation getEndLoc() const { return Block->getLocEnd(); }
CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }
child_range children() {
return child_range(&Block,&Block+1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHFinallyStmtClass;
}
};
class SEHTryStmt : public Stmt {
bool IsCXXTry;
SourceLocation TryLoc;
Stmt *Children[2];
enum { TRY = 0, HANDLER = 1 };
SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
SourceLocation TryLoc,
Stmt *TryBlock,
Stmt *Handler);
friend class ASTReader;
friend class ASTStmtReader;
explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) { }
public:
static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
SourceLocation getLocStart() const LLVM_READONLY { return getTryLoc(); }
SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }
SourceLocation getTryLoc() const { return TryLoc; }
SourceLocation getEndLoc() const { return Children[HANDLER]->getLocEnd(); }
bool getIsCXXTry() const { return IsCXXTry; }
CompoundStmt* getTryBlock() const {
return cast<CompoundStmt>(Children[TRY]);
}
Stmt *getHandler() const { return Children[HANDLER]; }
/// Returns 0 if not defined
SEHExceptStmt *getExceptHandler() const;
SEHFinallyStmt *getFinallyHandler() const;
child_range children() {
return child_range(Children,Children+2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHTryStmtClass;
}
};
/// Represents a __leave statement.
///
class SEHLeaveStmt : public Stmt {
SourceLocation LeaveLoc;
public:
explicit SEHLeaveStmt(SourceLocation LL)
: Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}
/// \brief Build an empty __leave statement.
explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) { }
SourceLocation getLeaveLoc() const { return LeaveLoc; }
void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return LeaveLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return LeaveLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHLeaveStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
};
/// \brief This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
/// compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
/// \brief The different capture forms: by 'this', by reference, capture for
/// variable-length array type etc.
enum VariableCaptureKind {
VCK_This,
VCK_ByRef,
VCK_ByCopy,
VCK_VLAType,
};
/// \brief Describes the capture of either a variable, or 'this', or
/// variable-length array type.
class Capture {
llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
SourceLocation Loc;
public:
/// \brief Create a new capture.
///
/// \param Loc The source location associated with this capture.
///
/// \param Kind The kind of capture (this, ByRef, ...).
///
/// \param Var The variable being captured, or null if capturing this.
///
Capture(SourceLocation Loc, VariableCaptureKind Kind,
VarDecl *Var = nullptr);
/// \brief Determine the kind of capture.
VariableCaptureKind getCaptureKind() const;
/// \brief Retrieve the source location at which the variable or 'this' was
/// first used.
SourceLocation getLocation() const { return Loc; }
/// \brief Determine whether this capture handles the C++ 'this' pointer.
bool capturesThis() const { return getCaptureKind() == VCK_This; }
/// \brief Determine whether this capture handles a variable (by reference).
bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }
/// \brief Determine whether this capture handles a variable by copy.
bool capturesVariableByCopy() const {
return getCaptureKind() == VCK_ByCopy;
}
/// \brief Determine whether this capture handles a variable-length array
/// type.
bool capturesVariableArrayType() const {
return getCaptureKind() == VCK_VLAType;
}
/// \brief Retrieve the declaration of the variable being captured.
///
/// This operation is only valid if this capture captures a variable.
VarDecl *getCapturedVar() const;
friend class ASTStmtReader;
};
private:
/// \brief The number of variable captured, including 'this'.
unsigned NumCaptures;
/// \brief The pointer part is the implicit the outlined function and the
/// int part is the captured region kind, 'CR_Default' etc.
llvm::PointerIntPair<CapturedDecl *, 1, CapturedRegionKind> CapDeclAndKind;
/// \brief The record for captured variables, a RecordDecl or CXXRecordDecl.
RecordDecl *TheRecordDecl;
/// \brief Construct a captured statement.
CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);
/// \brief Construct an empty captured statement.
CapturedStmt(EmptyShell Empty, unsigned NumCaptures);
Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }
Stmt *const *getStoredStmts() const {
return reinterpret_cast<Stmt *const *>(this + 1);
}
Capture *getStoredCaptures() const;
void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }
public:
static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
CapturedRegionKind Kind,
ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits,
CapturedDecl *CD, RecordDecl *RD);
static CapturedStmt *CreateDeserialized(const ASTContext &Context,
unsigned NumCaptures);
/// \brief Retrieve the statement being captured.
Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }
/// \brief Retrieve the outlined function declaration.
CapturedDecl *getCapturedDecl();
const CapturedDecl *getCapturedDecl() const;
/// \brief Set the outlined function declaration.
void setCapturedDecl(CapturedDecl *D);
/// \brief Retrieve the captured region kind.
CapturedRegionKind getCapturedRegionKind() const;
/// \brief Set the captured region kind.
void setCapturedRegionKind(CapturedRegionKind Kind);
/// \brief Retrieve the record declaration for captured variables.
const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }
/// \brief Set the record declaration for captured variables.
void setCapturedRecordDecl(RecordDecl *D) {
assert(D && "null RecordDecl");
TheRecordDecl = D;
}
/// \brief True if this variable has been captured.
bool capturesVariable(const VarDecl *Var) const;
/// \brief An iterator that walks over the captures.
typedef Capture *capture_iterator;
typedef const Capture *const_capture_iterator;
typedef llvm::iterator_range<capture_iterator> capture_range;
typedef llvm::iterator_range<const_capture_iterator> capture_const_range;
capture_range captures() {
return capture_range(capture_begin(), capture_end());
}
capture_const_range captures() const {
return capture_const_range(capture_begin(), capture_end());
}
/// \brief Retrieve an iterator pointing to the first capture.
capture_iterator capture_begin() { return getStoredCaptures(); }
const_capture_iterator capture_begin() const { return getStoredCaptures(); }
/// \brief Retrieve an iterator pointing past the end of the sequence of
/// captures.
capture_iterator capture_end() const {
return getStoredCaptures() + NumCaptures;
}
/// \brief Retrieve the number of captures, including 'this'.
unsigned capture_size() const { return NumCaptures; }
/// \brief Iterator that walks over the capture initialization arguments.
typedef Expr **capture_init_iterator;
typedef llvm::iterator_range<capture_init_iterator> capture_init_range;
/// \brief Const iterator that walks over the capture initialization
/// arguments.
typedef Expr *const *const_capture_init_iterator;
typedef llvm::iterator_range<const_capture_init_iterator>
const_capture_init_range;
capture_init_range capture_inits() {
return capture_init_range(capture_init_begin(), capture_init_end());
}
const_capture_init_range capture_inits() const {
return const_capture_init_range(capture_init_begin(), capture_init_end());
}
/// \brief Retrieve the first initialization argument.
capture_init_iterator capture_init_begin() {
return reinterpret_cast<Expr **>(getStoredStmts());
}
const_capture_init_iterator capture_init_begin() const {
return reinterpret_cast<Expr *const *>(getStoredStmts());
}
/// \brief Retrieve the iterator pointing one past the last initialization
/// argument.
capture_init_iterator capture_init_end() {
return capture_init_begin() + NumCaptures;
}
const_capture_init_iterator capture_init_end() const {
return capture_init_begin() + NumCaptures;
}
SourceLocation getLocStart() const LLVM_READONLY {
return getCapturedStmt()->getLocStart();
}
SourceLocation getLocEnd() const LLVM_READONLY {
return getCapturedStmt()->getLocEnd();
}
SourceRange getSourceRange() const LLVM_READONLY {
return getCapturedStmt()->getSourceRange();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CapturedStmtClass;
}
child_range children();
friend class ASTStmtReader;
};
} // end namespace clang
#endif
|
SymOp.h | void forward_SymOp(double *y, const double *x, int n){
#pragma omp parallel for
for(int i=0;i<n;i++){
y[i*9] = x[6*i+0];
y[i*9+1] = x[6*i+1];
y[i*9+2] = x[6*i+2];
y[i*9+3] = x[6*i+1];
y[i*9+4] = x[6*i+3];
y[i*9+5] = x[6*i+4];
y[i*9+6] = x[6*i+2];
y[i*9+7] = x[6*i+4];
y[i*9+8] = x[6*i+5];
}
}
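/* Layout note (inferred from the code above): x packs the 6 unique entries
 * of a symmetric 3x3 matrix as (xx, xy, xz, yy, yz, zz) and y stores the
 * full row-major 3x3 matrix, i.e.
 *
 *   y = | x0 x1 x2 |
 *       | x1 x3 x4 |
 *       | x2 x4 x5 |
 *
 * The backward pass below therefore accumulates each off-diagonal gradient
 * from its two mirrored positions in grad_y. */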
void backward_SymOp(double *grad_x, const double *grad_y, const double *y, const double *x, int n){
#pragma omp parallel for
for(int i=0;i<n;i++){
grad_x[6*i+0] = grad_y[i*9];
grad_x[6*i+1] = grad_y[i*9+1] + grad_y[i*9+3];
grad_x[6*i+2] = grad_y[i*9+2] + grad_y[i*9+6];
grad_x[6*i+3] = grad_y[i*9+4];
grad_x[6*i+4] = grad_y[i*9+5] + grad_y[i*9+7];
grad_x[6*i+5] = grad_y[i*9+8];
}
} |
atax.c | /**
* atax.c: This file was adapted from PolyBench/GPU 1.0 test suite
* to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
*
* http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
* Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
* Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
*/
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "BenchmarksUtil.h"
// define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5
/* Problem size. */
#ifdef RUN_TEST
#define SIZE 1100
#elif RUN_BENCHMARK
#define SIZE 9600
#else
#define SIZE 1000
#endif
#define NX SIZE
#define NY SIZE
#ifndef M_PI
#define M_PI 3.14159
#endif
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_array(DATA_TYPE *x, DATA_TYPE *A) {
int i, j;
for (i = 0; i < NX; i++) {
x[i] = i * M_PI;
for (j = 0; j < NY; j++) {
A[i * NY + j] = ((DATA_TYPE)i * (j)) / NX;
}
}
}
int compareResults(DATA_TYPE *z, DATA_TYPE *z_outputFromGpu) {
int i, fail;
fail = 0;
for (i = 0; i < NY; i++) {
if (percentDiff(z[i], z_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) {
fail++;
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
"Percent: %d\n",
PERCENT_DIFF_ERROR_THRESHOLD, fail);
return fail;
}
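/* Computes y = A^T (A x): first tmp = A x, then y accumulates A^T tmp. */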
void atax_cpu(DATA_TYPE *A, DATA_TYPE *x, DATA_TYPE *y, DATA_TYPE *tmp) {
int i, j;
for (i = 0; i < NY; i++) {
y[i] = 0;
}
for (i = 0; i < NX; i++) {
tmp[i] = 0;
for (j = 0; j < NY; j++) {
tmp[i] = tmp[i] + A[i * NY + j] * x[j];
}
for (j = 0; j < NY; j++) {
y[j] = y[j] + A[i * NY + j] * tmp[i];
}
}
}
void atax_OMP(DATA_TYPE *A, DATA_TYPE *x, DATA_TYPE *y, DATA_TYPE *tmp) {
for (int i = 0; i < NY; i++) {
y[i] = 0;
}
#pragma omp target map(to : A[ : NX * NY], x[ : NY]) map( \
tofrom : tmp[ : NX], y[ : NY]) device(DEVICE_ID)
{
#pragma omp parallel for
for (int i = 0; i < NX; i++) {
tmp[i] = 0;
for (int j = 0; j < NY; j++) {
tmp[i] = tmp[i] + A[i * NY + j] * x[j];
}
}
// The loop nest is interchanged (j outermost) so that each thread owns a
// distinct y[j]; the update then needs no synchronization.
#pragma omp parallel for
for (int j = 0; j < NY; j++)
for (int i = 0; i < NX; i++) {
y[j] = y[j] + A[i * NY + j] * tmp[i];
}
}
}
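/* Sketch of an alternative that keeps the original loop order (hypothetical,
 * not used by this benchmark): with i as the parallel index every thread
 * updates all of y, so the accumulation into y[j] must be made atomic. The
 * interchanged loop above avoids that synchronization entirely. Assumes y
 * has been zeroed by the caller, as in atax_OMP. */
static void atax_OMP_atomic_sketch(DATA_TYPE *A, DATA_TYPE *x, DATA_TYPE *y,
                                   DATA_TYPE *tmp) {
#pragma omp parallel for
  for (int i = 0; i < NX; i++) {
    DATA_TYPE t = 0;
    for (int j = 0; j < NY; j++) {
      t += A[i * NY + j] * x[j];
    }
    tmp[i] = t;
    for (int j = 0; j < NY; j++) {
#pragma omp atomic
      y[j] += A[i * NY + j] * t;
    }
  }
}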
int main(int argc, char **argv) {
double t_start, t_end;
int fail = 0;
DATA_TYPE *A;
DATA_TYPE *x;
DATA_TYPE *y;
DATA_TYPE *y_outputFromGpu;
DATA_TYPE *tmp;
A = (DATA_TYPE *)malloc(NX * NY * sizeof(DATA_TYPE));
x = (DATA_TYPE *)malloc(NY * sizeof(DATA_TYPE));
y = (DATA_TYPE *)malloc(NY * sizeof(DATA_TYPE));
y_outputFromGpu = (DATA_TYPE *)malloc(NY * sizeof(DATA_TYPE));
tmp = (DATA_TYPE *)malloc(NX * sizeof(DATA_TYPE));
fprintf(stdout, "<< Matrix Transpose and Vector Multiplication >>\n");
init_array(x, A);
t_start = rtclock();
atax_OMP(A, x, y_outputFromGpu, tmp);
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
#ifdef RUN_TEST
t_start = rtclock();
atax_cpu(A, x, y, tmp);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
fail = compareResults(y, y_outputFromGpu);
#endif
free(A);
free(x);
free(y);
free(y_outputFromGpu);
free(tmp);
return fail;
}
|
potential.h | // Copyright (c) 2013-2017 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file potential.h
*
* \brief Contains declaration and partial implementation of sirius::Potential class.
*/
#ifndef __POTENTIAL_H__
#define __POTENTIAL_H__
#include "periodic_function.h"
#include "spheric_function.h"
#include "simulation_context.h"
#include "density.h"
namespace sirius {
/// Generate effective potential from charge density and magnetization.
/** \note At some point we need to update the atomic potential with the new MT potential. This is simple if the
effective potential is a global function. Otherwise we need to pass the effective potential between MPI ranks.
This is also simple, but requires some time. It is also easier to mix the global functions. */
class Potential
{
private:
Simulation_context& ctx_;
Unit_cell& unit_cell_;
Communicator const& comm_;
/// Total effective potential.
std::unique_ptr<Periodic_function<double>> effective_potential_;
/// Total effective magnetic field.
Periodic_function<double>* effective_magnetic_field_[3];
/// Hartree potential.
std::unique_ptr<Periodic_function<double>> hartree_potential_;
/// XC potential.
Periodic_function<double>* xc_potential_;
/// XC energy per unit particle.
Periodic_function<double>* xc_energy_density_;
/// Local part of pseudopotential.
std::unique_ptr<Smooth_periodic_function<double>> local_potential_;
/// Derivative \f$ \partial \epsilon^{XC} / \partial \sigma_{\alpha} \f$.
/** \f$ \epsilon^{XC} \f$ is the exchange-correlation energy per unit volume and \f$ \sigma \f$ is one of
* \f$ \nabla \rho_{\uparrow} \nabla \rho_{\uparrow} \f$, \f$ \nabla \rho_{\uparrow} \nabla \rho_{\downarrow} \f$ or
* \f$ \nabla \rho_{\downarrow} \nabla \rho_{\downarrow} \f$. This quantity is required to compute the GGA
* contribution to the XC stress tensor.
*/
std::array<std::unique_ptr<Smooth_periodic_function<double>>, 2> vsigma_;
/// Used to compute SCF correction to forces.
/** This function is set by PW code and is not computed here. */
std::unique_ptr<Smooth_periodic_function<double>> dveff_;
mdarray<double, 3> sbessel_mom_;
mdarray<double, 3> sbessel_mt_;
mdarray<double, 2> gamma_factors_R_;
int lmax_;
std::unique_ptr<SHT> sht_;
int pseudo_density_order_{9};
std::vector<double_complex> zil_;
std::vector<double_complex> zilm_;
mdarray<int, 1> l_by_lm_;
mdarray<double_complex, 2> gvec_ylm_;
double energy_vha_;
/// Electronic part of Hartree potential.
/** Used to compute electron-nuclear contribution to the total energy */
mdarray<double, 1> vh_el_;
std::unique_ptr<Mixer<double>> mixer_{nullptr};
std::vector<XC_functional> xc_func_;
/// Plane-wave coefficients of the effective potential weighted by the unit step-function.
mdarray<double_complex, 1> veff_pw_;
/// Plane-wave coefficients of the inverse relativistic mass weighted by the unit step-function.
mdarray<double_complex, 1> rm_inv_pw_;
/// Plane-wave coefficients of the squared inverse relativistic mass weighted by the unit step-function.
mdarray<double_complex, 1> rm2_inv_pw_;
struct paw_potential_data_t
{
Atom *atom_{nullptr};
int ia{-1};
int ia_paw{-1};
std::vector<Spheric_function<spectral, double>> ae_potential_;
std::vector<Spheric_function<spectral, double>> ps_potential_;
double hartree_energy_{0.0};
double xc_energy_{0.0};
double core_energy_{0.0};
double one_elec_energy_{0.0};
};
std::vector<double> paw_hartree_energies_;
std::vector<double> paw_xc_energies_;
std::vector<double> paw_core_energies_;
std::vector<double> paw_one_elec_energies_;
double paw_hartree_total_energy_{0.0};
double paw_xc_total_energy_{0.0};
double paw_total_core_energy_{0.0};
double paw_one_elec_energy_{0.0};
std::vector<paw_potential_data_t> paw_potential_data_;
mdarray<double, 4> paw_dij_;
int max_paw_basis_size_{0};
void init_PAW();
double xc_mt_PAW_nonmagnetic(Spheric_function<spectral, double>& full_potential,
Spheric_function<spectral, double> const& full_density,
std::vector<double> const& rho_core);
double xc_mt_PAW_collinear(std::vector<Spheric_function<spectral, double>>& potential,
std::vector<Spheric_function<spectral, double>> const& density,
std::vector<double> const& rho_core);
double xc_mt_PAW_noncollinear(std::vector<Spheric_function<spectral, double>>& potential,
std::vector<Spheric_function<spectral, double>> const& density,
std::vector<double> const& rho_core);
void calc_PAW_local_potential(paw_potential_data_t &pdd,
std::vector<Spheric_function<spectral, double>> const& ae_density,
std::vector<Spheric_function<spectral, double>> const& ps_density);
void calc_PAW_local_Dij(paw_potential_data_t &pdd, mdarray<double, 4>& paw_dij);
double calc_PAW_hartree_potential(Atom& atom,
Spheric_function<spectral, double> const& full_density,
Spheric_function<spectral, double>& full_potential);
double calc_PAW_one_elec_energy(paw_potential_data_t &pdd,
const mdarray<double_complex, 4>& density_matrix,
const mdarray<double, 4>& paw_dij);
void add_paw_Dij_to_atom_Dmtrx();
/// Compute MT part of the potential and MT multipole moments
inline void poisson_vmt(Periodic_function<double> const& rho__,
mdarray<double_complex, 2>& qmt__)
{
PROFILE("sirius::Potential::poisson_vmt");
qmt__.zero();
for (int ialoc = 0; ialoc < unit_cell_.spl_num_atoms().local_size(); ialoc++) {
int ia = unit_cell_.spl_num_atoms(ialoc);
auto qmt = poisson_vmt<false>(unit_cell_.atom(ia), rho__.f_mt(ialoc),
const_cast<Spheric_function<function_domain_t::spectral, double>&>(hartree_potential_->f_mt(ialoc)));
SHT::convert(ctx_.lmax_rho(), &qmt[0], &qmt__(0, ia));
}
ctx_.comm().allreduce(&qmt__(0, 0), (int)qmt__.size());
}
/// Perform a G-vector summation of plane-wave coefficients multiplied by radial integrals.
inline void poisson_sum_G(int lmmax__,
double_complex const* fpw__,
mdarray<double, 3>& fl__,
mdarray<double_complex, 2>& flm__);
/// Add contribution from the pseudocharge to the plane-wave expansion
inline void poisson_add_pseudo_pw(mdarray<double_complex, 2>& qmt, mdarray<double_complex, 2>& qit, double_complex* rho_pw);
/// Generate local part of pseudo potential.
/** Total local potential is a lattice sum:
* \f[
* V({\bf r}) = \sum_{{\bf T},\alpha} V_{\alpha}({\bf r} - {\bf T} - {\bf \tau}_{\alpha})
* \f]
* We want to compute its plane-wave expansion coefficients:
* \f[
* V({\bf G}) = \frac{1}{V} \int e^{-i{\bf Gr}} V({\bf r}) d{\bf r} =
* \frac{1}{V} \sum_{{\bf T},\alpha} \int e^{-i{\bf Gr}}V_{\alpha}({\bf r} - {\bf T} - {\bf \tau}_{\alpha})d{\bf r}
* \f]
* Standard change of variables: \f$ {\bf r}' = {\bf r} - {\bf T} - {\bf \tau}_{\alpha},\; {\bf r} = {\bf r}' + {\bf T} + {\bf \tau}_{\alpha} \f$
* leads to:
* \f[
* V({\bf G}) = \frac{1}{V} \sum_{{\bf T},\alpha} \int e^{-i{\bf G}({\bf r}' + {\bf T} + {\bf \tau}_{\alpha})}V_{\alpha}({\bf r}')d{\bf r'} =
* \frac{N}{V} \sum_{\alpha} \int e^{-i{\bf G}({\bf r}' + {\bf \tau}_{\alpha})}V_{\alpha}({\bf r}')d{\bf r'} =
* \frac{1}{\Omega} \sum_{\alpha} e^{-i {\bf G} {\bf \tau}_{\alpha} } \int e^{-i{\bf G}{\bf r}}V_{\alpha}({\bf r})d{\bf r}
* \f]
* Using the well-known expansion of a plane wave in terms of spherical Bessel functions:
* \f[
* e^{i{\bf G}{\bf r}}=4\pi \sum_{\ell m} i^\ell j_{\ell}(Gr)Y_{\ell m}^{*}({\bf \hat G})Y_{\ell m}({\bf \hat r})
* \f]
* and remembering that for \f$ \ell = 0 \f$ (the potential is spherical) \f$ j_{0}(x) = \sin(x) / x \f$ we have:
* \f[
* V_{\alpha}({\bf G}) = \int V_{\alpha}(r) 4\pi \frac{\sin(Gr)}{Gr} Y^{*}_{00} Y_{00} r^2 \sin(\theta) dr d \phi d\theta =
* 4\pi \int V_{\alpha}(r) \frac{\sin(Gr)}{Gr} r^2 dr
* \f]
* The tricky part comes next: \f$ V_{\alpha}({\bf r}) \f$ is a long-range potential -- it decays slowly as
* \f$ -Z_{\alpha}^{p}/r \f$ and the straightforward integration with the spherical Bessel function is numerically
* unstable. For \f$ {\bf G} = 0 \f$ an extra term \f$ Z_{\alpha}^p/r \f$, corresponding to the potential of
* pseudo-ion, is added to and removed from the local part of the atomic pseudopotential \f$ V_{\alpha}({\bf r}) \f$:
* \f[
* V_{\alpha}({\bf G} = 0) = \int V_{\alpha}({\bf r})d{\bf r} \Rightarrow
* 4\pi \int \Big( V_{\alpha}(r) + \frac{Z_{\alpha}^p}{r} \Big) r^2 dr -
* 4\pi \int \Big( \frac{Z_{\alpha}^p}{r} \Big) r^2 dr
* \f]
* The second term corresponds to the average electrostatic potential of the ions and is ignored
* (like the \f$ {\bf G} = 0 \f$ term in the Hartree potential of electrons).
* For \f$ G \ne 0 \f$ the following trick is done: \f$ Z_{\alpha}^p {\rm erf}(r) / r \f$ is added to and
* removed from \f$ V_{\alpha}(r) \f$. The idea is to make potential decay quickly and then take the extra
* contribution analytically. We have:
* \f[
* V_{\alpha}({\bf G}) = 4\pi \int \Big(V_{\alpha}(r) + Z_{\alpha}^p \frac{{\rm erf}(r)} {r} -
* Z_{\alpha}^p \frac{{\rm erf}(r)}{r}\Big) \frac{\sin(Gr)}{Gr} r^2 dr
* \f]
* Analytical contribution from the error function is computed using the 1D Fourier transform in complex plane:
* \f[
* \frac{1}{\sqrt{2 \pi}} \int_{-\infty}^{\infty} {\rm erf}(t) e^{i\omega t} dt =
* \frac{i e^{-\frac{\omega ^2}{4}} \sqrt{\frac{2}{\pi }}}{\omega }
* \f]
* from which we immediately get
* \f[
* \int_{0}^{\infty} \frac{{\rm erf}(r)}{r} \frac{\sin(Gr)}{Gr} r^2 dr = \frac{e^{-\frac{G^2}{4}}}{G^2}
* \f]
* The final expression for the local potential radial integrals for \f$ G \ne 0 \f$ takes the following form:
* \f[
* 4\pi \int \Big(V_{\alpha}(r) r + Z_{\alpha}^p {\rm erf}(r) \Big) \frac{\sin(Gr)}{G} dr - Z_{\alpha}^p \frac{e^{-\frac{G^2}{4}}}{G^2}
* \f]
*/
inline void generate_local_potential()
{
PROFILE("sirius::Potential::generate_local_potential");
Radial_integrals_vloc<false> ri(ctx_.unit_cell(), ctx_.pw_cutoff(), ctx_.settings().nprii_vloc_);
auto v = ctx_.make_periodic_function<index_domain_t::local>([&](int iat, double g)
{
if (this->ctx_.unit_cell().atom_type(iat).local_potential().empty()) {
return 0.0;
} else {
return ri.value(iat, g);
}
});
std::copy(v.begin(), v.end(), &local_potential_->f_pw_local(0));
local_potential_->fft_transform(1);
if (ctx_.control().print_checksum_) {
//auto cs = local_potential_->checksum_pw();
auto cs1 = local_potential_->checksum_rg();
if (ctx_.comm().rank() == 0) {
//DUMP("checksum(local_potential_pw): %18.10f %18.10f", cs.real(), cs.imag());
print_checksum("local_potential_rg", cs1);
}
}
}
/// Generate non-spin polarized XC potential in the muffin-tins.
inline void xc_mt_nonmagnetic(Radial_grid<double> const& rgrid,
std::vector<XC_functional>& xc_func,
Spheric_function<spectral, double> const& rho_lm,
Spheric_function<spatial, double>& rho_tp,
Spheric_function<spatial, double>& vxc_tp,
Spheric_function<spatial, double>& exc_tp);
/// Generate spin-polarized XC potential in the muffin-tins.
inline void xc_mt_magnetic(Radial_grid<double> const& rgrid,
std::vector<XC_functional>& xc_func,
Spheric_function<spectral, double>& rho_up_lm,
Spheric_function<spatial, double>& rho_up_tp,
Spheric_function<spectral, double>& rho_dn_lm,
Spheric_function<spatial, double>& rho_dn_tp,
Spheric_function<spatial, double>& vxc_up_tp,
Spheric_function<spatial, double>& vxc_dn_tp,
Spheric_function<spatial, double>& exc_tp);
/// Generate XC potential in the muffin-tins.
inline void xc_mt(Density const& density__);
/// Generate non-magnetic XC potential on the regular real-space grid.
template <bool add_pseudo_core__>
inline void xc_rg_nonmagnetic(Density const& density__);
/// Generate magnetic XC potential on the regular real-space grid.
template <bool add_pseudo_core__>
inline void xc_rg_magnetic(Density const& density__);
inline void init();
public:
/// Constructor
Potential(Simulation_context& ctx__)
: ctx_(ctx__)
, unit_cell_(ctx__.unit_cell())
, comm_(ctx__.comm())
{
PROFILE("sirius::Potential::Potential");
lmax_ = std::max(ctx_.lmax_rho(), ctx_.lmax_pot());
sht_ = std::unique_ptr<SHT>(new SHT(lmax_));
if (lmax_ >= 0) {
l_by_lm_ = Utils::l_by_lm(lmax_);
/* precompute i^l */
zil_.resize(lmax_ + 1);
for (int l = 0; l <= lmax_; l++) {
zil_[l] = std::pow(double_complex(0, 1), l);
}
zilm_.resize(Utils::lmmax(lmax_));
for (int l = 0, lm = 0; l <= lmax_; l++) {
for (int m = -l; m <= l; m++, lm++) {
zilm_[lm] = zil_[l];
}
}
}
effective_potential_ = std::unique_ptr<Periodic_function<double>>(new Periodic_function<double>(ctx_, ctx_.lmmax_pot()));
for (int j = 0; j < ctx_.num_mag_dims(); j++) {
effective_magnetic_field_[j] = new Periodic_function<double>(ctx_, ctx_.lmmax_pot());
}
hartree_potential_ = std::unique_ptr<Periodic_function<double>>(new Periodic_function<double>(ctx_, ctx_.lmmax_pot()));
hartree_potential_->allocate_mt(false);
xc_potential_ = new Periodic_function<double>(ctx_, ctx_.lmmax_pot());
xc_potential_->allocate_mt(false);
xc_energy_density_ = new Periodic_function<double>(ctx_, ctx_.lmmax_pot());
xc_energy_density_->allocate_mt(false);
for (int ispn = 0; ispn < ctx_.num_spins(); ispn++) {
vsigma_[ispn] = std::unique_ptr<Smooth_periodic_function<double>>(new Smooth_periodic_function<double>(ctx_.fft(), ctx_.gvec_partition()));
}
if (!ctx_.full_potential()) {
local_potential_ = std::unique_ptr<Smooth_periodic_function<double>>(new Smooth_periodic_function<double>(ctx_.fft(), ctx_.gvec_partition()));
local_potential_->zero();
bool is_empty{true};
for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) {
is_empty &= unit_cell_.atom_type(iat).local_potential().empty();
}
if (!is_empty) {
generate_local_potential();
}
dveff_ = std::unique_ptr<Smooth_periodic_function<double>>(new Smooth_periodic_function<double>(ctx_.fft(), ctx_.gvec_partition()));
}
vh_el_ = mdarray<double, 1>(unit_cell_.num_atoms());
if (ctx_.full_potential()) {
gvec_ylm_ = mdarray<double_complex, 2>(ctx_.lmmax_pot(), ctx_.gvec().count(), memory_t::host, "gvec_ylm_");
#pragma omp parallel for schedule(static)
for (int igloc = 0; igloc < ctx_.gvec().count(); igloc++) {
int ig = ctx_.gvec().offset() + igloc;
auto rtp = SHT::spherical_coordinates(ctx_.gvec().gvec_cart(ig));
SHT::spherical_harmonics(ctx_.lmax_pot(), rtp[1], rtp[2], &gvec_ylm_(0, igloc));
}
switch (ctx_.valence_relativity()) {
case relativity_t::iora: {
rm2_inv_pw_ = mdarray<double_complex, 1>(ctx_.gvec().num_gvec());
}
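/* fall through: IORA also needs the ZORA and default arrays */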
case relativity_t::zora: {
rm_inv_pw_ = mdarray<double_complex, 1>(ctx_.gvec().num_gvec());
}
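/* fall through: ZORA also needs veff_pw_ */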
default: {
veff_pw_ = mdarray<double_complex, 1>(ctx_.gvec().num_gvec());
}
}
}
init();
/* create list of XC functionals */
for (auto& xc_label: ctx_.xc_functionals()) {
xc_func_.push_back(XC_functional(xc_label, ctx_.num_spins())); /* temporary is already an rvalue; std::move is redundant */
}
/* in case of PAW */
init_PAW();
}
~Potential()
{
for (int j = 0; j < ctx_.num_mag_dims(); j++) {
delete effective_magnetic_field_[j];
}
delete xc_potential_;
delete xc_energy_density_;
}
inline void set_effective_potential_ptr(double* veffmt, double* veffit)
{
if (ctx_.full_potential() && veffmt) {
effective_potential_->set_mt_ptr(veffmt);
}
if (veffit) {
effective_potential_->set_rg_ptr(veffit);
}
}
inline void set_effective_magnetic_field_ptr(double* beffmt, double* beffit)
{
if (ctx_.num_mag_dims() == 0) {
return;
}
assert(ctx_.num_spins() == 2);
/* set temporary array wrapper */
mdarray<double, 4> beffmt_tmp(beffmt, ctx_.lmmax_pot(), unit_cell_.max_num_mt_points(),
unit_cell_.num_atoms(), ctx_.num_mag_dims());
mdarray<double, 2> beffit_tmp(beffit, ctx_.fft().size(), ctx_.num_mag_dims());
if (ctx_.num_mag_dims() == 1) {
/* z-component */
if (beffmt) {
effective_magnetic_field_[0]->set_mt_ptr(&beffmt_tmp(0, 0, 0, 0));
}
if (beffit) {
effective_magnetic_field_[0]->set_rg_ptr(&beffit_tmp(0, 0));
}
}
if (ctx_.num_mag_dims() == 3) {
if (beffmt) {
/* z-component */
effective_magnetic_field_[0]->set_mt_ptr(&beffmt_tmp(0, 0, 0, 2));
/* x-component */
effective_magnetic_field_[1]->set_mt_ptr(&beffmt_tmp(0, 0, 0, 0));
/* y-component */
effective_magnetic_field_[2]->set_mt_ptr(&beffmt_tmp(0, 0, 0, 1));
}
if (beffit) {
/* z-component */
effective_magnetic_field_[0]->set_rg_ptr(&beffit_tmp(0, 2));
/* x-component */
effective_magnetic_field_[1]->set_rg_ptr(&beffit_tmp(0, 0));
/* y-component */
effective_magnetic_field_[2]->set_rg_ptr(&beffit_tmp(0, 1));
}
}
}
/// Zero effective potential and magnetic field.
inline void zero()
{
effective_potential_->zero();
for (int j = 0; j < ctx_.num_mag_dims(); j++) {
effective_magnetic_field_[j]->zero();
}
}
/// Solve Poisson equation for a single atom.
template <bool free_atom, typename T>
inline std::vector<T>
poisson_vmt(Atom const& atom__,
Spheric_function<function_domain_t::spectral, T> const& rho_mt__,
Spheric_function<function_domain_t::spectral, T>& vha_mt__) const
{
const bool use_r_prefact{false};
int lmmax_rho = rho_mt__.angular_domain_size();
int lmmax_pot = vha_mt__.angular_domain_size();
assert((int)l_by_lm_.size() >= lmmax_rho);
if (lmmax_rho > ctx_.lmmax_rho()) {
std::stringstream s;
s << "wrong angular size of rho_mt for atom of " << atom__.type().symbol() << std::endl
<< " lmmax_rho: " << lmmax_rho << std::endl
<< " ctx.lmmax_rho(): " << ctx_.lmmax_rho();
TERMINATE(s);
}
std::vector<T> qmt(ctx_.lmmax_rho(), 0);
double R = atom__.mt_radius();
int nmtp = atom__.num_mt_points();
#pragma omp parallel
{
std::vector<T> g1;
std::vector<T> g2;
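/* Here g1(r) = \int_0^r \rho_l(r') r'^{l+2} dr' and
   g2(r) = \int_0^r \rho_l(r') r'^{1-l} dr'. For a free atom the channel
   potential is v_l(r) = (4\pi/(2l+1)) [ g1(r)/r^{l+1} + r^l (g2(R) - g2(r)) ];
   the in-crystal branch below adds the homogeneous solution that makes the
   potential vanish at the sphere boundary r = R. */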
#pragma omp for
for (int lm = 0; lm < lmmax_rho; lm++) {
int l = l_by_lm_[lm];
auto rholm = rho_mt__.component(lm);
if (use_r_prefact) {
/* save multipole moment */
qmt[lm] = rholm.integrate(g1, l + 2);
} else {
for (int ir = 0; ir < nmtp; ir++) {
double r = atom__.radial_grid(ir);
rholm(ir) *= std::pow(r, l + 2);
}
qmt[lm] = rholm.interpolate().integrate(g1, 0);
}
if (lm < lmmax_pot) {
if (use_r_prefact) {
rholm.integrate(g2, 1 - l);
} else {
rholm = rho_mt__.component(lm);
for (int ir = 0; ir < nmtp; ir++) {
double r = atom__.radial_grid(ir);
rholm(ir) *= std::pow(r, 1 - l);
}
rholm.interpolate().integrate(g2, 0);
}
double fact = fourpi / double(2 * l + 1);
T vlm;
for (int ir = 0; ir < nmtp; ir++) {
double r = atom__.radial_grid(ir);
if (free_atom) {
vlm = g1[ir] / std::pow(r, l + 1) + (g2.back() - g2[ir]) * std::pow(r, l);
} else {
double d1 = 1.0 / std::pow(R, 2 * l + 1);
vlm = (1.0 - std::pow(r / R, 2 * l + 1)) * g1[ir] / std::pow(r, l + 1) +
(g2.back() - g2[ir]) * std::pow(r, l) - (g1.back() - g1[ir]) * std::pow(r, l) * d1;
}
vha_mt__(lm, ir) = vlm * fact;
}
}
}
}
if (!free_atom) {
/* constant part of nuclear potential -z*(1/r - 1/R) */
for (int ir = 0; ir < nmtp; ir++) {
#ifdef __VHA_AUX
double r = atom__.radial_grid(ir);
vha_mt__(0, ir) += atom__.zn() * (1 / R - 1 / r) / y00;
#else
vha_mt__(0, ir) += atom__.zn() / R / y00;
#endif
}
}
/* nuclear multipole moment */
qmt[0] -= atom__.zn() * y00;
return qmt; /* return by value; std::move here would inhibit copy elision (NRVO) */
}
/// Poisson solver.
/** Detailed explanation is available in:
* - Weinert, M. (1981). Solution of Poisson's equation: beyond Ewald-type methods.
* Journal of Mathematical Physics, 22(11), 2433–2439. doi:10.1063/1.524800
* - Classical Electrodynamics Third Edition by J. D. Jackson.
*
* Solution of Poisson's equation for the muffin-tin geometry is carried out in several steps:
* - True multipole moments \f$ q_{\ell m}^{\alpha} \f$ of the muffin-tin charge density are computed.
* - Pseudocharge density is introduced. Pseudocharge density coincides with the true charge density
* in the interstitial region and its multipole moments inside muffin-tin spheres coincide with the
* true multipole moments.
* - Poisson's equation for the pseudocharge density is solved in the plane-wave domain. It gives the
* correct interstitial potential and correct muffin-tin boundary values.
* - Finally, muffin-tin part of potential is found by solving Poisson's equation in spherical coordinates
* with Dirichlet boundary conditions.
*
* We start by computing true multipole moments of the charge density inside the muffin-tin spheres:
* \f[
* q_{\ell m}^{\alpha} = \int Y_{\ell m}^{*}(\hat {\bf r}) r^{\ell} \rho({\bf r}) d {\bf r} =
* \int \rho_{\ell m}^{\alpha}(r) r^{\ell + 2} dr
* \f]
* and for the nucleus with charge density \f$ \rho(r, \theta, \phi) = -\frac{Z \delta(r)}{4 \pi r^2} \f$:
* \f[
* q_{00}^{\alpha} = \int Y_{0 0} \frac{-Z_{\alpha} \delta(r)}{4 \pi r^2} r^2 \sin \theta dr d\phi d\theta =
* -Z_{\alpha} Y_{00}
* \f]
*
* Now we need to get the multipole moments of the interstitial charge density \f$ \rho^{I}({\bf r}) \f$ inside
* muffin-tin spheres. We need this in order to estimate the amount of pseudocharge to be added to
* \f$ \rho^{I}({\bf r}) \f$ to get the pseudocharge multipole moments equal to the true multipole moments.
* We want to compute
* \f[
* q_{\ell m}^{I,\alpha} = \int Y_{\ell m}^{*}(\hat {\bf r}) r^{\ell} \rho^{I}({\bf r}) d {\bf r}
* \f]
* where
* \f[
* \rho^{I}({\bf r}) = \sum_{\bf G}e^{i{\bf Gr}} \rho({\bf G})
* \f]
*
* Recall the spherical plane wave expansion:
* \f[
* e^{i{\bf G r}}=4\pi e^{i{\bf G r}_{\alpha}} \sum_{\ell m} i^\ell
* j_{\ell}(G|{\bf r}-{\bf r}_{\alpha}|)
* Y_{\ell m}^{*}({\bf \hat G}) Y_{\ell m}(\widehat{{\bf r}-{\bf r}_{\alpha}})
* \f]
* Multipole moments of each plane-wave are computed as:
* \f[
* q_{\ell m}^{\alpha}({\bf G}) = 4 \pi e^{i{\bf G r}_{\alpha}} Y_{\ell m}^{*}({\bf \hat G}) i^{\ell}
* \int_{0}^{R} j_{\ell}(Gr) r^{\ell + 2} dr = 4 \pi e^{i{\bf G r}_{\alpha}} Y_{\ell m}^{*}({\bf \hat G}) i^{\ell}
* \left\{\begin{array}{ll} \frac{R^{\ell + 2} j_{\ell + 1}(GR)}{G} & G \ne 0 \\
* \frac{R^3}{3} \delta_{\ell 0} & G = 0 \end{array} \right.
* \f]
*
* The final expression for the muffin-tin multipole moments of the interstitial charge density:
* \f[
* q_{\ell m}^{I,\alpha} = \sum_{\bf G}\rho({\bf G}) q_{\ell m}^{\alpha}({\bf G})
* \f]
*
* Now we are going to modify interstitial charge density inside the muffin-tin region in order to
* get the true multipole moments. We will add a pseudodensity of the form:
* \f[
* P({\bf r}) = \sum_{\ell m} p_{\ell m}^{\alpha} Y_{\ell m}(\hat {\bf r}) r^{\ell} \left(1-\frac{r^2}{R^2}\right)^n
* \f]
* The radial functions of the pseudodensity are chosen in a special way. First, they produce confined and
* smooth functions inside the muffin-tins; second (and most important), the plane-wave coefficients of the
* pseudodensity can be computed analytically. Let's first find the relation between the \f$ p_{\ell m}^{\alpha} \f$
* coefficients and the true and interstitial multipole moments. We are searching for the pseudodensity that restores
* the true multipole moments:
* \f[
* \int Y_{\ell m}^{*}(\hat {\bf r}) r^{\ell} \Big(\rho^{I}({\bf r}) + P({\bf r})\Big) d {\bf r} = q_{\ell m}^{\alpha}
* \f]
* Then
* \f[
* p_{\ell m}^{\alpha} = \frac{q_{\ell m}^{\alpha} - q_{\ell m}^{I,\alpha}}
* {\int r^{2 \ell + 2} \left(1-\frac{r^2}{R^2}\right)^n dr} =
* (q_{\ell m}^{\alpha} - q_{\ell m}^{I,\alpha}) \frac{2 \Gamma(5/2 + \ell + n)}{R^{2\ell + 3}\Gamma(3/2 + \ell) \Gamma(n + 1)}
* \f]
*
* Now let's find the plane-wave coefficients of \f$ P({\bf r}) \f$ inside each muffin-tin:
* \f[
* P^{\alpha}({\bf G}) = \frac{4\pi e^{-i{\bf G r}_{\alpha}}}{\Omega} \sum_{\ell m} (-i)^{\ell} Y_{\ell m}({\bf \hat G})
* p_{\ell m}^{\alpha} \int_{0}^{R} j_{\ell}(G r) r^{\ell} \left(1-\frac{r^2}{R^2}\right)^n r^2 dr
* \f]
*
* Integral of the spherical Bessel function with the radial pseudodensity component is taken analytically:
* \f[
* \int_{0}^{R} j_{\ell}(G r) r^{\ell} \left(1-\frac{r^2}{R^2}\right)^n r^2 dr =
* 2^n R^{\ell + 3} (GR)^{-n - 1} \Gamma(n + 1) j_{n + \ell + 1}(GR)
* \f]
*
* The final expression for the pseudodensity plane-wave component is:
* \f[
* P^{\alpha}({\bf G}) = \frac{4\pi e^{-i{\bf G r}_{\alpha}}}{\Omega} \sum_{\ell m} (-i)^{\ell} Y_{\ell m}({\bf \hat G})
* (q_{\ell m}^{\alpha} - q_{\ell m}^{I,\alpha}) \Big( \frac{2}{GR} \Big)^{n+1}
* \frac{ \Gamma(5/2 + n + \ell) } {R^{\ell} \Gamma(3/2+\ell)} j_{n + \ell + 1}(GR)
* \f]
*
* For \f$ G=0 \f$ only \f$ \ell = 0 \f$ contribution survives:
* \f[
* P^{\alpha}({\bf G}=0) = \frac{4\pi}{\Omega} Y_{00} (q_{00}^{\alpha} - q_{00}^{I,\alpha})
* \f]
*
* We can now sum the contributions from all muffin-tin spheres and obtain a modified charge density,
* which is equal to the exact charge density in the interstitial region and which has correct multipole
* moments inside muffin-tin spheres:
* \f[
* \tilde \rho({\bf G}) = \rho({\bf G}) + \sum_{\alpha} P^{\alpha}({\bf G})
* \f]
* This density is used to solve the Poisson's equation in the plane-wave domain:
* \f[
* V_{H}({\bf G}) = \frac{4 \pi \tilde \rho({\bf G})}{G^2}
* \f]
* The potential is correct in the interstitial region and also on the muffin-tin surface. We will use
* it to find the boundary conditions for the potential inside the muffin-tins. Using spherical
* plane-wave expansion we get:
* \f[
* V^{\alpha}_{\ell m}(R) = \sum_{\bf G} V_{H}({\bf G})
* 4\pi e^{i{\bf G r}_{\alpha}} i^\ell
* j_{\ell}^{\alpha}(GR) Y_{\ell m}^{*}({\bf \hat G})
* \f]
*
* As soon as the muffin-tin boundary conditions for the potential are known, we can find the potential
* inside spheres using Dirichlet Green's function:
* \f[
* V({\bf x}) = \int \rho({\bf x'})G_D({\bf x},{\bf x'}) d{\bf x'} - \frac{1}{4 \pi} \int_{S} V({\bf x'})
* \frac{\partial G_D}{\partial n'} d{\bf S'}
* \f]
* where Dirichlet Green's function for the sphere is defined as:
* \f[
* G_D({\bf x},{\bf x'}) = 4\pi \sum_{\ell m} \frac{Y_{\ell m}^{*}({\bf \hat x'})
* Y_{\ell m}(\hat {\bf x})}{2\ell + 1}
* \frac{r_{<}^{\ell}}{r_{>}^{\ell+1}}\Biggl(1 - \Big( \frac{r_{>}}{R} \Big)^{2\ell + 1} \Biggr)
* \f]
* and its normal derivative at the surface is equal to:
* \f[
* \frac{\partial G_D}{\partial n'} = -\frac{4 \pi}{R^2} \sum_{\ell m} \Big( \frac{r}{R} \Big)^{\ell}
* Y_{\ell m}^{*}({\bf \hat x'}) Y_{\ell m}(\hat {\bf x})
* \f]
*/
inline void poisson(Periodic_function<double> const& rho);
/// Generate XC potential and energy density
/** In case of spin-unpolarized GGA the XC potential has the following expression:
* \f[
* V_{XC}({\bf r}) = \frac{\partial}{\partial \rho} \varepsilon_{xc}(\rho, \nabla \rho) -
* \nabla \frac{\partial}{\partial (\nabla \rho)} \varepsilon_{xc}(\rho, \nabla \rho)
* \f]
* LibXC packs the gradient information into the so-called \a sigma array:
* \f[
* \sigma = \nabla \rho \nabla \rho
* \f]
* Changing variables in \f$ V_{XC} \f$ expression gives:
* \f{eqnarray*}{
* V_{XC}({\bf r}) &=& \frac{\partial}{\partial \rho} \varepsilon_{xc}(\rho, \sigma) -
* \nabla \frac{\partial \varepsilon_{xc}(\rho, \sigma)}{\partial \sigma}
* \frac{\partial \sigma}{ \partial (\nabla \rho)} \\
* &=& \frac{\partial}{\partial \rho} \varepsilon_{xc}(\rho, \sigma) -
* 2 \nabla \frac{\partial \varepsilon_{xc}(\rho, \sigma)}{\partial \sigma} \nabla \rho -
* 2 \frac{\partial \varepsilon_{xc}(\rho, \sigma)}{\partial \sigma} \nabla^2 \rho
* \f}
* The following sequence of functions must be computed:
* - density on the real space grid
* - gradient of density (in spectral representation)
* - gradient of density on the real space grid
* - laplacian of density (in spectral representation)
* - laplacian of density on the real space grid
* - \a sigma array
* - a call to LibXC must be performed and the \a sigma derivatives must be obtained
* - \f$ \frac{\partial \varepsilon_{xc}(\rho, \sigma)}{\partial \sigma} \f$ in spectral representation
* - gradient of \f$ \frac{\partial \varepsilon_{xc}(\rho, \sigma)}{\partial \sigma} \f$ in spectral representation
* - gradient of \f$ \frac{\partial \varepsilon_{xc}(\rho, \sigma)}{\partial \sigma} \f$ on the real space grid
*
* The expression for the spin-polarized potential has a slightly more complicated form:
* \f{eqnarray*}{
* V_{XC}^{\gamma} &=& \frac{\partial \varepsilon_{xc}}{\partial \rho_{\gamma}} - \nabla
* \Big( 2 \frac{\partial \varepsilon_{xc}}{\partial \sigma_{\gamma \gamma}} \nabla \rho_{\gamma} +
* \frac{\partial \varepsilon_{xc}}{\partial \sigma_{\gamma \delta}} \nabla \rho_{\delta} \Big) \\
* &=& \frac{\partial \varepsilon_{xc}}{\partial \rho_{\gamma}}
* -2 \nabla \frac{\partial \varepsilon_{xc}}{\partial \sigma_{\gamma \gamma}} \nabla \rho_{\gamma}
* -2 \frac{\partial \varepsilon_{xc}}{\partial \sigma_{\gamma \gamma}} \nabla^2 \rho_{\gamma}
* - \nabla \frac{\partial \varepsilon_{xc}}{\partial \sigma_{\gamma \delta}} \nabla \rho_{\delta}
* - \frac{\partial \varepsilon_{xc}}{\partial \sigma_{\gamma \delta}} \nabla^2 \rho_{\delta}
* \f}
* In the magnetic case the "up" and "dn" density and potential decomposition is used. Using the fact that the
* effective magnetic field is parallel to the magnetization at each point in space, we can write the coupling
* of density and magnetization with the XC potential and XC magnetic field as:
* \f[
* V_{xc}({\bf r}) \rho({\bf r}) + {\bf B}_{xc}({\bf r}){\bf m}({\bf r}) =
* V_{xc}({\bf r}) \rho({\bf r}) + {\rm B}_{xc}({\bf r}) {\rm m}({\bf r}) =
* V^{\uparrow}({\bf r})\rho^{\uparrow}({\bf r}) + V^{\downarrow}({\bf r})\rho^{\downarrow}({\bf r})
* \f]
* where
* \f{eqnarray*}{
* \rho^{\uparrow}({\bf r}) &=& \frac{1}{2}\Big( \rho({\bf r}) + {\rm m}({\bf r}) \Big) \\
* \rho^{\downarrow}({\bf r}) &=& \frac{1}{2}\Big( \rho({\bf r}) - {\rm m}({\bf r}) \Big)
* \f}
* and
* \f{eqnarray*}{
* V^{\uparrow}({\bf r}) &=& V_{xc}({\bf r}) + {\rm B}_{xc}({\bf r}) \\
* V^{\downarrow}({\bf r}) &=& V_{xc}({\bf r}) - {\rm B}_{xc}({\bf r})
* \f}
*/
template <bool add_pseudo_core__ = false>
void xc(Density const& rho__);
/// Generate effective potential and magnetic field from charge density and magnetization.
inline void generate(Density const& density__)
{
PROFILE("sirius::Potential::generate");
/* zero effective potential and magnetic field */
zero();
/* solve Poisson equation */
poisson(density__.rho());
/* add Hartree potential to the total potential */
effective_potential_->add(hartree_potential_.get());
if (ctx_.control().print_hash_) {
auto h = effective_potential_->hash_f_rg();
if (ctx_.comm().rank() == 0) {
print_hash("Vha", h);
}
}
if (ctx_.full_potential()) {
xc(density__);
} else {
/* add local ionic potential to the effective potential */
effective_potential()->add(local_potential());
/* construct XC potentials from rho + rho_core */
xc<true>(density__);
}
/* add XC potential to the effective potential */
effective_potential_->add(xc_potential_);
if (ctx_.control().print_hash_) {
auto h = effective_potential_->hash_f_rg();
if (ctx_.comm().rank() == 0) {
print_hash("Vha+Vxc", h);
}
}
if (ctx_.full_potential()) {
effective_potential_->sync_mt();
for (int j = 0; j < ctx_.num_mag_dims(); j++) {
effective_magnetic_field_[j]->sync_mt();
}
}
/* get plane-wave coefficients of effective potential;
* they will be used in three places:
* 1) compute D-matrix
* 2) establish a mapping between fine and coarse FFT grid for the Hloc operator
* 3) symmetrize effective potential */
effective_potential_->fft_transform(-1);
for (int j = 0; j < ctx_.num_mag_dims(); j++) {
effective_magnetic_field_[j]->fft_transform(-1);
}
if (ctx_.control().print_hash_) {
auto h = effective_potential_->hash_f_pw();
if (ctx_.comm().rank() == 0) {
print_hash("V(G)", h);
}
}
if (!ctx_.full_potential()) {
generate_D_operator_matrix();
generate_PAW_effective_potential(density__);
}
}
inline void save()
{
effective_potential_->hdf5_write(storage_file_name, "effective_potential");
for (int j = 0; j < ctx_.num_mag_dims(); j++) {
std::stringstream s;
s << "effective_magnetic_field/" << j;
effective_magnetic_field_[j]->hdf5_write(storage_file_name, s.str());
}
if (ctx_.comm().rank() == 0 && !ctx_.full_potential()) {
HDF5_tree fout(storage_file_name, hdf5_access_t::read_write);
for (int j = 0; j < ctx_.unit_cell().num_atoms(); j++) {
fout["unit_cell"]["atoms"][j].write("D_operator", ctx_.unit_cell().atom(j).d_mtrx());
}
}
comm_.barrier();
}
inline void load()
{
HDF5_tree fin(storage_file_name, hdf5_access_t::read_only);
int ngv;
fin.read("/parameters/num_gvec", &ngv, 1);
if (ngv != ctx_.gvec().num_gvec()) {
TERMINATE("wrong number of G-vectors");
}
mdarray<int, 2> gv(3, ngv);
fin.read("/parameters/gvec", gv);
effective_potential_->hdf5_read(fin["effective_potential"], gv);
for (int j = 0; j < ctx_.num_mag_dims(); j++) {
effective_magnetic_field_[j]->hdf5_read(fin["effective_magnetic_field"][j], gv);
}
if (ctx_.full_potential()) {
update_atomic_potential();
}
if (!ctx_.full_potential()) {
HDF5_tree fout(storage_file_name, hdf5_access_t::read_only);
for (int j = 0; j < ctx_.unit_cell().num_atoms(); j++) {
fout["unit_cell"]["atoms"][j].read("D_operator", ctx_.unit_cell().atom(j).d_mtrx());
}
}
}
inline void update_atomic_potential()
{
for (int ic = 0; ic < unit_cell_.num_atom_symmetry_classes(); ic++) {
int ia = unit_cell_.atom_symmetry_class(ic).atom_id(0);
int nmtp = unit_cell_.atom(ia).num_mt_points();
std::vector<double> veff(nmtp);
for (int ir = 0; ir < nmtp; ir++) {
veff[ir] = y00 * effective_potential_->f_mt<index_domain_t::global>(0, ir, ia);
}
unit_cell_.atom_symmetry_class(ic).set_spherical_potential(veff);
}
for (int ia = 0; ia < unit_cell_.num_atoms(); ia++) {
double* veff = &effective_potential_->f_mt<index_domain_t::global>(0, 0, ia);
double* beff[] = {nullptr, nullptr, nullptr};
for (int i = 0; i < ctx_.num_mag_dims(); i++) {
beff[i] = &effective_magnetic_field_[i]->f_mt<index_domain_t::global>(0, 0, ia);
}
unit_cell_.atom(ia).set_nonspherical_potential(veff, beff);
}
}
template <device_t pu>
void add_mt_contribution_to_pw();
/// Generate plane-wave coefficients of the potential in the interstitial region.
void generate_pw_coefs();
/// Calculate D operator from potential and augmentation charge.
/** The following real symmetric matrix is computed:
* \f[
* D_{\xi \xi'}^{\alpha} = \int V({\bf r}) Q_{\xi \xi'}^{\alpha}({\bf r}) d{\bf r}
* \f]
* In the plane-wave domain these integrals transform into a sum over Fourier components:
* \f[
* D_{\xi \xi'}^{\alpha} = \sum_{\bf G} \langle V |{\bf G}\rangle \langle{\bf G}|Q_{\xi \xi'}^{\alpha} \rangle =
* \sum_{\bf G} V^{*}({\bf G}) e^{-i{\bf r}_{\alpha}{\bf G}} Q_{\xi \xi'}^{A}({\bf G}) =
* \sum_{\bf G} Q_{\xi \xi'}^{A}({\bf G}) \tilde V_{\alpha}^{*}({\bf G})
* \f]
* where \f$ \alpha \f$ is the atom, \f$ A \f$ is the atom type and
* \f[
* \tilde V_{\alpha}({\bf G}) = e^{i{\bf r}_{\alpha}{\bf G}} V({\bf G})
* \f]
* Both \f$ V({\bf r}) \f$ and \f$ Q({\bf r}) \f$ functions are real and the following condition is fulfilled:
* \f[
* \tilde V_{\alpha}({\bf G}) = \tilde V_{\alpha}^{*}(-{\bf G})
* \f]
* \f[
* Q_{\xi \xi'}({\bf G}) = Q_{\xi \xi'}^{*}(-{\bf G})
* \f]
* In the sum over plane-wave coefficients the \f$ {\bf G} \f$ and \f$ -{\bf G} \f$ contributions will give:
* \f[
* Q_{\xi \xi'}^{A}({\bf G}) \tilde V_{\alpha}^{*}({\bf G}) + Q_{\xi \xi'}^{A}(-{\bf G}) \tilde V_{\alpha}^{*}(-{\bf G}) =
* 2 \Re \Big( Q_{\xi \xi'}^{A}({\bf G}) \Big) \Re \Big( \tilde V_{\alpha}({\bf G}) \Big) +
* 2 \Im \Big( Q_{\xi \xi'}^{A}({\bf G}) \Big) \Im \Big( \tilde V_{\alpha}({\bf G}) \Big)
* \f]
* This allows the use of a <b>dgemm</b> instead of a <b>zgemm</b> when \f$ D_{\xi \xi'}^{\alpha} \f$ matrix
* is calculated for all atoms of the same type.
*/
void generate_D_operator_matrix();
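/* Sketch of the Re/Im split described above (hypothetical shapes; the actual
 * implementation lives in Potential/generate_d_operator_matrix.hpp): packing
 * Q_r = [Re Q(G); Im Q(G)] (2*ngv x nbf^2) and V_r = [Re V~(G); Im V~(G)]
 * (2*ngv) reduces the complex G-sum to a single real dgemm/dgemv,
 * D = 2 * Q_r^T * V_r, with the G = 0 term handled separately so it is not
 * double-counted. */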
void generate_PAW_effective_potential(Density const& density);
std::vector<double> const& PAW_hartree_energies() const
{
return paw_hartree_energies_;
}
std::vector<double> const& PAW_xc_energies() const
{
return paw_xc_energies_;
}
std::vector<double> const& PAW_core_energies() const
{
return paw_core_energies_;
}
std::vector<double> const& PAW_one_elec_energies() const
{
return paw_one_elec_energies_;
}
double PAW_hartree_total_energy() const
{
return paw_hartree_total_energy_;
}
double PAW_xc_total_energy() const
{
return paw_xc_total_energy_;
}
double PAW_total_core_energy() const
{
return paw_total_core_energy_;
}
double PAW_total_energy() const
{
return paw_hartree_total_energy_ + paw_xc_total_energy_;
}
double PAW_one_elec_energy()
{
return paw_one_elec_energy_;
}
void check_potential_continuity_at_mt();
Periodic_function<double>* effective_potential()
{
return effective_potential_.get();
}
Smooth_periodic_function<double>& local_potential()
{
return *local_potential_;
}
Smooth_periodic_function<double>& dveff()
{
return *dveff_;
}
Spheric_function<spectral, double> const& effective_potential_mt(int ialoc) const
{
return effective_potential_->f_mt(ialoc);
}
Periodic_function<double>* effective_magnetic_field(int i)
{
return effective_magnetic_field_[i];
}
Periodic_function<double>& hartree_potential()
{
return *hartree_potential_;
}
Spheric_function<spectral, double> const& hartree_potential_mt(int ialoc) const
{
return hartree_potential_->f_mt(ialoc);
}
Periodic_function<double>* xc_potential()
{
return xc_potential_;
}
Periodic_function<double>* xc_energy_density()
{
return xc_energy_density_;
}
void allocate()
{
effective_potential_->allocate_mt(true);
for (int j = 0; j < ctx_.num_mag_dims(); j++) {
effective_magnetic_field_[j]->allocate_mt(true);
}
}
inline double vh_el(int ia)
{
return vh_el_(ia);
}
inline double energy_vha()
{
return energy_vha_;
}
void mixer_input()
{
/* collect density and magnetization into single array */
std::vector<Periodic_function<double>*> veff_vec(ctx_.num_mag_dims() + 1);
veff_vec[0] = effective_potential_.get();
for (int j = 0; j < ctx_.num_mag_dims(); j++) {
veff_vec[1 + j] = effective_magnetic_field_[j];
}
int k{0};
for (int j = 0; j < ctx_.num_mag_dims() + 1; j++) {
for (int ialoc = 0; ialoc < ctx_.unit_cell().spl_num_atoms().local_size(); ialoc++) {
for (int i = 0; i < static_cast<int>(veff_vec[j]->f_mt(ialoc).size()); i++) {
mixer_->input_local(k++, veff_vec[j]->f_mt(ialoc)[i]);
}
}
for (int i = 0; i < ctx_.fft().local_size(); i++) {
mixer_->input_local(k++, veff_vec[j]->f_rg(i));
}
}
}
void mixer_output()
{
/* collect density and magnetization into single array */
std::vector<Periodic_function<double>*> veff_vec(ctx_.num_mag_dims() + 1);
veff_vec[0] = effective_potential_.get();
for (int j = 0; j < ctx_.num_mag_dims(); j++) {
veff_vec[1 + j] = effective_magnetic_field_[j];
}
int k{0};
for (int j = 0; j < ctx_.num_mag_dims() + 1; j++) {
for (int ialoc = 0; ialoc < ctx_.unit_cell().spl_num_atoms().local_size(); ialoc++) {
auto& f_mt = const_cast<Spheric_function<spectral, double>&>(veff_vec[j]->f_mt(ialoc));
for (int i = 0; i < static_cast<int>(veff_vec[j]->f_mt(ialoc).size()); i++) {
f_mt[i] = mixer_->output_local(k++);
}
}
for (int i = 0; i < ctx_.fft().local_size(); i++) {
veff_vec[j]->f_rg(i) = mixer_->output_local(k++);
}
}
for (auto e: veff_vec) {
e->sync_mt();
}
}
void mixer_init()
{
int sz{0};
for (int ialoc = 0; ialoc < ctx_.unit_cell().spl_num_atoms().local_size(); ialoc++) {
sz += static_cast<int>(effective_potential_->f_mt(ialoc).size());
}
sz += ctx_.fft().local_size();
mixer_ = Mixer_factory<double>(ctx_.mixer_input().type_, 0, (ctx_.num_mag_dims() + 1) * sz, ctx_.mixer_input(), comm_);
mixer_input();
mixer_->initialize();
}
double mix()
{
mixer_input();
double rms = mixer_->mix(ctx_.settings().mixer_rss_min_);
mixer_output();
return rms;
}
double_complex const& veff_pw(int ig__) const
{
return veff_pw_(ig__);
}
inline void set_veff_pw(double_complex* veff_pw__)
{
std::copy(veff_pw__, veff_pw__ + ctx_.gvec().num_gvec(), veff_pw_.at<CPU>());
}
double_complex const& rm_inv_pw(int ig__) const
{
return rm_inv_pw_(ig__);
}
inline void set_rm_inv_pw(double_complex* rm_inv_pw__)
{
std::copy(rm_inv_pw__, rm_inv_pw__ + ctx_.gvec().num_gvec(), rm_inv_pw_.at<CPU>());
}
double_complex const& rm2_inv_pw(int ig__) const
{
return rm2_inv_pw_(ig__);
}
inline void set_rm2_inv_pw(double_complex* rm2_inv_pw__)
{
std::copy(rm2_inv_pw__, rm2_inv_pw__ + ctx_.gvec().num_gvec(), rm2_inv_pw_.at<CPU>());
}
inline void fft_transform(int direction__)
{
effective_potential_->fft_transform(direction__);
for (int j = 0; j < ctx_.num_mag_dims(); j++) {
effective_magnetic_field_[j]->fft_transform(direction__);
}
}
/// Integral of \f$ \rho({\bf r}) V^{XC}({\bf r}) \f$.
double energy_vxc(Density& density__)
{
return density__.rho().inner(xc_potential());
}
/// Integral of \f$ \rho({\bf r}) \epsilon^{XC}({\bf r}) \f$.
double energy_exc(Density& density__)
{
double exc = density__.rho().inner(xc_energy_density());
if (!ctx_.full_potential()) {
exc += density__.rho_pseudo_core().inner(*xc_energy_density());
}
return exc;
}
bool is_gradient_correction() const
{
bool is_gga{false};
for (auto& ixc: xc_func_) {
if (ixc.is_gga()) {
is_gga = true;
}
}
return is_gga;
}
Smooth_periodic_function<double>& vsigma(int ispn__)
{
return *vsigma_[ispn__];
}
inline double vha_el(int ia__) const
{
return vh_el_(ia__);
}
};
#include "Potential/init.hpp"
#include "Potential/generate_d_operator_matrix.hpp"
#include "Potential/generate_pw_coefs.hpp"
#include "Potential/xc.hpp"
#include "Potential/poisson.hpp"
#include "Potential/paw_potential.hpp"
} // namespace sirius
#endif // __POTENTIAL_H__
|
DRB002-antidep1-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A loop with loop-carried anti-dependence.
Data race pair: a[i+1]@67:10 vs. a[i]@67:5
*/
#include <stdlib.h>
#include <stdio.h>
int main(int argc, char* argv[])
{
int i;
int len = 1000;
if (argc>1)
len = atoi(argv[1]);
int a[len];
#pragma omp parallel for simd
for (i=0; i<len; i++)
a[i]= i;
#pragma omp simd
for (i=0;i< len -1 ;i++)
a[i]=a[i+1]+1;
#pragma omp parallel for simd ordered
for (i=0; i<len; i++)
#pragma omp ordered simd
printf("%d\n", a[i]);
return 0;
}
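/* A hedged fix sketch (not part of the benchmark, which is intentionally
 * racy): the anti-dependence disappears if the loop reads from a snapshot
 * copy of the array, e.g.
 *
 *   int *b = malloc(len * sizeof(int));   // b is a hypothetical helper
 *   memcpy(b, a, len * sizeof(int));      // requires <string.h>
 *   #pragma omp parallel for
 *   for (i = 0; i < len - 1; i++)
 *     a[i] = b[i+1] + 1;
 *   free(b);
 */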
|
wtsne_inl.h | /*
*
* Copyright (c) 2014, Nicola Pezzotti (Delft University of Technology)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the Delft University of Technology.
* 4. Neither the name of the Delft University of Technology nor the names of
* its contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY NICOLA PEZZOTTI ''AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL NICOLA PEZZOTTI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
*/
#ifndef WSNE_INL
#define WSNE_INL
#include "hdi/dimensionality_reduction/wtsne.h"
#include "hdi/utils/math_utils.h"
#include "hdi/utils/log_helper_functions.h"
#include "hdi/utils/scoped_timers.h"
#include "weighted_sptree.h"
#include <random>
#ifdef __USE_GCD__
#include <dispatch/dispatch.h>
#endif
#pragma warning( push )
#pragma warning( disable : 4267)
#pragma warning( push )
#pragma warning( disable : 4291)
#pragma warning( push )
#pragma warning( disable : 4996)
#pragma warning( push )
#pragma warning( disable : 4018)
#pragma warning( push )
#pragma warning( disable : 4244)
#pragma warning( pop )
#pragma warning( pop )
#pragma warning( pop )
#pragma warning( pop )
#pragma warning( pop )
namespace hdi{
namespace dr{
/////////////////////////////////////////////////////////////////////////
template <typename scalar, typename sparse_scalar_matrix>
WeightedTSNE<scalar, sparse_scalar_matrix>::Parameters::Parameters():
_seed(-1),
_embedding_dimensionality(2),
_minimum_gain(0.1),
_eta(200),
_momentum(0.2),
_final_momentum(0.5),
_mom_switching_iter(250),
_exaggeration_factor(4),
_remove_exaggeration_iter(250),
_exponential_decay_iter(150)
{}
/////////////////////////////////////////////////////////////////////////
template <typename scalar, typename sparse_scalar_matrix>
WeightedTSNE<scalar, sparse_scalar_matrix>::WeightedTSNE():
_initialized(false),
_logger(nullptr),
_theta(0)
{
}
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::reset(){
_initialized = false;
}
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::clear(){
_embedding->clear();
_initialized = false;
}
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::getEmbeddingPosition(scalar_vector_type& embedding_position, data_handle_type handle)const{
if(!_initialized){
throw std::logic_error("Algorithm must be initialized before use");
}
embedding_position.resize(_params._embedding_dimensionality);
for(int i = 0; i < _params._embedding_dimensionality; ++i){
/* copy into the output parameter (the original overwrote the embedding container itself) */
embedding_position[i] = (*_embedding_container)[handle*_params._embedding_dimensionality + i];
}
}
/////////////////////////////////////////////////////////////////////////
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::initialize(const sparse_scalar_matrix& probabilities, data::Embedding<scalar_type>* embedding, Parameters params){
utils::secureLog(_logger,"Initializing W-tSNE...");
{//Aux data
_params = params;
unsigned int size = probabilities.size();
unsigned int size_sq = probabilities.size()*probabilities.size();
_embedding = embedding;
_embedding_container = &(embedding->getContainer());
_embedding->resize(_params._embedding_dimensionality,size);
_P.resize(size);
_Q.resize(size_sq);
_gradient.resize(size*params._embedding_dimensionality,0);
_previous_gradient.resize(size*params._embedding_dimensionality,0);
_gain.resize(size*params._embedding_dimensionality,1);
}
utils::secureLogValue(_logger,"Number of data points",_P.size());
computeHighDimensionalDistribution(probabilities);
initializeEmbeddingPosition(params._seed);
computeWeights();
_iteration = 0;
_initialized = true;
utils::secureLog(_logger,"Initialization complete!");
}
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::initializeWithJointProbabilityDistribution(const sparse_scalar_matrix& distribution, data::Embedding<scalar_type>* embedding, Parameters params){
utils::secureLog(_logger,"Initializing W-tSNE with a user-defined joint-probability distribution...");
{//Aux data
_params = params;
unsigned int size = distribution.size();
unsigned int size_sq = distribution.size()*distribution.size();
_embedding = embedding;
_embedding_container = &(embedding->getContainer());
_embedding->resize(_params._embedding_dimensionality,size);
_P.resize(size);
_Q.resize(size_sq);
_gradient.resize(size*params._embedding_dimensionality,0);
_previous_gradient.resize(size*params._embedding_dimensionality,0);
_gain.resize(size*params._embedding_dimensionality,1);
}
utils::secureLogValue(_logger,"Number of data points",_P.size());
_P = distribution;
initializeEmbeddingPosition(params._seed);
computeWeights();
_iteration = 0;
_initialized = true;
utils::secureLog(_logger,"Initialization complete!");
}
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::computeHighDimensionalDistribution(const sparse_scalar_matrix& probabilities){
utils::secureLog(_logger,"Computing high-dimensional joint probability distribution...");
const int n = getNumberOfDataPoints();
//Can be improved by using the symmetry of the matrix (half the memory) //TODO
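//Symmetrize the input: P_ij = (p_ij + p_ji) / 2, stored in both (i,j) and (j,i)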
for(int j = 0; j < n; ++j){
for(auto& elem: probabilities[j]){
scalar_type v0 = elem.second;
auto iter = probabilities[elem.first].find(j);
scalar_type v1 = 0.;
if(iter != probabilities[elem.first].end())
v1 = iter->second;
_P[j][elem.first] = static_cast<scalar_type>((v0+v1)*0.5);
_P[elem.first][j] = static_cast<scalar_type>((v0+v1)*0.5);
}
}
}
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::computeWeights(){
_weights.clear();
_weights.resize(_P.size(),0);
for(int i = 0; i < _weights.size(); ++i){
for(auto v: _P[i]){
_weights[i] += v.second;
}
}
utils::secureLogVectorStats(_logger,"Weights",_weights);
}
//! Set weights (overwrites the default weights)
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::setWeights(const scalar_vector_type& weights){
checkAndThrowLogic(weights.size() == _P.size(), "setWeights: wrong size");
_weights = weights;
utils::secureLogVectorStats(_logger,"Weights",_weights);
}
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::initializeEmbeddingPosition(int seed, double multiplier){
utils::secureLog(_logger,"Initializing the embedding...");
if(seed < 0){
std::srand(static_cast<unsigned int>(time(NULL)));
}
else{
std::srand(seed);
}
for(auto& v : (*_embedding_container)){
double x(0.);
double y(0.);
double radius(0.);
do {
x = 2 * (rand() / ((double)RAND_MAX + 1)) - 1;
y = 2 * (rand() / ((double)RAND_MAX + 1)) - 1;
radius = (x * x) + (y * y);
} while((radius >= 1.0) || (radius == 0.0));
radius = sqrt(-2 * log(radius) / radius);
x *= radius;
y *= radius;
v = static_cast<scalar_type>(x * multiplier);
}
}
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::doAnIteration(double mult){
if(!_initialized){
throw std::logic_error("Cannot compute a gradient descent iteration on unitialized data");
}
if(_iteration == _params._mom_switching_iter){
utils::secureLog(_logger,"Switch to final momentum...");
}
if(_iteration == _params._remove_exaggeration_iter){
utils::secureLog(_logger,"Remove exaggeration...");
}
if(_theta == 0){
doAnIterationExact(mult);
}else{
doAnIterationBarnesHut(mult);
}
}
template <typename scalar, typename sparse_scalar_matrix>
scalar WeightedTSNE<scalar, sparse_scalar_matrix>::exaggerationFactor(){
scalar_type exaggeration = 1;
if(_iteration <= _params._remove_exaggeration_iter){
exaggeration = _params._exaggeration_factor;
}else if(_iteration <= (_params._remove_exaggeration_iter + _params._exponential_decay_iter)){
double decay = std::exp(-scalar_type(_iteration-_params._remove_exaggeration_iter)/30.);
exaggeration = 1 + (_params._exaggeration_factor-1)*decay;
//utils::secureLogValue(_logger,"Exaggeration decay...",exaggeration);
}
return exaggeration;
}
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::doAnIterationExact(double mult){
//Compute Low-dimensional distribution
computeLowDimensionalDistribution();
//Compute gradient of the KL function
computeExactGradient(exaggerationFactor());
//Update the embedding based on the gradient
updateTheEmbedding(mult);
}
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::doAnIterationBarnesHut(double mult){
//Compute gradient of the KL function using the Barnes Hut approximation
computeBarnesHutGradient(exaggerationFactor());
//Update the embedding based on the gradient
updateTheEmbedding();
}
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::computeLowDimensionalDistribution(){
const int n = getNumberOfDataPoints();
double sum_Q = 0;
// Loop over all edges in the graph
#ifdef __USE_GCD__
std::cout << "GCD dispatch, wtsne_inl 303.\n";
dispatch_apply(n, dispatch_get_global_queue(0, 0), ^(size_t j) {
#else
#pragma omp parallel for
for(int j = 0; j < n; ++j){
#endif //__USE_GCD__
//_Q[j*n + j] = 0;
for(int i = j+1; i < n; ++i){
const double euclidean_dist_sq(
utils::euclideanDistanceSquared<scalar_type>(
(*_embedding_container).begin()+j*_params._embedding_dimensionality,
(*_embedding_container).begin()+(j+1)*_params._embedding_dimensionality,
(*_embedding_container).begin()+i*_params._embedding_dimensionality,
(*_embedding_container).begin()+(i+1)*_params._embedding_dimensionality
)
);
const double v = 1./(1.+euclidean_dist_sq);
_Q[j*n + i] = static_cast<scalar_type>(v);
_Q[i*n + j] = static_cast<scalar_type>(v);
}
}
#ifdef __USE_GCD__
);
#endif
for(int j = 0; j < n; ++j){
for(int i = 0; i < n; ++i){
sum_Q += _Q[j*n + i]*_weights[j]*_weights[i];
}
}
_normalization_Q = static_cast<scalar_type>(sum_Q);
}
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::computeExactGradient(double exaggeration){
const int n = getNumberOfDataPoints();
const int dim = _params._embedding_dimensionality;
for(int i = 0; i < n; ++i){
for(int d = 0; d < dim; ++d){
_gradient[i * dim + d] = 0;
}
}
for(int i = 0; i < n; ++i){
for(int j = 0; j < n; ++j){
for(int d = 0; d < dim; ++d){
const int idx = i*n + j;
const double distance((*_embedding_container)[i * dim + d] - (*_embedding_container)[j * dim + d]);
const double negative(_weights[i] * _weights[j] * _Q[idx] * _Q[idx] * distance / _normalization_Q);
_gradient[i * dim + d] += static_cast<scalar_type>(-4*negative);
}
}
for(auto& elem: _P[i]){
for(int d = 0; d < dim; ++d){
const int j = elem.first;
const int idx = i*n + j;
const double distance((*_embedding_container)[i * dim + d] - (*_embedding_container)[j * dim + d]);
double p_ij = elem.second/n;
const double positive(p_ij * _Q[idx] * distance);
_gradient[i * dim + d] += static_cast<scalar_type>(4*exaggeration*positive);
}
}
}
}
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::computeBarnesHutGradient(double exaggeration){
typedef double hp_scalar_type;
WeightedSPTree<scalar_type> sptree(_params._embedding_dimensionality,_embedding->getContainer().data(),_weights.data(),getNumberOfDataPoints());
scalar_type sum_Q = .0;
std::vector<hp_scalar_type> positive_forces(getNumberOfDataPoints()*_params._embedding_dimensionality);
#ifdef __USE_GCD__
__block std::vector<hp_scalar_type> negative_forces(getNumberOfDataPoints()*_params._embedding_dimensionality);
#else
std::vector<hp_scalar_type> negative_forces(getNumberOfDataPoints()*_params._embedding_dimensionality);
#endif //__USE_GCD__
sptree.computeEdgeForces(_P, exaggeration, positive_forces.data());
#ifdef __USE_GCD__
__block std::vector<hp_scalar_type> sum_Q_subvalues(getNumberOfDataPoints(),0);
#else
std::vector<hp_scalar_type> sum_Q_subvalues(getNumberOfDataPoints(),0);
#endif //__USE_GCD__
#ifdef __USE_GCD__
std::cout << "GCD dispatch, wtsne_inl 303.\n";
dispatch_apply(getNumberOfDataPoints(), dispatch_get_global_queue(0, 0), ^(size_t n) {
#else
#pragma omp parallel for
for(int n = 0; n < getNumberOfDataPoints(); n++){
#endif //__USE_GCD__
sptree.computeNonEdgeForces(n, _theta, negative_forces.data() + n * _params._embedding_dimensionality, sum_Q_subvalues[n]);
}
#ifdef __USE_GCD__
);
#endif
sum_Q = 0;
for(int n = 0; n < getNumberOfDataPoints(); n++){
sum_Q += sum_Q_subvalues[n];
}
for(int i = 0; i < _gradient.size(); i++){
_gradient[i] = positive_forces[i] - (negative_forces[i] / sum_Q);
}
}
//temp
template <typename T>
T sign(T x) { return (x == .0 ? .0 : (x < .0 ? -1.0 : 1.0)); }
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::updateTheEmbedding(double mult){
for(int i = 0; i < _gradient.size(); ++i){
_gain[i] = static_cast<scalar_type>((sign(_gradient[i]) != sign(_previous_gradient[i])) ? (_gain[i] + .2) : (_gain[i] * .8));
if(_gain[i] < _params._minimum_gain){
_gain[i] = static_cast<scalar_type>(_params._minimum_gain);
}
_gradient[i] = static_cast<scalar_type>((_gradient[i]>0?1:-1)*std::abs(_gradient[i]*_params._eta* _gain[i])/(_params._eta*_gain[i]));
_previous_gradient[i] = static_cast<scalar_type>(((_iteration<_params._mom_switching_iter)?_params._momentum:_params._final_momentum) * _previous_gradient[i] - _params._eta * _gain[i] * _gradient[i]);
(*_embedding_container)[i] += static_cast<scalar_type>(_previous_gradient[i] * mult);
}
++_iteration;
}
template <typename scalar, typename sparse_scalar_matrix>
double WeightedTSNE<scalar, sparse_scalar_matrix>::computeKullbackLeiblerDivergence(){
assert(false);
return 0;
}
}
}
#endif
|
quicksort.h | // -*- C++ -*-
// Copyright (C) 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file parallel/quicksort.h
* @brief Implementation of a unbalanced parallel quicksort (in-place).
* This file is a GNU parallel extension to the Standard C++ Library.
*/
// Written by Johannes Singler.
#ifndef _GLIBCXX_PARALLEL_QUICKSORT_H
#define _GLIBCXX_PARALLEL_QUICKSORT_H 1
#include <parallel/parallel.h>
#include <parallel/partition.h>
namespace __gnu_parallel
{
/** @brief Unbalanced quicksort divide step.
* @param __begin Begin iterator of subsequence.
* @param __end End iterator of subsequence.
* @param __comp Comparator.
* @param __pivot_rank Desired __rank of the pivot.
* @param __num_samples Choose pivot from that many samples.
* @param __num_threads Number of threads that are allowed to work on
* this part.
*/
template<typename _RAIter, typename _Compare>
typename std::iterator_traits<_RAIter>::difference_type
__parallel_sort_qs_divide(_RAIter __begin, _RAIter __end,
_Compare __comp, typename std::iterator_traits
<_RAIter>::difference_type __pivot_rank,
typename std::iterator_traits
<_RAIter>::difference_type
__num_samples, _ThreadIndex __num_threads)
{
typedef std::iterator_traits<_RAIter> _TraitsType;
typedef typename _TraitsType::value_type _ValueType;
typedef typename _TraitsType::difference_type _DifferenceType;
_DifferenceType __n = __end - __begin;
__num_samples = std::min(__num_samples, __n);
// Allocate uninitialized, to avoid default constructor.
_ValueType* __samples = static_cast<_ValueType*>
(::operator new(__num_samples * sizeof(_ValueType)));
for (_DifferenceType __s = 0; __s < __num_samples; ++__s)
{
const unsigned long long __index = static_cast<unsigned long long>
(__s) * __n / __num_samples;
::new(&(__samples[__s])) _ValueType(__begin[__index]);
}
__gnu_sequential::sort(__samples, __samples + __num_samples, __comp);
_ValueType& __pivot = __samples[__pivot_rank * __num_samples / __n];
__gnu_parallel::__binder2nd<_Compare, _ValueType, _ValueType, bool>
__pred(__comp, __pivot);
_DifferenceType __split = __parallel_partition(__begin, __end,
__pred, __num_threads);
for (_DifferenceType __s = 0; __s < __num_samples; ++__s)
__samples[__s].~_ValueType();
::operator delete(__samples);
return __split;
}
/** @brief Unbalanced quicksort conquer step.
* @param __begin Begin iterator of subsequence.
* @param __end End iterator of subsequence.
* @param __comp Comparator.
* @param __num_threads Number of threads that are allowed to work on
* this part.
*/
template<typename _RAIter, typename _Compare>
void
__parallel_sort_qs_conquer(_RAIter __begin, _RAIter __end,
_Compare __comp,
_ThreadIndex __num_threads)
{
typedef std::iterator_traits<_RAIter> _TraitsType;
typedef typename _TraitsType::value_type _ValueType;
typedef typename _TraitsType::difference_type _DifferenceType;
if (__num_threads <= 1)
{
__gnu_sequential::sort(__begin, __end, __comp);
return;
}
_DifferenceType __n = __end - __begin, __pivot_rank;
if (__n <= 1)
return;
_ThreadIndex __num_threads_left;
if ((__num_threads % 2) == 1)
__num_threads_left = __num_threads / 2 + 1;
else
__num_threads_left = __num_threads / 2;
__pivot_rank = __n * __num_threads_left / __num_threads;
_DifferenceType __split = __parallel_sort_qs_divide
(__begin, __end, __comp, __pivot_rank,
_Settings::get().sort_qs_num_samples_preset, __num_threads);
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
__parallel_sort_qs_conquer(__begin, __begin + __split,
__comp, __num_threads_left);
#pragma omp section
__parallel_sort_qs_conquer(__begin + __split, __end,
__comp, __num_threads - __num_threads_left);
}
}
/** @brief Unbalanced quicksort main call.
* @param __begin Begin iterator of input sequence.
* @param __end End iterator input sequence, ignored.
* @param __comp Comparator.
* @param __num_threads Number of threads that are allowed to work on
* this part.
*/
template<typename _RAIter, typename _Compare>
void
__parallel_sort_qs(_RAIter __begin, _RAIter __end,
_Compare __comp,
_ThreadIndex __num_threads)
{
_GLIBCXX_CALL(__n)
typedef std::iterator_traits<_RAIter> _TraitsType;
typedef typename _TraitsType::value_type _ValueType;
typedef typename _TraitsType::difference_type _DifferenceType;
_DifferenceType __n = __end - __begin;
// At least one element per processor.
if (__num_threads > __n)
__num_threads = static_cast<_ThreadIndex>(__n);
__parallel_sort_qs_conquer(
__begin, __begin + __n, __comp, __num_threads);
}
} //namespace __gnu_parallel
#endif /* _GLIBCXX_PARALLEL_QUICKSORT_H */
|
BKTree.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifndef _SPTAG_COMMON_BKTREE_H_
#define _SPTAG_COMMON_BKTREE_H_
#include <stack>
#include <string>
#include <vector>
#include <shared_mutex>
#include "../VectorIndex.h"
#include "CommonUtils.h"
#include "QueryResultSet.h"
#include "WorkSpace.h"
#include "Dataset.h"
#include "DistanceUtils.h"
namespace SPTAG
{
namespace COMMON
{
// node type for storing BKT
struct BKTNode
{
SizeType centerid;
SizeType childStart;
SizeType childEnd;
BKTNode(SizeType cid = -1) : centerid(cid), childStart(-1), childEnd(-1) {}
};
template <typename T>
struct KmeansArgs {
int _K;
int _DK;
DimensionType _D;
int _T;
DistCalcMethod _M;
T* centers;
T* newTCenters;
SizeType* counts;
float* newCenters;
SizeType* newCounts;
int* label;
SizeType* clusterIdx;
float* clusterDist;
float* weightedCounts;
float* newWeightedCounts;
float(*fComputeDistance)(const T* pX, const T* pY, DimensionType length);
KmeansArgs(int k, DimensionType dim, SizeType datasize, int threadnum, DistCalcMethod distMethod) : _K(k), _DK(k), _D(dim), _T(threadnum), _M(distMethod) {
centers = (T*)aligned_malloc(sizeof(T) * k * dim, ALIGN);
newTCenters = (T*)aligned_malloc(sizeof(T) * k * dim, ALIGN);
counts = new SizeType[k];
newCenters = new float[threadnum * k * dim];
newCounts = new SizeType[threadnum * k];
label = new int[datasize];
clusterIdx = new SizeType[threadnum * k];
clusterDist = new float[threadnum * k];
weightedCounts = new float[k];
newWeightedCounts = new float[threadnum * k];
fComputeDistance = COMMON::DistanceCalcSelector<T>(distMethod);
}
~KmeansArgs() {
aligned_free(centers);
aligned_free(newTCenters);
delete[] counts;
delete[] newCenters;
delete[] newCounts;
delete[] label;
delete[] clusterIdx;
delete[] clusterDist;
delete[] weightedCounts;
delete[] newWeightedCounts;
}
inline void ClearCounts() {
memset(newCounts, 0, sizeof(SizeType) * _T * _K);
memset(newWeightedCounts, 0, sizeof(float) * _T * _K);
}
inline void ClearCenters() {
memset(newCenters, 0, sizeof(float) * _T * _K * _D);
}
inline void ClearDists(float dist) {
for (int i = 0; i < _T * _K; ++i) {
clusterIdx[i] = -1;
clusterDist[i] = dist;
}
}
void Shuffle(std::vector<SizeType>& indices, SizeType first, SizeType last) {
SizeType* pos = new SizeType[_K];
pos[0] = first;
for (int k = 1; k < _K; k++) pos[k] = pos[k - 1] + newCounts[k - 1];
for (int k = 0; k < _K; k++) {
if (newCounts[k] == 0) continue;
SizeType i = pos[k];
while (newCounts[k] > 0) {
SizeType swapid = pos[label[i]] + newCounts[label[i]] - 1;
newCounts[label[i]]--;
Kokkos::swap(indices[i], indices[swapid]);
Kokkos::swap(label[i], label[swapid]);
}
while (indices[i] != clusterIdx[k]) i++;
Kokkos::swap(indices[i], indices[pos[k] + counts[k] - 1]);
}
delete[] pos;
}
};
template <typename T>
float RefineCenters(const Dataset<T>& data, KmeansArgs<T>& args)
{
int maxcluster = -1;
SizeType maxCount = 0;
for (int k = 0; k < args._DK; k++) {
if (args.counts[k] > maxCount && args.newCounts[k] > 0 && DistanceUtils::ComputeDistance((T*)data[args.clusterIdx[k]], args.centers + k * args._D, args._D, DistCalcMethod::L2) > 1e-6)
{
maxcluster = k;
maxCount = args.counts[k];
}
}
if (maxcluster != -1 && (args.clusterIdx[maxcluster] < 0 || args.clusterIdx[maxcluster] >= data.R()))
LOG(Helper::LogLevel::LL_Debug, "maxcluster:%d(%d) Error dist:%f\n", maxcluster, args.newCounts[maxcluster], args.clusterDist[maxcluster]);
float diff = 0;
for (int k = 0; k < args._DK; k++) {
T* TCenter = args.newTCenters + k * args._D;
if (args.counts[k] == 0) {
if (maxcluster != -1) {
//int nextid = Utils::rand_int(last, first);
//while (args.label[nextid] != maxcluster) nextid = Utils::rand_int(last, first);
SizeType nextid = args.clusterIdx[maxcluster];
std::memcpy(TCenter, data[nextid], sizeof(T)*args._D);
}
else {
std::memcpy(TCenter, args.centers + k * args._D, sizeof(T)*args._D);
}
}
else {
float* currCenters = args.newCenters + k * args._D;
for (DimensionType j = 0; j < args._D; ++j) currCenters[j] /= args.counts[k];
if (args._M == DistCalcMethod::Cosine) {
COMMON::Utils::Normalize(currCenters, args._D, COMMON::Utils::GetBase<T>());
}
for (DimensionType j = 0; j < args._D; ++j) TCenter[j] = (T)(currCenters[j]);
}
diff += args.fComputeDistance(args.centers + k*args._D, TCenter, args._D);
}
return diff;
}
template <typename T>
inline float KmeansAssign(const Dataset<T>& data,
std::vector<SizeType>& indices,
const SizeType first, const SizeType last, KmeansArgs<T>& args,
const bool updateCenters, float lambda) {
float currDist = 0;
SizeType subsize = (last - first - 1) / args._T + 1;
#pragma omp parallel for num_threads(args._T) shared(data, indices) reduction(+:currDist)
for (int tid = 0; tid < args._T; tid++)
{
SizeType istart = first + tid * subsize;
SizeType iend = std::min(first + (tid + 1) * subsize, last);
SizeType *inewCounts = args.newCounts + tid * args._K;
float *inewCenters = args.newCenters + tid * args._K * args._D;
SizeType * iclusterIdx = args.clusterIdx + tid * args._K;
float * iclusterDist = args.clusterDist + tid * args._K;
float idist = 0;
for (SizeType i = istart; i < iend; ++i) {
int clusterid = 0;
float smallestDist = MaxDist;
for (int k = 0; k < args._DK; k++) {
float dist = args.fComputeDistance(data[indices[i]], args.centers + k*args._D, args._D) + lambda*args.counts[k];
if (dist > -MaxDist && dist < smallestDist) {
clusterid = k; smallestDist = dist;
}
}
args.label[i] = clusterid;
inewCounts[clusterid]++;
idist += smallestDist;
if (updateCenters) {
const T* v = (const T*)data[indices[i]];
float* center = inewCenters + clusterid*args._D;
for (DimensionType j = 0; j < args._D; ++j) center[j] += v[j];
if (smallestDist > iclusterDist[clusterid]) {
iclusterDist[clusterid] = smallestDist;
iclusterIdx[clusterid] = indices[i];
}
}
else {
if (smallestDist <= iclusterDist[clusterid]) {
iclusterDist[clusterid] = smallestDist;
iclusterIdx[clusterid] = indices[i];
}
}
}
currDist += idist;
}
for (int i = 1; i < args._T; ++i) {
for (int k = 0; k < args._DK; k++)
args.newCounts[k] += args.newCounts[i*args._K + k];
}
if (updateCenters) {
for (int i = 1; i < args._T; ++i) {
float* currCenter = args.newCenters + i*args._K*args._D;
for (uint64 j = 0; j < ((uint64)args._DK) * args._D; ++j) args.newCenters[j] += currCenter[j];
for (int k = 0; k < args._DK; k++) {
if (args.clusterIdx[i*args._K + k] != -1 && args.clusterDist[i*args._K + k] > args.clusterDist[k]) {
args.clusterDist[k] = args.clusterDist[i*args._K + k];
args.clusterIdx[k] = args.clusterIdx[i*args._K + k];
}
}
}
}
else {
for (int i = 1; i < args._T; ++i) {
for (int k = 0; k < args._DK; k++) {
if (args.clusterIdx[i*args._K + k] != -1 && args.clusterDist[i*args._K + k] <= args.clusterDist[k]) {
args.clusterDist[k] = args.clusterDist[i*args._K + k];
args.clusterIdx[k] = args.clusterIdx[i*args._K + k];
}
}
}
}
return currDist;
}
template <typename T>
inline void InitCenters(const Dataset<T>& data,
std::vector<SizeType>& indices, const SizeType first, const SizeType last,
KmeansArgs<T>& args, int samples, int tryIters) {
SizeType batchEnd = std::min(first + samples, last);
float currDist, minClusterDist = MaxDist;
for (int numKmeans = 0; numKmeans < tryIters; numKmeans++) {
for (int k = 0; k < args._DK; k++) {
SizeType randid = COMMON::Utils::rand(last, first);
std::memcpy(args.centers + k*args._D, data[indices[randid]], sizeof(T)*args._D);
}
args.ClearCounts();
args.ClearDists(MaxDist);
currDist = KmeansAssign(data, indices, first, batchEnd, args, false, 0);
if (currDist < minClusterDist) {
minClusterDist = currDist;
memcpy(args.newTCenters, args.centers, sizeof(T)*args._K*args._D);
memcpy(args.counts, args.newCounts, sizeof(SizeType) * args._K);
}
}
}
template <typename T>
int KmeansClustering(const Dataset<T>& data,
std::vector<SizeType>& indices, const SizeType first, const SizeType last,
KmeansArgs<T>& args, int samples = 1000) {
InitCenters(data, indices, first, last, args, samples, 3);
SizeType batchEnd = std::min(first + samples, last);
float currDiff, currDist, minClusterDist = MaxDist;
int noImprovement = 0;
for (int iter = 0; iter < 100; iter++) {
std::memcpy(args.centers, args.newTCenters, sizeof(T)*args._K*args._D);
std::random_shuffle(indices.begin() + first, indices.begin() + last);
args.ClearCenters();
args.ClearCounts();
args.ClearDists(-MaxDist);
currDist = KmeansAssign(data, indices, first, batchEnd, args, true,
COMMON::Utils::GetBase<T>() * COMMON::Utils::GetBase<T>() / (100.0f * (batchEnd - first)));
std::memcpy(args.counts, args.newCounts, sizeof(SizeType) * args._K);
if (currDist < minClusterDist) {
noImprovement = 0;
minClusterDist = currDist;
}
else {
noImprovement++;
}
currDiff = RefineCenters(data, args);
if (currDiff < 1e-3 || noImprovement >= 5) break;
}
args.ClearCounts();
args.ClearDists(MaxDist);
currDist = KmeansAssign(data, indices, first, last, args, false, 0);
std::memcpy(args.counts, args.newCounts, sizeof(SizeType) * args._K);
int numClusters = 0;
for (int i = 0; i < args._K; ++i) if (args.counts[i] > 0) numClusters++;
if (numClusters <= 1) {
return numClusters;
}
args.Shuffle(indices, first, last);
return numClusters;
}
class BKTree
{
public:
BKTree(): m_iTreeNumber(1), m_iBKTKmeansK(32), m_iBKTLeafSize(8), m_iSamples(1000), m_lock(new std::shared_timed_mutex) {}
BKTree(const BKTree& other): m_iTreeNumber(other.m_iTreeNumber),
m_iBKTKmeansK(other.m_iBKTKmeansK),
m_iBKTLeafSize(other.m_iBKTLeafSize),
m_iSamples(other.m_iSamples),
m_lock(new std::shared_timed_mutex) {}
~BKTree() {}
inline const BKTNode& operator[](SizeType index) const { return m_pTreeRoots[index]; }
inline BKTNode& operator[](SizeType index) { return m_pTreeRoots[index]; }
inline SizeType size() const { return (SizeType)m_pTreeRoots.size(); }
inline SizeType sizePerTree() const {
std::shared_lock<std::shared_timed_mutex> lock(*m_lock);
return (SizeType)m_pTreeRoots.size() - m_pTreeStart.back();
}
inline const std::unordered_map<SizeType, SizeType>& GetSampleMap() const { return m_pSampleCenterMap; }
template <typename T>
void Rebuild(const Dataset<T>& data, DistCalcMethod distMethod)
{
BKTree newTrees(*this);
newTrees.BuildTrees<T>(data, distMethod, 1);
std::unique_lock<std::shared_timed_mutex> lock(*m_lock);
m_pTreeRoots.swap(newTrees.m_pTreeRoots);
m_pTreeStart.swap(newTrees.m_pTreeStart);
m_pSampleCenterMap.swap(newTrees.m_pSampleCenterMap);
}
template <typename T>
void BuildTrees(const Dataset<T>& data, DistCalcMethod distMethod, int numOfThreads, std::vector<SizeType>* indices = nullptr, std::vector<SizeType>* reverseIndices = nullptr, bool dynamicK = false)
{
struct BKTStackItem {
SizeType index, first, last;
BKTStackItem(SizeType index_, SizeType first_, SizeType last_) : index(index_), first(first_), last(last_) {}
};
std::stack<BKTStackItem> ss;
std::vector<SizeType> localindices;
if (indices == nullptr) {
localindices.resize(data.R());
for (SizeType i = 0; i < localindices.size(); ++i) localindices[i] = i;
}
else {
localindices.assign(indices->begin(), indices->end());
}
KmeansArgs<T> args(m_iBKTKmeansK, data.C(), (SizeType)localindices.size(), numOfThreads, distMethod);
m_pSampleCenterMap.clear();
for (char i = 0; i < m_iTreeNumber; ++i)
{
std::random_shuffle(localindices.begin(), localindices.end());
m_pTreeStart.push_back((SizeType)m_pTreeRoots.size());
m_pTreeRoots.emplace_back((SizeType)localindices.size());
LOG(Helper::LogLevel::LL_Info, "Start to build BKTree %d\n", i + 1);
ss.push(BKTStackItem(m_pTreeStart[i], 0, (SizeType)localindices.size()));
while (!ss.empty()) {
BKTStackItem item = ss.top(); ss.pop();
SizeType newBKTid = (SizeType)m_pTreeRoots.size();
m_pTreeRoots[item.index].childStart = newBKTid;
if (item.last - item.first <= m_iBKTLeafSize) {
for (SizeType j = item.first; j < item.last; ++j) {
SizeType cid = (reverseIndices == nullptr)? localindices[j]: reverseIndices->at(localindices[j]);
m_pTreeRoots.emplace_back(cid);
}
}
else { // clustering the data into BKTKmeansK clusters
if (dynamicK) {
args._DK = std::std::min<int>((item.last - item.first) / m_iBKTLeafSize + 1, m_iBKTKmeansK);
args._DK = std::max<int>(args._DK, 2);
}
int numClusters = KmeansClustering(data, localindices, item.first, item.last, args, m_iSamples);
if (numClusters <= 1) {
SizeType end = std::min(item.last + 1, (SizeType)localindices.size());
std::sort(localindices.begin() + item.first, localindices.begin() + end);
m_pTreeRoots[item.index].centerid = (reverseIndices == nullptr) ? localindices[item.first] : reverseIndices->at(localindices[item.first]);
m_pTreeRoots[item.index].childStart = -m_pTreeRoots[item.index].childStart;
for (SizeType j = item.first + 1; j < end; ++j) {
SizeType cid = (reverseIndices == nullptr) ? localindices[j] : reverseIndices->at(localindices[j]);
m_pTreeRoots.emplace_back(cid);
m_pSampleCenterMap[cid] = m_pTreeRoots[item.index].centerid;
}
m_pSampleCenterMap[-1 - m_pTreeRoots[item.index].centerid] = item.index;
}
else {
for (int k = 0; k < m_iBKTKmeansK; k++) {
if (args.counts[k] == 0) continue;
SizeType cid = (reverseIndices == nullptr) ? localindices[item.first + args.counts[k] - 1] : reverseIndices->at(localindices[item.first + args.counts[k] - 1]);
m_pTreeRoots.emplace_back(cid);
if (args.counts[k] > 1) ss.push(BKTStackItem(newBKTid++, item.first, item.first + args.counts[k] - 1));
item.first += args.counts[k];
}
}
}
m_pTreeRoots[item.index].childEnd = (SizeType)m_pTreeRoots.size();
}
m_pTreeRoots.emplace_back(-1);
LOG(Helper::LogLevel::LL_Info, "%d BKTree built, %zu %zu\n", i + 1, m_pTreeRoots.size() - m_pTreeStart[i], localindices.size());
}
}
inline std::uint64_t BufferSize() const
{
return sizeof(int) + sizeof(SizeType) * m_iTreeNumber +
sizeof(SizeType) + sizeof(BKTNode) * m_pTreeRoots.size();
}
ErrorCode SaveTrees(std::shared_ptr<Helper::DiskPriorityIO> p_out) const
{
std::shared_lock<std::shared_timed_mutex> lock(*m_lock);
IOBINARY(p_out, WriteBinary, sizeof(m_iTreeNumber), (char*)&m_iTreeNumber);
IOBINARY(p_out, WriteBinary, sizeof(SizeType) * m_iTreeNumber, (char*)m_pTreeStart.data());
SizeType treeNodeSize = (SizeType)m_pTreeRoots.size();
IOBINARY(p_out, WriteBinary, sizeof(treeNodeSize), (char*)&treeNodeSize);
IOBINARY(p_out, WriteBinary, sizeof(BKTNode) * treeNodeSize, (char*)m_pTreeRoots.data());
LOG(Helper::LogLevel::LL_Info, "Save BKT (%d,%d) Finish!\n", m_iTreeNumber, treeNodeSize);
return ErrorCode::Success;
}
ErrorCode SaveTrees(std::string sTreeFileName) const
{
LOG(Helper::LogLevel::LL_Info, "Save BKT to %s\n", sTreeFileName.c_str());
auto ptr = f_createIO();
if (ptr == nullptr || !ptr->Initialize(sTreeFileName.c_str(), std::ios::binary | std::ios::out)) return ErrorCode::FailedCreateFile;
return SaveTrees(ptr);
}
ErrorCode LoadTrees(char* pBKTMemFile)
{
m_iTreeNumber = *((int*)pBKTMemFile);
pBKTMemFile += sizeof(int);
m_pTreeStart.resize(m_iTreeNumber);
memcpy(m_pTreeStart.data(), pBKTMemFile, sizeof(SizeType) * m_iTreeNumber);
pBKTMemFile += sizeof(SizeType)*m_iTreeNumber;
SizeType treeNodeSize = *((SizeType*)pBKTMemFile);
pBKTMemFile += sizeof(SizeType);
m_pTreeRoots.resize(treeNodeSize);
memcpy(m_pTreeRoots.data(), pBKTMemFile, sizeof(BKTNode) * treeNodeSize);
if (m_pTreeRoots.size() > 0 && m_pTreeRoots.back().centerid != -1) m_pTreeRoots.emplace_back(-1);
LOG(Helper::LogLevel::LL_Info, "Load BKT (%d,%d) Finish!\n", m_iTreeNumber, treeNodeSize);
return ErrorCode::Success;
}
ErrorCode LoadTrees(std::shared_ptr<Helper::DiskPriorityIO> p_input)
{
IOBINARY(p_input, ReadBinary, sizeof(m_iTreeNumber), (char*)&m_iTreeNumber);
m_pTreeStart.resize(m_iTreeNumber);
IOBINARY(p_input, ReadBinary, sizeof(SizeType) * m_iTreeNumber, (char*)m_pTreeStart.data());
SizeType treeNodeSize;
IOBINARY(p_input, ReadBinary, sizeof(treeNodeSize), (char*)&treeNodeSize);
m_pTreeRoots.resize(treeNodeSize);
IOBINARY(p_input, ReadBinary, sizeof(BKTNode) * treeNodeSize, (char*)m_pTreeRoots.data());
if (m_pTreeRoots.size() > 0 && m_pTreeRoots.back().centerid != -1) m_pTreeRoots.emplace_back(-1);
LOG(Helper::LogLevel::LL_Info, "Load BKT (%d,%d) Finish!\n", m_iTreeNumber, treeNodeSize);
return ErrorCode::Success;
}
ErrorCode LoadTrees(std::string sTreeFileName)
{
LOG(Helper::LogLevel::LL_Info, "Load BKT From %s\n", sTreeFileName.c_str());
auto ptr = f_createIO();
if (ptr == nullptr || !ptr->Initialize(sTreeFileName.c_str(), std::ios::binary | std::ios::in)) return ErrorCode::FailedOpenFile;
return LoadTrees(ptr);
}
template <typename T>
void InitSearchTrees(const Dataset<T>& data, float(*fComputeDistance)(const T* pX, const T* pY, DimensionType length), const COMMON::QueryResultSet<T> &p_query, COMMON::WorkSpace &p_space) const
{
for (char i = 0; i < m_iTreeNumber; ++i) {
const BKTNode& node = m_pTreeRoots[m_pTreeStart[i]];
if (node.childStart < 0) {
p_space.m_SPTQueue.insert(COMMON::HeapCell(m_pTreeStart[i], fComputeDistance(p_query.GetTarget(), data[node.centerid], data.C())));
}
else {
for (SizeType begin = node.childStart; begin < node.childEnd; begin++) {
SizeType index = m_pTreeRoots[begin].centerid;
p_space.m_SPTQueue.insert(COMMON::HeapCell(begin, fComputeDistance(p_query.GetTarget(), data[index], data.C())));
}
}
}
}
template <typename T>
void SearchTrees(const Dataset<T>& data, float(*fComputeDistance)(const T* pX, const T* pY, DimensionType length), const COMMON::QueryResultSet<T> &p_query,
COMMON::WorkSpace &p_space, const int p_limits) const
{
while (!p_space.m_SPTQueue.empty())
{
COMMON::HeapCell bcell = p_space.m_SPTQueue.pop();
const BKTNode& tnode = m_pTreeRoots[bcell.node];
if (tnode.childStart < 0) {
if (!p_space.CheckAndSet(tnode.centerid)) {
p_space.m_iNumberOfCheckedLeaves++;
p_space.m_NGQueue.insert(COMMON::HeapCell(tnode.centerid, bcell.distance));
}
if (p_space.m_iNumberOfCheckedLeaves >= p_limits) break;
}
else {
if (!p_space.CheckAndSet(tnode.centerid)) {
p_space.m_NGQueue.insert(COMMON::HeapCell(tnode.centerid, bcell.distance));
}
for (SizeType begin = tnode.childStart; begin < tnode.childEnd; begin++) {
SizeType index = m_pTreeRoots[begin].centerid;
p_space.m_SPTQueue.insert(COMMON::HeapCell(begin, fComputeDistance(p_query.GetTarget(), data[index], data.C())));
}
}
}
}
private:
std::vector<SizeType> m_pTreeStart;
std::vector<BKTNode> m_pTreeRoots;
std::unordered_map<SizeType, SizeType> m_pSampleCenterMap;
public:
std::unique_ptr<std::shared_timed_mutex> m_lock;
int m_iTreeNumber, m_iBKTKmeansK, m_iBKTLeafSize, m_iSamples;
};
}
}
#endif
|
ast-dump-openmp-critical.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test() {
#pragma omp critical
;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-critical.c:3:1, line:6:1> line:3:6 test 'void ()'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:6:1>
// CHECK-NEXT: `-OMPCriticalDirective {{.*}} <line:4:1, col:21>
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: |-NullStmt {{.*}} <col:3> openmp_structured_block
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-critical.c:4:1) *const restrict'
|
keystore_fmt_plug.c | /* Java KeyStore cracker. Written by Dhiru Kholia <dhiru at openwall.com> and
* Narendra Kangralkar <narendrakangralkar at gmail.com>.
*
* Input Format: $keystore$target$data_length$data$hash$nkeys$keylength$keydata$keylength$keydata...
*
* This software is Copyright (c) 2013, Dhiru Kholia <dhiru.kholia at gmail.com>
* and Narendra Kangralkar <narendrakangralkar at gmail.com> and it is hereby
* released to the general public under the following terms: *
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_keystore;
#elif FMT_REGISTERS_H
john_register_one(&fmt_keystore);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "sha.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#define OMP_SCALE 64
#endif
#include "memdbg.h"
#define FORMAT_LABEL "keystore"
#define FORMAT_NAME "Java KeyStore"
#define ALGORITHM_NAME "SHA1 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 20
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 4
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define SZ 819200
static struct fmt_tests keystore_tests[] = {
{"$keystore$0$2126$feedfeed000000020000000100000001000f616e64726f696464656275676b65790000013c3ea72ab000000501308204fd300e060a2b060104012a021101010500048204e9e76fea55eed58e4257c253b670948abb18093fbbb667f00807560242f17a4b3cd8b90d0e2a5c6c96f758f45e0e2320039c10af4ecc95e56930fd85713318da506bb48fa586b5caf7c286cf3b66134cb0e13dcdbc665680fb1214d9db2405ccb297acdefd4f5f7cb1c1babd5b77414223b45ae11ab0ec0a2ce5423a6ab69f372adb79a38973a0fde89f9b1e8ef62de04a5e6b35008ce3191c350f98a98ed917ccfc3524f9a4786a3ab055cee25efb118f67d73cacfdd5a3f0ca04399d2b31acfffc63ab6b47f371ff879768ef84bc8c58bcfaab1539e6343cf7b81d0446f57abbeb84fb20b540616aabbfd4c823acb2124ea25538c7531609b72b8da90327a8a3845bcfd69d659a1a77c35efb0d62651e4178459dfde9e165edc6d52cc3d8fee78e3132346588b09e3d27e1400421d33e88748ed1c01af1dc6064a71c991e0322e72c55ed5bcd8c232048bddfecd299d4d9c296639866dd21ad073a4993733b44bac4d6a77eec05cda65d5d9ad0a42a5aa9d443e3ba7ea5744e7fdc2617f527cd9cf480bce033bd5eec6746b2a58328aeed26757664109e1046c93e2377db18c58c35828916f4a42964aae2fe75ad944896bd321ae92cd5723735b37f85250a635a8d1875d3efb2ffbcabc3602ea3b6952da060ec1d1c0a961b1a50836dee911a166e09a33d036d6ef7dc988545b580841945a8718b178bb06ef8e78c6703a496cf66990d57b696b2117922ee1855dff439b2bda3201b145fdb4533b7d2cfa22291a79bac67bb6b3d963dd4137b6208931f02c3ee30bfd0731443edadd5bfffec0147f5f2bd13930deace26fec0ebf0c1befe1294875fb9d8a08919fdc1697ec78d1b86c03a0db4e61bd6a9db6803fdd8e2547ead44bd48cf223b964b0c6903ede0fc0e1b7d02b83ba18ed649bc0e40896ff7cde1d092a9f30314da8fc67d113c79fe7046da75bc090b08b3f31a5d0feb33abab2c608e3afaca1521f2809ae79c14e5ab16d7fa319ddc4dbae61cf41bd15829055970f26361fc1ae22a15e401b25eb500411e70a3cacca38e0d59a6add6513c02d0e6a766303e231d8adf8368b1579e7d58a7d3a5981542c9b8fec0b1780713031fefa60d93755215cbbc34f27634537b6c4fe391578be1a3547fc97d1eeb3e8b11444e8ad99902911fba55034a2796d791039bb29bd193406f05b942f69d47a4a236a64f610e7808387586f4a96a84059e93b11355ecd9125e7a805503e41f4097893b043c7d539d76933515c8fbde11f2a69a6f47aebbac3ed29b0231b3a74ecc9a5421ad61c995a039e44c0a8717dd6e5efbdc2f6ab8daefbc58867ca2e852780c66d1163a03662c34b5365983405093452bb004f78eb973a804edb1b4e8214ab982ed9c81992cc508d8852288fee4ced3af41cca7baaddb828830f3e7dd7c92610def60bbaf6a866e84ea81bd4e88a5b5a035b15b370f942af17f213706c681a59da20b150697c188edb4ac8b59b3babf9c895078f268940aa805c15a2712042c22ce5c44a62554d5f2efb6db179e1db29570b6b063d00349a0273277751e6adf32b6d36b02cb81025d80e620b61a418b0584441c087ce75ed03c871dfe8463a9a3641b036e849fd0fdc9b381ebe43e067353642f182d67ef6bef43463dc6b8d7abd035677b443440c7624d91baa11002e193d86a76974eef4f6fb44a8c440b73ddb323e9eb8f7fdd67aa368ce6aefdff1060e6a519d48b28718b1548e4665360f141d5e16027f0e7c41d07c582dd2a29fa55a00f000000010005582e353039000003113082030d308201f5a00302010202043e310348300d06092a864886f70d01010b05003037310b30090603550406130255533110300e060355040a1307416e64726f6964311630140603550403130d416e64726f6964204465627567301e170d3133303131353134343030385a170d3133303431353134343030385a3037310b30090603550406130255533110300e060355040a1307416e64726f6964311630140603550403130d416e64726f696420446562756730820122300d06092a864886f70d01010105000382010f003082010a02820101009116fccceb121c8951fb12992ef59393a8c2aeab4ec76a30a71d3aca40a277ab1500613c30bda5472bc15812bdbe9395b4a6009edaf94ca7427cd94ca840c0ac9d42ab8246a401628dbba7acb408738929b75f319d496e8594afd75423c07299ec195efce351b7f2b730ad5e61ab292a4783611cdad41139302ada3e239656c2ec842a59418efc711072e75193cfba1105a1980a631f4a513e4116a89806a47f8b308c03684e2ce83e03c40c438445143fa3fab756909e101f89410a35bb6e6a5cbdcef19d0359c8ed7862fe7ae7f81c32a9a75f7241
9f89eddbe4acc4373e45a390fd185ae3b28adb8445c4e38e30773acad396788428b0321936f241e905c50203010001a321301f301d0603551d0e041604148c2df598ae53bebe11c4e4696abc6cad6bce4286300d06092a864886f70d01010b05000382010100507e62f723154b2e818140fbc47547c8a600f97a580de244afdf6cdc02977aa7fb990c77a0d79d3ef53aadcf9d7705b385c365e3e06bf15de1a9d3f5c6b6b40fc4b629f763da8f12fc16a005b66026de2be8f1144d37ef14fc1c99dc13dd33fc750898a7ac9e2a12543402ba5021432a8453d38b4879a95736f65956d13d92d96b6f546b853c92f0cc51a98dcd233076ae285d5ed44601f1fe361974c74067eb263386fe8e085e8b20c3cd72768d4265bd9bf4937b2aeae3323c6289dfe75e820907ba38e85b3fc2ceb44e770b91babfdf1d003bbc56ed7066f97ba86e0648ff0874a31c1563d52f42f38005b3698f800be11257f405b185ca421113072f8531$a8ab7a46059faddb183f66d4aef78f47911c88aa$1$1281$308204fd300e060a2b060104012a021101010500048204e9e76fea55eed58e4257c253b670948abb18093fbbb667f00807560242f17a4b3cd8b90d0e2a5c6c96f758f45e0e2320039c10af4ecc95e56930fd85713318da506bb48fa586b5caf7c286cf3b66134cb0e13dcdbc665680fb1214d9db2405ccb297acdefd4f5f7cb1c1babd5b77414223b45ae11ab0ec0a2ce5423a6ab69f372adb79a38973a0fde89f9b1e8ef62de04a5e6b35008ce3191c350f98a98ed917ccfc3524f9a4786a3ab055cee25efb118f67d73cacfdd5a3f0ca04399d2b31acfffc63ab6b47f371ff879768ef84bc8c58bcfaab1539e6343cf7b81d0446f57abbeb84fb20b540616aabbfd4c823acb2124ea25538c7531609b72b8da90327a8a3845bcfd69d659a1a77c35efb0d62651e4178459dfde9e165edc6d52cc3d8fee78e3132346588b09e3d27e1400421d33e88748ed1c01af1dc6064a71c991e0322e72c55ed5bcd8c232048bddfecd299d4d9c296639866dd21ad073a4993733b44bac4d6a77eec05cda65d5d9ad0a42a5aa9d443e3ba7ea5744e7fdc2617f527cd9cf480bce033bd5eec6746b2a58328aeed26757664109e1046c93e2377db18c58c35828916f4a42964aae2fe75ad944896bd321ae92cd5723735b37f85250a635a8d1875d3efb2ffbcabc3602ea3b6952da060ec1d1c0a961b1a50836dee911a166e09a33d036d6ef7dc988545b580841945a8718b178bb06ef8e78c6703a496cf66990d57b696b2117922ee1855dff439b2bda3201b145fdb4533b7d2cfa22291a79bac67bb6b3d963dd4137b6208931f02c3ee30bfd0731443edadd5bfffec0147f5f2bd13930deace26fec0ebf0c1befe1294875fb9d8a08919fdc1697ec78d1b86c03a0db4e61bd6a9db6803fdd8e2547ead44bd48cf223b964b0c6903ede0fc0e1b7d02b83ba18ed649bc0e40896ff7cde1d092a9f30314da8fc67d113c79fe7046da75bc090b08b3f31a5d0feb33abab2c608e3afaca1521f2809ae79c14e5ab16d7fa319ddc4dbae61cf41bd15829055970f26361fc1ae22a15e401b25eb500411e70a3cacca38e0d59a6add6513c02d0e6a766303e231d8adf8368b1579e7d58a7d3a5981542c9b8fec0b1780713031fefa60d93755215cbbc34f27634537b6c4fe391578be1a3547fc97d1eeb3e8b11444e8ad99902911fba55034a2796d791039bb29bd193406f05b942f69d47a4a236a64f610e7808387586f4a96a84059e93b11355ecd9125e7a805503e41f4097893b043c7d539d76933515c8fbde11f2a69a6f47aebbac3ed29b0231b3a74ecc9a5421ad61c995a039e44c0a8717dd6e5efbdc2f6ab8daefbc58867ca2e852780c66d1163a03662c34b5365983405093452bb004f78eb973a804edb1b4e8214ab982ed9c81992cc508d8852288fee4ced3af41cca7baaddb828830f3e7dd7c92610def60bbaf6a866e84ea81bd4e88a5b5a035b15b370f942af17f213706c681a59da20b150697c188edb4ac8b59b3babf9c895078f268940aa805c15a2712042c22ce5c44a62554d5f2efb6db179e1db29570b6b063d00349a0273277751e6adf32b6d36b02cb81025d80e620b61a418b0584441c087ce75ed03c871dfe8463a9a3641b036e849fd0fdc9b381ebe43e067353642f182d67ef6bef43463dc6b8d7abd035677b443440c7624d91baa11002e193d86a76974eef4f6fb44a8c440b73ddb323e9eb8f7fdd67aa368ce6aefdff1060e6a519d48b28718b1548e4665360f141d5e16027f0e7c41d07c582dd2a29fa55a00f", "android"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static struct custom_salt {
int target;
int data_length;
int count;
int keysize;
unsigned char data[SZ];
unsigned char keydata[SZ];
} *cur_salt;
/* To guard against tampering with the keystore, we append a keyed
* hash with a bit of whitener. */
static inline void getPreKeyedHash(char *password, SHA_CTX *ctxp)
{
int i, j;
unsigned char passwdBytes[PLAINTEXT_LENGTH * 2];
char *magic = "Mighty Aphrodite";
for (i=0, j=0; i < strlen(password); i++) {
passwdBytes[j++] = (password[i] >> 8);
passwdBytes[j++] = password[i];
}
SHA1_Init(ctxp);
SHA1_Update(ctxp, passwdBytes, strlen(password) * 2);
SHA1_Update(ctxp, magic, 16);
}
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc_tiny(sizeof(*saved_key) *
self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p;
char *ctcopy;
char *keeptr;
int target;
int v;
if (strncmp(ciphertext, "$keystore$", 10) != 0)
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += 10;
if ((p = strtok(ctcopy, "$")) == NULL)
goto bail;
target = atoi(p);
if (target != 1 && target != 0)
goto bail;
if ((p = strtok(NULL, "$")) == NULL)
goto bail;
v = atoi(p);
if ((p = strtok(NULL, "$")) == NULL)
goto bail;
if (strlen(p) != v * 2)
goto bail;
if ((p = strtok(NULL, "$")) == NULL) /* hash */
goto bail;
if ((p = strtok(NULL, "$")) == NULL) /* number of keys */
goto bail;
/* currently we support only 1 key */
if(atoi(p) != 1)
goto bail;
if ((p = strtok(NULL, "$")) == NULL) /* key length */
goto bail;
v = atoi(p);
if (v > SZ)
goto bail;
if ((p = strtok(NULL, "$")) == NULL) /* key data */
goto bail;
if (strlen(p) != v * 2)
goto bail;
MEM_FREE(keeptr);
return 1;
bail:
MEM_FREE(keeptr);
return 0;
}
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
char *p;
int i;
/* NOTE: do we need dynamic allocation because of underlying large object size? */
static struct custom_salt cs;
memset(&cs, 0, sizeof(cs));
ctcopy += 10; /* skip over "$keystore$" */
p = strtok(ctcopy, "$");
cs.target = atoi(p);
p = strtok(NULL, "$");
cs.data_length = atoi(p);
p = strtok(NULL, "$");
for (i = 0; i < cs.data_length; i++)
cs.data[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtok(NULL, "$"); /* skip hash */
p = strtok(NULL, "$");
cs.count = atoi(p);
p = strtok(NULL, "$");
cs.keysize = atoi(p);
for (i = 0; i < cs.keysize; i++)
cs.keydata[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
MEM_FREE(keeptr);
return (void *)&cs;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
int i;
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
char *p;
ctcopy += 10; /* skip over "$keystore$" */
p = strtok(ctcopy, "$");
p = strtok(NULL, "$");
p = strtok(NULL, "$");
p = strtok(NULL, "$"); /* at hash now */
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
MEM_FREE(keeptr);
return out;
}
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt*)salt;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
for (index = 0; index < count; index++)
#endif
{
SHA_CTX ctx;
getPreKeyedHash(saved_key[index], &ctx);
SHA1_Update(&ctx, cur_salt->data, cur_salt->data_length);
SHA1_Final((unsigned char*)crypt_out[index], &ctx);
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
for (; index < count; index++)
#endif
if (((ARCH_WORD_32*)binary)[0] == crypt_out[index][0])
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void keystore_set_key(char *key, int index)
{
int saved_key_length = strlen(key);
if (saved_key_length > PLAINTEXT_LENGTH)
saved_key_length = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_key_length);
saved_key[index][saved_key_length] = 0;
}
static char *get_key(int index)
{
return saved_key[index];
}
struct fmt_main fmt_keystore = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
/* FIXME: report cur_salt->data_length as tunable cost? */
{ NULL },
#endif
keystore_tests
}, {
init,
fmt_default_done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
set_salt,
keystore_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
2d.p.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <omp.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define myabs(x,y) (((x) > (y))? ((x)-(y)) : ((y)-(x)))
#define myceil(x,y) (int)ceil(((double)x)/((double)y)) // if x and y are integers, myceil(x,y) = (x-1)/y + 1
#define myfloor(x,y) (int)floor(((double)x)/((double)y)) // if x and y are integers, myceil(x,y) = (x-1)/y + 1
#if !defined(point)
#define point 5
#endif
int updatedpionts = 0;
#if point == 5
#ifdef CHECK
#define kernel(A) updatedpionts++;A[(t+1)%2][x][y] = 0.125 * (A[t%2][x+1][y] - 2.0 * A[t%2][x][y] + A[t%2][x-1][y]) + \
0.125 * (A[t%2][x][y+1] - 2.0 * A[t%2][x][y] + A[t%2][x][y-1]) + \
A[t%2][x][y];
#define kernel_boundary_left(A) updatedpionts++;A[(t+1)%2][x][y] = 0.125 * (A[t%2][x==NX-1?0:(x+1)][y] - 2.0 * A[t%2][x][y] + A[t%2][x==0?(NX-1):(x-1)][y]) + \
0.125 * (A[t%2][x][y==NY-1?0:(y+1)] - 2.0 * A[t%2][x][y] + A[t%2][x][y==0?(NY-1):(y-1)]) + \
A[t%2][x][y];
#define kernel_boundary_right(A) updatedpionts++;A[(t+1)%2][x][y] = 0.125 * (A[t%2][x==NX-1?0:(x+1)][y] - 2.0 * A[t%2][x][y] + A[t%2][x==0?(NX-1):(x-1)][y]) + \
0.125 * (A[t%2][x][y==NY-1?0:(y+1)] - 2.0 * A[t%2][x][y] + A[t%2][x][y==0?(NY-1):(y-1)]) + \
A[t%2][x][y];
#define kernel_boundary_top(A) updatedpionts++;A[(t+1)%2][x][y] = 0.125 * (A[t%2][x==NX-1?0:(x+1)][y] - 2.0 * A[t%2][x][y] + A[t%2][x==0?(NX-1):(x-1)][y]) + \
0.125 * (A[t%2][x][y==NY-1?0:(y+1)] - 2.0 * A[t%2][x][y] + A[t%2][x][y==0?(NY-1):(y-1)]) + \
A[t%2][x][y];
#define kernel_boundary_bottom(A) updatedpionts++;A[(t+1)%2][x][y] = 0.125 * (A[t%2][x==NX-1?0:(x+1)][y] - 2.0 * A[t%2][x][y] + A[t%2][x==0?(NX-1):(x-1)][y]) + \
0.125 * (A[t%2][x][y==NY-1?0:(y+1)] - 2.0 * A[t%2][x][y] + A[t%2][x][y==0?(NY-1):(y-1)]) + \
A[t%2][x][y];
#define kernel_boundary_left_bottom(A) updatedpionts++;A[(t+1)%2][x][y] = 0.125 * (A[t%2][x==NX-1?0:(x+1)][y] - 2.0 * A[t%2][x][y] + A[t%2][x==0?(NX-1):(x-1)][y]) + \
0.125 * (A[t%2][x][y==NY-1?0:(y+1)] - 2.0 * A[t%2][x][y] + A[t%2][x][y==0?(NY-1):(y-1)]) + \
A[t%2][x][y];
#define kernel_boundary_left_top(A) updatedpionts++;A[(t+1)%2][x][y] = 0.125 * (A[t%2][x==NX-1?0:(x+1)][y] - 2.0 * A[t%2][x][y] + A[t%2][x==0?(NX-1):(x-1)][y]) + \
0.125 * (A[t%2][x][y==NY-1?0:(y+1)] - 2.0 * A[t%2][x][y] + A[t%2][x][y==0?(NY-1):(y-1)]) + \
A[t%2][x][y];
#define kernel_boundary_right_bottom(A) updatedpionts++;A[(t+1)%2][x][y] = 0.125 * (A[t%2][x==NX-1?0:(x+1)][y] - 2.0 * A[t%2][x][y] + A[t%2][x==0?(NX-1):(x-1)][y]) + \
0.125 * (A[t%2][x][y==NY-1?0:(y+1)] - 2.0 * A[t%2][x][y] + A[t%2][x][y==0?(NY-1):(y-1)]) + \
A[t%2][x][y];
#define kernel_boundary_right_top(A) updatedpionts++;A[(t+1)%2][x][y] = 0.125 * (A[t%2][x==NX-1?0:(x+1)][y] - 2.0 * A[t%2][x][y] + A[t%2][x==0?(NX-1):(x-1)][y]) + \
0.125 * (A[t%2][x][y==NY-1?0:(y+1)] - 2.0 * A[t%2][x][y] + A[t%2][x][y==0?(NY-1):(y-1)]) + \
A[t%2][x][y];
#define kernel_boundary(A) updatedpionts++;A[(t+1)%2][x][y] = 0.125 * (A[t%2][x==NX-1?0:(x+1)][y] - 2.0 * A[t%2][x][y] + A[t%2][x==0?(NX-1):(x-1)][y]) + \
0.125 * (A[t%2][x][y==NY-1?0:(y+1)] - 2.0 * A[t%2][x][y] + A[t%2][x][y==0?(NY-1):(y-1)]) + \
A[t%2][x][y];
#else
#define kernel(A) A[(t+1)%2][x][y] = 0.125 * (A[t%2][x+1][y] - 2.0 * A[t%2][x][y] + A[t%2][x-1][y]) + \
0.125 * (A[t%2][x][y+1] - 2.0 * A[t%2][x][y] + A[t%2][x][y-1]) + \
A[t%2][x][y];
#define kernel_boundary_left(A) A[(t+1)%2][x][y] = 0.125 * (A[t%2][x==NX-1?0:(x+1)][y] - 2.0 * A[t%2][x][y] + A[t%2][x==0?(NX-1):(x-1)][y]) + \
0.125 * (A[t%2][x][y==NY-1?0:(y+1)] - 2.0 * A[t%2][x][y] + A[t%2][x][y==0?(NY-1):(y-1)]) + \
A[t%2][x][y];
#define kernel_boundary_right(A) A[(t+1)%2][x][y] = 0.125 * (A[t%2][x==NX-1?0:(x+1)][y] - 2.0 * A[t%2][x][y] + A[t%2][x==0?(NX-1):(x-1)][y]) + \
0.125 * (A[t%2][x][y==NY-1?0:(y+1)] - 2.0 * A[t%2][x][y] + A[t%2][x][y==0?(NY-1):(y-1)]) + \
A[t%2][x][y];
#define kernel_boundary_top(A) A[(t+1)%2][x][y] = 0.125 * (A[t%2][x==NX-1?0:(x+1)][y] - 2.0 * A[t%2][x][y] + A[t%2][x==0?(NX-1):(x-1)][y]) + \
0.125 * (A[t%2][x][y==NY-1?0:(y+1)] - 2.0 * A[t%2][x][y] + A[t%2][x][y==0?(NY-1):(y-1)]) + \
A[t%2][x][y];
#define kernel_boundary_bottom(A) A[(t+1)%2][x][y] = 0.125 * (A[t%2][x==NX-1?0:(x+1)][y] - 2.0 * A[t%2][x][y] + A[t%2][x==0?(NX-1):(x-1)][y]) + \
0.125 * (A[t%2][x][y==NY-1?0:(y+1)] - 2.0 * A[t%2][x][y] + A[t%2][x][y==0?(NY-1):(y-1)]) + \
A[t%2][x][y];
#define kernel_boundary_left_bottom(A) A[(t+1)%2][x][y] = 0.125 * (A[t%2][x==NX-1?0:(x+1)][y] - 2.0 * A[t%2][x][y] + A[t%2][x==0?(NX-1):(x-1)][y]) + \
0.125 * (A[t%2][x][y==NY-1?0:(y+1)] - 2.0 * A[t%2][x][y] + A[t%2][x][y==0?(NY-1):(y-1)]) + \
A[t%2][x][y];
#define kernel_boundary_left_top(A) A[(t+1)%2][x][y] = 0.125 * (A[t%2][x==NX-1?0:(x+1)][y] - 2.0 * A[t%2][x][y] + A[t%2][x==0?(NX-1):(x-1)][y]) + \
0.125 * (A[t%2][x][y==NY-1?0:(y+1)] - 2.0 * A[t%2][x][y] + A[t%2][x][y==0?(NY-1):(y-1)]) + \
A[t%2][x][y];
#define kernel_boundary_right_bottom(A) A[(t+1)%2][x][y] = 0.125 * (A[t%2][x==NX-1?0:(x+1)][y] - 2.0 * A[t%2][x][y] + A[t%2][x==0?(NX-1):(x-1)][y]) + \
0.125 * (A[t%2][x][y==NY-1?0:(y+1)] - 2.0 * A[t%2][x][y] + A[t%2][x][y==0?(NY-1):(y-1)]) + \
A[t%2][x][y];
#define kernel_boundary_right_top(A) A[(t+1)%2][x][y] = 0.125 * (A[t%2][x==NX-1?0:(x+1)][y] - 2.0 * A[t%2][x][y] + A[t%2][x==0?(NX-1):(x-1)][y]) + \
0.125 * (A[t%2][x][y==NY-1?0:(y+1)] - 2.0 * A[t%2][x][y] + A[t%2][x][y==0?(NY-1):(y-1)]) + \
A[t%2][x][y];
#define kernel_boundary(A) A[(t+1)%2][x][y] = 0.125 * (A[t%2][x==NX-1?0:(x+1)][y] - 2.0 * A[t%2][x][y] + A[t%2][x==0?(NX-1):(x-1)][y]) + \
0.125 * (A[t%2][x][y==NY-1?0:(y+1)] - 2.0 * A[t%2][x][y] + A[t%2][x][y==0?(NY-1):(y-1)]) + \
A[t%2][x][y];
#endif
#define XSLOPE 1
#define YSLOPE 1
#define DATA_TYPE double
#elif point == 9
#define kernel(A) A[(t+1)%2][x][y] = 0.96 * A[t%2][x][y] + \
0.0051 * (A[t%2][x+1][y] + A[t%2][x-1][y] + A[t%2][x][y+1]+A[t%2][x][y-1]) + \
0.0049 * (A[t%2][x+1][y-1] + A[t%2][x-1][y+1] + A[t%2][x-1][y-1] + A[t%2][x+1][y+1]);
#define XSLOPE 1
#define YSLOPE 1
#define DATA_TYPE double
#elif point == 0
#define kernel(A) A[(t+1)%2][x][y] = b2s23(A[t%2][x][y], A[t%2][x-1][y+1] + A[t%2][x-1][y] + \
A[t%2][x-1][y-1] + A[t%2][x][y+1] + \
A[t%2][x][y-1] + A[t%2][x+1][y+1] + \
A[t%2][x+1][y] + A[t%2][x+1][y-1]);
#define XSLOPE 1
#define YSLOPE 1
#define DATA_TYPE int
int b2s23(int cell, int neighbors) {
if((cell == 1 && ((neighbors < 2) || (neighbors > 3)))) {
return 0;
}
if((cell == 1 && (neighbors == 2 || neighbors == 3))) {
return 1;
}
if((cell == 0 && neighbors == 3)) {
return 1;
}
return cell;
}
#endif
#ifdef CHECK
#define TOLERANCE 0
#endif
int main(int argc, char * argv[]) {
struct timeval start, end;
long int t, i, j;
int NX = atoi(argv[1]);
int NY = atoi(argv[2]);
int T = atoi(argv[3]);
int Bx = atoi(argv[4]);
int By = atoi(argv[5]);
int tb = atoi(argv[6]);
if(Bx<(2*XSLOPE+1) || By<(2*YSLOPE+1) || Bx>NX || By>NY || tb>min(((Bx-1)/2)/XSLOPE,((By-1)/2)/YSLOPE)){
return 0;
}
DATA_TYPE (*A)[NX][NY] = (DATA_TYPE (*)[NX][NY])malloc(sizeof(DATA_TYPE)*(NX)*(NY)*2);
if(NULL == A) return 0;
#ifdef CHECK
DATA_TYPE (*B)[NX][NY] = (DATA_TYPE (*)[NX][NY])malloc(sizeof(DATA_TYPE)*(NX)*(NY)*2);
if(NULL == B) return 0;
#endif
srand(100);
for (i = 0; i < NX; i++) {
for (j = 0; j < NY; j++) {
A[0][i][j] = 0;//(DATA_TYPE) (1.0 * (rand() % 1024));
#if point == 0
A[0][i][j] = ((int)A[0][i][j])%2;
#endif
A[1][i][j] = 0;
#ifdef CHECK
B[0][i][j] = A[0][i][j];
B[1][i][j] = 0;
#endif
}
}
int bx = Bx-2*(tb*XSLOPE);
int by = By-2*(tb*YSLOPE);
if(bx < 2 * XSLOPE) return 0;
if(by < 2 * YSLOPE) return 0;
int ix = Bx+bx;
int iy = By+by; // ix and iy are even.
int xnb0 = NX / ix;
int ynb0 = NY / iy;
int xnrestpoints = NX % ix;
int ynrestpoints = NY % iy;
int bx_first_B11 = (bx + xnrestpoints)/2;
int bx_last_B11 = (bx + xnrestpoints) - bx_first_B11;
int by_first_B12 = (by + ynrestpoints)/2;
int by_last_B12 = (by + ynrestpoints) - by_first_B12;
int xnb11 = xnb0;
int ynb11 = ynb0;
int xnb12 = xnb0;
int ynb12 = ynb0; // for periodic boundary stencils xnb11 and ynb12 must be larger than 2.
int xnb2 = xnb0;
int ynb2 = ynb0;
int nb02[2] = {xnb0 * ynb0, xnb2 * ynb2};
int nb1[2] = {(xnb11-1) * ynb11, xnb12 * (ynb12-1)};
int xnb02[2] = {xnb0, xnb2-1};
int xnb1[2] = {xnb11-1, xnb12};
/*
int xleft02[2] = {XSLOPE + bx, XSLOPE - (Bx-bx)/2};
int ybottom02[2] = {YSLOPE + by, YSLOPE - (By-by)/2};
int xleft11[2] = {XSLOPE, XSLOPE + ix/2};
int ybottom11[2] = {YSLOPE + by, YSLOPE - (By-by)/2};
int xleft12[2] = {XSLOPE + bx, XSLOPE - (Bx-bx)/2};
int ybottom12[2] = {YSLOPE, YSLOPE + iy/2};
*/
int xleft02[2] = {bx_first_B11, bx_first_B11 + ix/2};
int ybottom02[2] = {by_first_B12, by_first_B12 + iy/2};
int xleft11[2] = {bx_first_B11 + Bx, bx_first_B11 + (Bx-bx)/2};
int ybottom11[2] = {by_first_B12, by_first_B12 + (By+by)/2};
int xleft12[2] = {bx_first_B11, bx_first_B11 + (Bx+bx)/2};
int ybottom12[2] = {by_first_B12 + By, by_first_B12 + (By-by)/2};
int level = 0;
int tt,n;
int x, y;
register int ymin, ymax;
int xmin,xmax;
gettimeofday(&start,0);
for(tt = -tb; tt < T; tt += tb){
#pragma omp parallel for schedule(dynamic) private(xmin,xmax,ymin,ymax,t,x,y)
for(n = 0; n < nb02[level]; n++){
if(level == 0 || n < nb02[level] - (xnb2 + ynb2 -1)){ // there is no boundary block of B0
for(t = max(tt,0); t < min(tt + 2*tb, T); t++) {
xmin = xleft02[level] + (n%xnb02[level]) * ix + myabs(t+1,tt+tb) * XSLOPE;
xmax = xleft02[level] + (n%xnb02[level]) * ix + Bx - myabs(t+1,tt+tb) * XSLOPE;
ymin = ybottom02[level] + (n/xnb02[level]) * iy + myabs(t+1,tt+tb) * YSLOPE;
ymax = ybottom02[level] + (n/xnb02[level]) * iy + By - myabs(t+1,tt+tb) * YSLOPE;
for(x = xmin; x < xmax; x++) {
#pragma ivdep
#pragma vector always
for(y = ymin; y < ymax; y++) {
kernel(A);
}
}
}
}
else if(n < nb02[level] - xnb2){ // processing ynb2 - 1 left boundary blocks of B2
for(t = tt; t < min(tt + 2*tb, T); t++) {
xmin = NX - bx_last_B11 - (Bx-bx)/2 + myabs(t+1,tt+tb) * XSLOPE;
xmax = bx_first_B11 + (Bx-bx)/2 - myabs(t+1,tt+tb) * XSLOPE;
ymin = by_first_B12 + iy/2 + (nb02[level] - xnb2 - n - 1) * iy + myabs(t+1,tt+tb) * YSLOPE;
ymax = by_first_B12 + iy/2 + By + (nb02[level] - xnb2 - n - 1) * iy - myabs(t+1,tt+tb) * YSLOPE;
for(x = 0; x < XSLOPE; x++){
for(y = ymin; y < ymax; y++) {
kernel_boundary_left(A);
}
}
for(; x < xmax; x++) {
#pragma ivdep
#pragma vector always
for(y = ymin; y < ymax; y++) {
kernel(A);
}
}
for(x = xmin; x < NX-XSLOPE; x++) {
#pragma ivdep
#pragma vector always
for(y = ymin; y < ymax; y++) {
kernel(A);
}
}
for(; x < NX; x++){
for(y = ymin; y < ymax; y++) {
kernel_boundary_right(A);
}
}
}
}
else if(n < nb02[level] - 1){ // processing xnb2 - 1 bottom boundary blocks of B2
for(t = tt; t < min(tt + 2*tb, T); t++) {
ymax = by_first_B12 + (By-by)/2 - myabs(t+1,tt+tb) * YSLOPE;
ymin = NY - by_last_B12 - (By-by)/2 + myabs(t+1,tt+tb) * YSLOPE;
xmin = bx_first_B11 + ix/2 + (nb02[level] - 1 - n - 1) * ix + myabs(t+1,tt+tb) * XSLOPE;
xmax = bx_first_B11 + ix/2 + Bx + (nb02[level] - 1 - n - 1) * ix - myabs(t+1,tt+tb) * XSLOPE;
for(x = xmin; x < xmax; x++) {
for(y = 0; y < YSLOPE; y++) {
kernel_boundary_bottom(A);
}
}
for(x = xmin; x < xmax; x++) {
#pragma ivdep
#pragma vector always
for(y = YSLOPE; y < ymax; y++) {
kernel(A);
}
}
for(x = xmin; x < xmax; x++) {
#pragma ivdep
#pragma vector always
for(y = ymin; y < NY - YSLOPE; y++) {
kernel(A);
}
}
for(x = xmin; x < xmax; x++) {
for(y = NY - YSLOPE; y < NY; y++) {
kernel_boundary_top(A);
}
}
}
}
else{ // processing corner block of B2
for(t = tt; t < min(tt + 2*tb, T); t++) {
xmin = NX - bx_last_B11 - (Bx-bx)/2 + myabs(t+1,tt+tb) * XSLOPE;
xmax = bx_first_B11 + (Bx-bx)/2 - myabs(t+1,tt+tb) * XSLOPE;
ymax = by_first_B12 + (By-by)/2 - myabs(t+1,tt+tb) * YSLOPE;
ymin = NY - by_last_B12 - (By-by)/2 + myabs(t+1,tt+tb) * YSLOPE;
for(x = 0; x < XSLOPE; x++){
for(y = 0; y < YSLOPE; y++) {
kernel_boundary_left_bottom(A);
}
}
for(x = 0; x < XSLOPE; x++){
for(y = YSLOPE; y < ymax; y++) {
kernel_boundary_left(A);
}
}
for(x = XSLOPE; x < xmax; x++){
for(y = 0; y < YSLOPE; y++) {
kernel_boundary_bottom(A);
}
}
for(x = XSLOPE; x < xmax; x++){
#pragma ivdep
#pragma vector always
for(y = YSLOPE; y < ymax; y++) {
kernel(A);
}
}
for(x = 0; x < XSLOPE; x++){
for(y = NY - YSLOPE; y < NY; y++) {
kernel_boundary_left_top(A);
}
}
for(x = 0; x < XSLOPE; x++){
for(y = ymin; y < NY - YSLOPE; y++) {
kernel_boundary_left(A);
}
}
for(x = XSLOPE; x < xmax; x++){
for(y = NY - YSLOPE; y < NY; y++) {
kernel_boundary_top(A);
}
}
for(x = XSLOPE; x < xmax; x++){
#pragma ivdep
#pragma vector always
for(y = ymin; y < NY - YSLOPE; y++) {
kernel(A);
}
}
for(x = NX - XSLOPE; x < NX; x++){
for(y = 0; y < YSLOPE; y++) {
kernel_boundary_right_bottom(A);
}
}
for(x = NX - XSLOPE; x < NX; x++){
for(y = YSLOPE; y < ymax; y++) {
kernel_boundary_right(A);
}
}
for(x = xmin; x < NX - XSLOPE; x++){
for(y = 0; y < YSLOPE; y++) {
kernel_boundary_bottom(A);
}
}
for(x = xmin; x < NX - XSLOPE; x++){
#pragma ivdep
#pragma vector always
for(y = YSLOPE; y < ymax; y++) {
kernel(A);
}
}
for(x = NX - XSLOPE; x < NX; x++){
for(y = NY - YSLOPE; y < NY; y++) {
kernel_boundary_right_top(A);
}
}
for(x = NX - XSLOPE; x < NX; x++){
for(y = ymin; y < NY - YSLOPE; y++) {
kernel_boundary_right(A);
}
}
for(x = xmin; x < NX - XSLOPE; x++){
for(y = NY - YSLOPE; y < NY; y++) {
kernel_boundary_top(A);
}
}
for(x = xmin; x < NX - XSLOPE; x++){
#pragma ivdep
#pragma vector always
for(y = ymin; y < NY - YSLOPE; y++) {
kernel(A);
}
}
}
}
}
#pragma omp parallel for schedule(dynamic) private(xmin,xmax,ymin,ymax,t,x,y)
for(n = 0; n < xnb11 * ynb11 + xnb12 * ynb12; n++) {
if(n < (xnb11-1) * ynb11 + xnb12 * (ynb12 - 1)){
for(t = tt + tb; t < min(tt + 2*tb, T); t++) {
if(n < nb1[level]) {
xmin = xleft11[level] + (n%xnb1[level]) * ix - (t+1-tt-tb) * XSLOPE;
xmax = xleft11[level] + (n%xnb1[level]) * ix + bx + (t+1-tt-tb) * XSLOPE;
ymin = ybottom11[level] + (n/xnb1[level]) * iy + (t+1-tt-tb) * YSLOPE;
ymax = ybottom11[level] + (n/xnb1[level]) * iy + By - (t+1-tt-tb) * YSLOPE;
}
else {
xmin = xleft12[level] + ((n-nb1[level])%xnb1[1-level]) * ix + (t+1-tt-tb) * XSLOPE;
xmax = xleft12[level] + ((n-nb1[level])%xnb1[1-level]) * ix + Bx - (t+1-tt-tb) * XSLOPE;
ymin = ybottom12[level] + ((n-nb1[level])/xnb1[1-level]) * iy - (t+1-tt-tb) * YSLOPE;
ymax = ybottom12[level] + ((n-nb1[level])/xnb1[1-level]) * iy + by + (t+1-tt-tb) * YSLOPE;
}
for(x = xmin; x < xmax; x++) {
#pragma ivdep
#pragma vector always
for(y = ymin; y < ymax; y++) {
kernel(A);
}
}
}
}
else if(n < xnb11 * ynb11 + xnb12 * (ynb12 - 1)){ // processing left column B1 blocks
for(t = tt + tb; t < min(tt + 2*tb, T); t++) {
if(level == 0) {
xmin = NX - bx_last_B11 - (t+1-tt-tb) * XSLOPE;
xmax = bx_first_B11 + (t+1-tt-tb) * XSLOPE;
ymin = by_first_B12 + (n - (xnb11-1) * ynb11 - xnb12 * (ynb12 - 1)) * iy + (t+1-tt-tb) * YSLOPE;
ymax = by_first_B12 + (n - (xnb11-1) * ynb11 - xnb12 * (ynb12 - 1)) * iy + By - (t+1-tt-tb) * YSLOPE;
}
else {
xmin = NX - bx_last_B11 - (Bx - bx)/2 + (t+1-tt-tb) * XSLOPE;
xmax = bx_first_B11 + (Bx - bx)/2 - (t+1-tt-tb) * XSLOPE;
ymin = by_first_B12 + (By - by)/2 + (n - (xnb11-1) * ynb11 - xnb12 * (ynb12 - 1)) * iy - (t+1-tt-tb) * YSLOPE;
ymax = by_first_B12 + (By - by)/2 + (n - (xnb11-1) * ynb11 - xnb12 * (ynb12 - 1)) * iy + by + (t+1-tt-tb) * YSLOPE;
}
for(x = 0; x < XSLOPE; x++){
for(y = ymin; y < ymax; y++) {
kernel_boundary_left(A);
}
}
for(x = XSLOPE; x < xmax; x++) {
#pragma ivdep
#pragma vector always
for(y = ymin; y < ymax; y++) {
kernel(A);
}
}
for(x = xmin; x < NX-XSLOPE; x++) {
#pragma ivdep
#pragma vector always
for(y = ymin; y < ymax; y++) {
kernel(A);
}
}
for(x = NX - XSLOPE; x < NX; x++){
for(y = ymin; y < ymax; y++) {
kernel_boundary_right(A);
}
}
}
}
else{ // processing bottom row B1 blocks
for(t = tt + tb; t < min(tt + 2*tb, T); t++) {
if(level == 0) {
xmin = bx_first_B11 + (n - xnb11 * ynb11 - xnb12 * (ynb12 - 1)) * ix + (t+1-tt-tb) * XSLOPE;
xmax = bx_first_B11 + (n - xnb11 * ynb11 - xnb12 * (ynb12 - 1)) * ix + Bx - (t+1-tt-tb) * XSLOPE;
ymin = NY - by_last_B12 - (t+1-tt-tb) * YSLOPE;
ymax = by_first_B12 + (t+1-tt-tb) * YSLOPE;
}
else {
xmin = bx_first_B11 + (Bx - bx)/2 + (n - xnb11 * ynb11 - xnb12 * (ynb12 - 1)) * ix - (t+1-tt-tb) * XSLOPE;
xmax = bx_first_B11 + (Bx - bx)/2 + (n - xnb11 * ynb11 - xnb12 * (ynb12 - 1)) * ix + bx + (t+1-tt-tb) * XSLOPE;
ymin = NY - by_last_B12 + (t+1-tt-tb) * YSLOPE - (By - by)/2;
ymax = by_first_B12 - (t+1-tt-tb) * YSLOPE + (By - by)/2;
}
for(x = xmin; x < xmax; x++) {
for(y = 0; y < YSLOPE; y++) {
kernel_boundary_bottom(A);
}
}
for(x = xmin; x < xmax; x++) {
#pragma ivdep
#pragma vector always
for(y = YSLOPE; y < ymax; y++) {
kernel(A);
}
}
for(x = xmin; x < xmax; x++) {
#pragma ivdep
#pragma vector always
for(y = ymin; y < NY - YSLOPE; y++) {
kernel(A);
}
}
for(x = xmin; x < xmax; x++) {
for(y = NY-YSLOPE; y < NY; y++) {
kernel_boundary_top(A);
}
}
}
}
}
level = 1 - level;
}
gettimeofday(&end,0);
#ifdef CHECK
printf("%d\t%d\n", updatedpionts, NX * NY * T);
#endif
printf("GStencil/s = %f\n", ((double)NX * NY * T) / (double)(end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) * 1.0e-6) / 1000000000L);
#ifdef CHECK
for (t = 0; t < T; t++) {
for (x = 0; x < NX; x++) {
for (y = 0; y < NY; y++) {
kernel_boundary(B);
}
}
}
for (i = 0; i < NX; i++) {
for (j = 0; j < NY; j++) {
if(myabs(A[T%2][i][j],B[T%2][i][j]) > TOLERANCE)
printf("Naive[%d][%d] = %f, Check = %f: FAILED!\n", i, j, B[T%2][i][j], A[T%2][i][j]);
}
}
#endif
}
|
coverage.h | #ifndef COVERAGE_H
#define COVERAGE_H
#include <boost/container/flat_set.hpp>
#include <boost/dynamic_bitset.hpp>
#include <boost/iostreams/stream.hpp>
#include <boost/iostreams/stream_buffer.hpp>
#include <boost/iostreams/device/file.hpp>
#include <boost/iostreams/filtering_stream.hpp>
#include <boost/iostreams/filter/zlib.hpp>
#include <boost/iostreams/filter/gzip.hpp>
#include <boost/progress.hpp>
#include <htslib/sam.h>
#include "tags.h"
#include "util.h"
#include "msa.h"
#include "split.h"
namespace torali {
struct SpanPoint {
int32_t bppos;
int32_t svt;
uint32_t id;
int32_t chr2;
int32_t otherBppos;
SpanPoint() : bppos(0), svt(0), id(0), chr2(0), otherBppos(0) {}
explicit SpanPoint(int32_t const bp) : bppos(bp), svt(0), id(0), chr2(0), otherBppos(0) {}
SpanPoint(int32_t const bp, int32_t const s, uint32_t const identifier, int32_t const tid, int32_t const obp) : bppos(bp), svt(s), id(identifier), chr2(tid), otherBppos(obp) {}
};
struct BpRegion {
int32_t regionStart;
int32_t regionEnd;
int32_t bppos;
int32_t homLeft;
int32_t homRight;
int32_t svt;
uint32_t id;
uint8_t bpPoint;
BpRegion() : regionStart(0), regionEnd(0), bppos(0), homLeft(0), homRight(0), svt(0), id(0), bpPoint(0) {}
explicit BpRegion(int32_t bp) : regionStart(0), regionEnd(0), bppos(bp), homLeft(0), homRight(0), svt(0), id(0), bpPoint(0) {}
BpRegion(int32_t rs, int32_t re, int32_t bpos, int32_t hl, int32_t hr, int32_t s, uint32_t identifier, uint8_t bpp) : regionStart(rs), regionEnd(re), bppos(bpos), homLeft(hl), homRight(hr), svt(s), id(identifier), bpPoint(bpp) {}
};
template<typename TRecord>
struct SortBp { // plain comparator (std::binary_function was removed in C++17)
inline bool operator()(TRecord const& s1, TRecord const& s2) const {
return (s1.bppos < s2.bppos);
}
};
struct SpanningCount {
int32_t refh1;
int32_t refh2;
int32_t alth1;
int32_t alth2;
std::vector<uint8_t> ref;
std::vector<uint8_t> alt;
SpanningCount() : refh1(0), refh2(0), alth1(0), alth2(0) {}
};
struct JunctionCount {
int32_t refh1;
int32_t refh2;
int32_t alth1;
int32_t alth2;
std::vector<uint8_t> ref;
std::vector<uint8_t> alt;
JunctionCount() : refh1(0), refh2(0), alth1(0), alth2(0) {}
};
template<typename TAlign, typename TQualities>
inline uint32_t
_getAlignmentQual(TAlign const& align, TQualities const& qual) {
typedef typename TAlign::index TAIndex;
uint32_t baseQualSum = 0;
uint32_t seqPtr = 0;
uint32_t alignedBases = 0;
for(TAIndex j = 0; j < (TAIndex) align.shape()[1]; ++j) {
if (align[1][j] != '-') {
if (align[0][j] != '-') {
++alignedBases;
baseQualSum += qual[seqPtr];
}
++seqPtr;
}
}
return (alignedBases > 0 ? baseQualSum / alignedBases : 0); // guard against reads with no aligned bases
}
template<typename TPos>
inline int32_t
_cutRefStart(TPos const rStart, TPos const rEnd, TPos const offset, unsigned int bpPoint, int32_t const svt) {
if (_translocation(svt)) {
uint8_t ct = _getSpanOrientation(svt);
if (ct == 3) {
if (!bpPoint) return rEnd - offset;
else return rStart - offset;
} else {
if (bpPoint) return rEnd - offset;
else return rStart - offset;
}
} else {
if (svt == 3) {
if (!bpPoint) return rEnd - offset;
else return rStart - offset;
} else {
if (bpPoint) return rEnd - offset;
else return rStart - offset;
}
}
}
template<typename TPos>
inline int32_t
_cutRefEnd(TPos const rStart, TPos const rEnd, TPos const offset, unsigned int bpPoint, int32_t const svt) {
if (_translocation(svt)) {
uint8_t ct = _getSpanOrientation(svt);
if (ct == 3) {
if (!bpPoint) return rEnd + offset;
else return rStart + offset;
} else {
if (bpPoint) return rEnd + offset;
else return rStart + offset;
}
} else {
if (svt == 3) {
if (!bpPoint) return rEnd + offset;
else return rStart + offset;
} else {
if (bpPoint) return rEnd + offset;
else return rStart + offset;
}
}
}
template<typename TConfig, typename TSVs, typename TBreakProbes, typename TGenomicBpRegion>
inline void
_generateProbes(TConfig const& c, bam_hdr_t* hdr, TSVs& svs, TBreakProbes& refProbeArr, TBreakProbes& consProbeArr, TGenomicBpRegion& bpRegion, std::vector<bool>& svOnChr) {
typedef typename TBreakProbes::value_type TProbes;
// Preprocess REF and ALT
boost::posix_time::ptime noww = boost::posix_time::second_clock::local_time();
std::cout << '[' << boost::posix_time::to_simple_string(noww) << "] " << "Generate REF and ALT probes" << std::endl;
boost::progress_display show_progress( hdr->n_targets );
TProbes refProbes(svs.size());
faidx_t* fai = fai_load(c.genome.string().c_str());
for(int32_t refIndex=0; refIndex < (int32_t) hdr->n_targets; ++refIndex) {
++show_progress;
char* seq = NULL;
// Iterate all structural variants
for(typename TSVs::iterator itSV = svs.begin(); itSV != svs.end(); ++itSV) {
if ((itSV->chr != refIndex) && (itSV->chr2 != refIndex)) continue;
svOnChr[refIndex] = true;
// Lazy loading of reference sequence
if (seq == NULL) {
int32_t seqlen = -1;
std::string tname(hdr->target_name[refIndex]);
seq = faidx_fetch_seq(fai, tname.c_str(), 0, hdr->target_len[refIndex], &seqlen);
}
// Set tag alleles
if (itSV->chr == refIndex) {
itSV->alleles = _addAlleles(boost::to_upper_copy(std::string(seq + itSV->svStart - 1, seq + itSV->svStart)), std::string(hdr->target_name[itSV->chr2]), *itSV, itSV->svt);
}
if (!itSV->precise) continue;
// Get the reference sequence
if ((itSV->chr != itSV->chr2) && (itSV->chr2 == refIndex)) {
Breakpoint bp(*itSV);
_initBreakpoint(hdr, bp, (int32_t) itSV->consensus.size(), itSV->svt);
refProbes[itSV->id] = _getSVRef(seq, bp, refIndex, itSV->svt);
}
if (itSV->chr == refIndex) {
Breakpoint bp(*itSV);
if (_translocation(itSV->svt)) bp.part1 = refProbes[itSV->id];
if (itSV->svt == 4) {
int32_t bufferSpace = std::max((int32_t) ((itSV->consensus.size() - itSV->insLen) / 3), c.minimumFlankSize);
_initBreakpoint(hdr, bp, bufferSpace, itSV->svt);
} else _initBreakpoint(hdr, bp, (int32_t) itSV->consensus.size(), itSV->svt);
std::string svRefStr = _getSVRef(seq, bp, refIndex, itSV->svt);
// Find breakpoint to reference
typedef boost::multi_array<char, 2> TAlign;
TAlign align;
if (!_consRefAlignment(itSV->consensus, svRefStr, align, itSV->svt)) continue;
AlignDescriptor ad;
if (!_findSplit(c, itSV->consensus, svRefStr, align, ad, itSV->svt)) continue;
// Debug consensus to reference alignment
//std::cerr << itSV->id << std::endl;
//for(uint32_t i = 0; i<align.shape()[0]; ++i) {
//for(uint32_t j = 0; j<align.shape()[1]; ++j) std::cerr << align[i][j];
//std::cerr << std::endl;
//}
//std::cerr << std::endl;
// Iterate all samples
for (unsigned int bpPoint = 0; bpPoint<2; ++bpPoint) {
int32_t regionChr, regionStart, regionEnd, cutConsStart, cutConsEnd, cutRefStart, cutRefEnd, bppos;
if (bpPoint) {
regionChr = itSV->chr2;
regionStart = std::max(0, itSV->svEnd - c.minimumFlankSize);
regionEnd = std::min((uint32_t) (itSV->svEnd + c.minimumFlankSize), hdr->target_len[itSV->chr2]);
cutConsStart = ad.cEnd - ad.homLeft - c.minimumFlankSize;
cutConsEnd = ad.cEnd + ad.homRight + c.minimumFlankSize;
cutRefStart = _cutRefStart(ad.rStart, ad.rEnd, ad.homLeft + c.minimumFlankSize, bpPoint, itSV->svt);
cutRefEnd = _cutRefEnd(ad.rStart, ad.rEnd, ad.homRight + c.minimumFlankSize, bpPoint, itSV->svt);
bppos = itSV->svEnd;
} else {
regionChr = itSV->chr;
regionStart = std::max(0, itSV->svStart - c.minimumFlankSize);
regionEnd = std::min((uint32_t) (itSV->svStart + c.minimumFlankSize), hdr->target_len[itSV->chr]);
cutConsStart = ad.cStart - ad.homLeft - c.minimumFlankSize;
cutConsEnd = ad.cStart + ad.homRight + c.minimumFlankSize;
cutRefStart = _cutRefStart(ad.rStart, ad.rEnd, ad.homLeft + c.minimumFlankSize, bpPoint, itSV->svt);
cutRefEnd = _cutRefEnd(ad.rStart, ad.rEnd, ad.homRight + c.minimumFlankSize, bpPoint, itSV->svt);
bppos = itSV->svStart;
}
consProbeArr[bpPoint][itSV->id] = itSV->consensus.substr(cutConsStart, (cutConsEnd - cutConsStart));
refProbeArr[bpPoint][itSV->id] = svRefStr.substr(cutRefStart, (cutRefEnd - cutRefStart));
bpRegion[regionChr].push_back(BpRegion(regionStart, regionEnd, bppos, ad.homLeft, ad.homRight, itSV->svt, itSV->id, bpPoint));
}
}
}
if (seq != NULL) free(seq);
}
// Clean-up
fai_destroy(fai);
for(int32_t refIndex=0; refIndex < (int32_t) hdr->n_targets; ++refIndex) {
// Sort breakpoint regions
std::sort(bpRegion[refIndex].begin(), bpRegion[refIndex].end(), SortBp<BpRegion>());
}
}
template<typename TConfig, typename TSampleLibrary, typename TSVs, typename TCoverageCount, typename TCountMap, typename TSpanMap>
inline void
annotateCoverage(TConfig& c, TSampleLibrary& sampleLib, TSVs& svs, TCoverageCount& covCount, TCountMap& countMap, TSpanMap& spanMap)
{
typedef typename TCoverageCount::value_type::value_type TCovPair;
typedef typename TSpanMap::value_type::value_type TSpanPair;
typedef typename TCountMap::value_type::value_type TCountPair;
typedef std::vector<uint8_t> TQuality;
// Open file handles
typedef std::vector<samFile*> TSamFile;
typedef std::vector<hts_idx_t*> TIndex;
typedef std::vector<bam_hdr_t*> THeader;
TSamFile samfile(c.files.size());
TIndex idx(c.files.size());
THeader hdr(c.files.size());
int32_t totalTarget = 0;
for(unsigned int file_c = 0; file_c < c.files.size(); ++file_c) {
samfile[file_c] = sam_open(c.files[file_c].string().c_str(), "r");
hts_set_fai_filename(samfile[file_c], c.genome.string().c_str());
idx[file_c] = sam_index_load(samfile[file_c], c.files[file_c].string().c_str());
hdr[file_c] = sam_hdr_read(samfile[file_c]);
totalTarget += hdr[file_c]->n_targets;
}
// Initialize coverage count maps
covCount.resize(c.files.size());
countMap.resize(c.files.size());
spanMap.resize(c.files.size());
for(uint32_t file_c = 0; file_c < c.files.size(); ++file_c) {
covCount[file_c].resize(svs.size(), TCovPair());
countMap[file_c].resize(svs.size(), TCountPair());
spanMap[file_c].resize(svs.size(), TSpanPair());
}
// Reference and consensus probes
typedef std::vector<std::string> TProbes;
typedef std::vector<TProbes> TBreakProbes;
TBreakProbes refProbeArr(2, TProbes()); // Left and right breakpoint
TBreakProbes consProbeArr(2, TProbes()); // Left and right breakpoint
for(uint32_t k = 0; k < 2; ++k) {
refProbeArr[k].resize(svs.size());
consProbeArr[k].resize(svs.size());
}
typedef std::vector<BpRegion> TBpRegion;
typedef std::vector<TBpRegion> TGenomicBpRegion;
TGenomicBpRegion bpRegion(hdr[0]->n_targets, TBpRegion());
std::vector<bool> svOnChr(hdr[0]->n_targets, false);
// Generate probes
_generateProbes(c, hdr[0], svs, refProbeArr, consProbeArr, bpRegion, svOnChr);
// Debug
//for(uint32_t k = 0; k < 2; ++k) {
//for(uint32_t i = 0; i < svs.size(); ++i) {
//std::cerr << k << ',' << i << ',' << refProbeArr[k][i] << ',' << consProbeArr[k][i] << std::endl;
//}
//}
// Iterate all samples
boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();
std::cout << '[' << boost::posix_time::to_simple_string(now) << "] " << "SV annotation" << std::endl;
boost::progress_display show_progress( totalTarget );
typedef std::vector<uint32_t> TRefAlignCount;
typedef std::vector<TRefAlignCount> TFileRefAlignCount;
TFileRefAlignCount refAlignedReadCount(c.files.size(), TRefAlignCount());
TFileRefAlignCount refAlignedSpanCount(c.files.size(), TRefAlignCount());
for(unsigned int file_c = 0; file_c < c.files.size(); ++file_c) {
refAlignedReadCount[file_c].resize(svs.size(), 0);
refAlignedSpanCount[file_c].resize(svs.size(), 0);
}
// Dump file
boost::iostreams::filtering_ostream dumpOut;
if (c.hasDumpFile) {
dumpOut.push(boost::iostreams::gzip_compressor());
dumpOut.push(boost::iostreams::file_sink(c.dumpfile.string().c_str(), std::ios_base::out | std::ios_base::binary));
dumpOut << "#svid\tbam\tqname\tchr\tpos\tmatechr\tmatepos\tmapq\ttype" << std::endl;
}
#pragma omp parallel for default(shared)
for(unsigned int file_c = 0; file_c < c.files.size(); ++file_c) {
// Pair qualities and features
typedef boost::unordered_map<std::size_t, uint8_t> TQualities;
TQualities qualities;
TQualities qualitiestra;
typedef boost::unordered_map<std::size_t, bool> TClip;
TClip clip;
TClip cliptra;
// Iterate chromosomes
for(int32_t refIndex=0; refIndex < (int32_t) hdr[file_c]->n_targets; ++refIndex) {
++show_progress;
// Any SV breakpoints on this chromosome?
if (!svOnChr[refIndex]) continue;
// Check we have mapped reads on this chromosome
bool nodata = true;
std::string suffix("cram");
std::string str(c.files[file_c].string());
if ((str.size() >= suffix.size()) && (str.compare(str.size() - suffix.size(), suffix.size(), suffix) == 0)) nodata = false;
uint64_t mapped = 0;
uint64_t unmapped = 0;
hts_idx_get_stat(idx[file_c], refIndex, &mapped, &unmapped);
if (mapped) nodata = false;
if (nodata) continue;
// Coverage track
typedef uint16_t TCount;
uint32_t maxCoverage = std::numeric_limits<TCount>::max();
typedef std::vector<TCount> TCoverage;
TCoverage covFragment(hdr[file_c]->target_len[refIndex], 0);
TCoverage covBases(hdr[file_c]->target_len[refIndex], 0);
// Flag breakpoint regions
typedef boost::dynamic_bitset<> TBitSet;
TBitSet bpOccupied(hdr[file_c]->target_len[refIndex]);
for(uint32_t i = 0; i < bpRegion[refIndex].size(); ++i) {
for(int32_t k = bpRegion[refIndex][i].regionStart; k < bpRegion[refIndex][i].regionEnd; ++k) {
bpOccupied[k] = 1;
}
}
// Flag spanning breakpoints
typedef std::vector<SpanPoint> TSpanPoint;
TSpanPoint spanPoint;
typedef boost::dynamic_bitset<> TBitSet;
TBitSet spanBp(hdr[file_c]->target_len[refIndex]);
for(typename TSVs::iterator itSV = svs.begin(); itSV != svs.end(); ++itSV) {
if (itSV->peSupport == 0) continue;
if ((itSV->chr == refIndex) && (itSV->svStart < (int32_t) hdr[file_c]->target_len[refIndex])) {
spanBp[itSV->svStart] = 1;
spanPoint.push_back(SpanPoint(itSV->svStart, itSV->svt, itSV->id, itSV->chr2, itSV->svEnd));
}
if ((itSV->chr2 == refIndex) && (itSV->svEnd < (int32_t) hdr[file_c]->target_len[refIndex])) {
spanBp[itSV->svEnd] = 1;
spanPoint.push_back(SpanPoint(itSV->svEnd, itSV->svt, itSV->id, itSV->chr, itSV->svStart));
}
}
std::sort(spanPoint.begin(), spanPoint.end(), SortBp<SpanPoint>());
// Count reads
hts_itr_t* iter = sam_itr_queryi(idx[file_c], refIndex, 0, hdr[file_c]->target_len[refIndex]);
bam1_t* rec = bam_init1();
int32_t lastAlignedPos = 0;
std::set<std::size_t> lastAlignedPosReads;
while (sam_itr_next(samfile[file_c], iter, rec) >= 0) {
if (rec->core.flag & (BAM_FSECONDARY | BAM_FQCFAIL | BAM_FDUP | BAM_FSUPPLEMENTARY | BAM_FUNMAP | BAM_FMUNMAP)) continue;
if (rec->core.qual < c.minGenoQual) continue;
// Count aligned basepair (small InDels)
{
uint32_t rp = 0; // reference pointer
uint32_t* cigar = bam_get_cigar(rec);
for (std::size_t i = 0; i < rec->core.n_cigar; ++i) {
if (bam_cigar_op(cigar[i]) == BAM_CMATCH) {
for(std::size_t k = 0; k<bam_cigar_oplen(cigar[i]);++k) {
if ((rec->core.pos + rp < hdr[file_c]->target_len[refIndex]) && (covBases[rec->core.pos + rp] < maxCoverage - 1)) ++covBases[rec->core.pos + rp];
++rp;
}
} else if (bam_cigar_op(cigar[i]) == BAM_CDEL) {
rp += bam_cigar_oplen(cigar[i]);
} else if (bam_cigar_op(cigar[i]) == BAM_CREF_SKIP) {
rp += bam_cigar_oplen(cigar[i]);
}
}
}
// Any (leading) soft clip
bool hasSoftClip = false;
bool hasClip = false;
int32_t leadingSC = 0;
uint32_t* cigar = bam_get_cigar(rec);
for (std::size_t i = 0; i < rec->core.n_cigar; ++i) {
if (bam_cigar_op(cigar[i]) == BAM_CSOFT_CLIP) {
hasClip = true;
hasSoftClip = true;
if (i == 0) leadingSC = bam_cigar_oplen(cigar[i]);
} else if (bam_cigar_op(cigar[i]) == BAM_CHARD_CLIP) hasClip = true;
}
// Check read length for junction annotation
if (rec->core.l_qseq >= (2 * c.minimumFlankSize)) {
bool bpvalid = false;
int32_t rbegin = std::max(0, (int32_t) rec->core.pos - leadingSC);
for(int32_t k = rbegin; ((k < (rec->core.pos + rec->core.l_qseq)) && (k < (int32_t) hdr[file_c]->target_len[refIndex])); ++k) {
if (bpOccupied[k]) {
bpvalid = true;
break;
}
}
if (bpvalid) {
// Fetch all relevant SVs
typename TBpRegion::iterator itBp = std::lower_bound(bpRegion[refIndex].begin(), bpRegion[refIndex].end(), BpRegion(rbegin), SortBp<BpRegion>());
for(; ((itBp != bpRegion[refIndex].end()) && (rec->core.pos + rec->core.l_qseq >= itBp->bppos)); ++itBp) {
if ((countMap[file_c][itBp->id].ref.size() + countMap[file_c][itBp->id].alt.size()) >= c.maxGenoReadCount) continue;
// Read spans breakpoint?
if ((hasSoftClip) || ((!hasClip) && (rec->core.pos + c.minimumFlankSize + itBp->homLeft <= itBp->bppos) && (rec->core.pos + rec->core.l_qseq >= itBp->bppos + c.minimumFlankSize + itBp->homRight))) {
std::string consProbe = consProbeArr[itBp->bpPoint][itBp->id];
std::string refProbe = refProbeArr[itBp->bpPoint][itBp->id];
// Get sequence
std::string sequence;
sequence.resize(rec->core.l_qseq);
uint8_t* seqptr = bam_get_seq(rec);
for (int i = 0; i < rec->core.l_qseq; ++i) sequence[i] = "=ACMGRSVTWYHKDBN"[bam_seqi(seqptr, i)];
_adjustOrientation(sequence, itBp->bpPoint, itBp->svt);
// Compute alignment to alternative haplotype
typedef boost::multi_array<char, 2> TAlign;
TAlign alignAlt;
DnaScore<int> simple(5, -4, -4, -4);
AlignConfig<true, false> semiglobal;
int32_t scoreA = needle(consProbe, sequence, alignAlt, semiglobal, simple);
int32_t scoreAltThreshold = (int32_t) (c.flankQuality * consProbe.size() * simple.match + (1.0 - c.flankQuality) * consProbe.size() * simple.mismatch);
double scoreAlt = (double) scoreA / (double) scoreAltThreshold;
// Compute alignment to reference haplotype
TAlign alignRef;
int32_t scoreR = needle(refProbe, sequence, alignRef, semiglobal, simple);
int32_t scoreRefThreshold = (int32_t) (c.flankQuality * refProbe.size() * simple.match + (1.0 - c.flankQuality) * refProbe.size() * simple.mismatch);
double scoreRef = (double) scoreR / (double) scoreRefThreshold;
// Any confident alignment?
if ((scoreRef > 1) || (scoreAlt > 1)) {
// Debug alignment to REF and ALT
//std::cerr << "Alt:\t" << scoreAlt << "\tRef:\t" << scoreRef << std::endl;
//for(TAIndex i = 0; i< (TAIndex) alignAlt.shape()[0]; ++i) {
//for(TAIndex j = 0; j< (TAIndex) alignAlt.shape()[1]; ++j) std::cerr << alignAlt[i][j];
//std::cerr << std::endl;
//}
//for(TAIndex i = 0; i< (TAIndex) alignRef.shape()[0]; ++i) {
//for(TAIndex j = 0; j< (TAIndex) alignRef.shape()[1]; ++j) std::cerr << alignRef[i][j];
//std::cerr << std::endl;
//}
if (scoreRef > scoreAlt) {
// Account for reference bias
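// (++count % 2 keeps every other REF-supporting read, i.e. REF evidence is
// downsampled by half; the same trick is applied to spanning pairs below)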
if (++refAlignedReadCount[file_c][itBp->id] % 2) {
TQuality quality;
quality.resize(rec->core.l_qseq);
uint8_t* qualptr = bam_get_qual(rec);
for (int i = 0; i < rec->core.l_qseq; ++i) quality[i] = qualptr[i];
uint32_t rq = _getAlignmentQual(alignRef, quality);
if (rq >= c.minGenoQual) {
uint8_t* hpptr = bam_aux_get(rec, "HP");
#pragma omp critical
{
countMap[file_c][itBp->id].ref.push_back((uint8_t) std::min(rq, (uint32_t) rec->core.qual));
if (hpptr) {
c.isHaplotagged = true;
int hap = bam_aux2i(hpptr);
if (hap == 1) ++countMap[file_c][itBp->id].refh1;
else ++countMap[file_c][itBp->id].refh2;
}
}
}
}
} else {
TQuality quality;
quality.resize(rec->core.l_qseq);
uint8_t* qualptr = bam_get_qual(rec);
for (int i = 0; i < rec->core.l_qseq; ++i) quality[i] = qualptr[i];
uint32_t aq = _getAlignmentQual(alignAlt, quality);
if (aq >= c.minGenoQual) {
uint8_t* hpptr = bam_aux_get(rec, "HP");
#pragma omp critical
{
if (c.hasDumpFile) {
std::string svid(_addID(itBp->svt));
std::string padNumber = boost::lexical_cast<std::string>(itBp->id);
padNumber.insert(padNumber.begin(), 8 - padNumber.length(), '0');
svid += padNumber;
dumpOut << svid << "\t" << c.files[file_c].string() << "\t" << bam_get_qname(rec) << "\t" << hdr[file_c]->target_name[rec->core.tid] << "\t" << rec->core.pos << "\t" << hdr[file_c]->target_name[rec->core.mtid] << "\t" << rec->core.mpos << "\t" << (int32_t) rec->core.qual << "\tSR" << std::endl;
}
countMap[file_c][itBp->id].alt.push_back((uint8_t) std::min(aq, (uint32_t) rec->core.qual));
if (hpptr) {
c.isHaplotagged = true;
int hap = bam_aux2i(hpptr);
if (hap == 1) ++countMap[file_c][itBp->id].alth1;
else ++countMap[file_c][itBp->id].alth2;
}
}
}
}
}
}
}
}
}
// Read-count and spanning annotation
if ((!(rec->core.flag & BAM_FPAIRED)) || (!svOnChr[rec->core.mtid])) continue;
// Clean-up the read store for identical alignment positions
if (rec->core.pos > lastAlignedPos) {
lastAlignedPosReads.clear();
lastAlignedPos = rec->core.pos;
}
if (_firstPairObs(rec, lastAlignedPosReads)) {
// First read
lastAlignedPosReads.insert(hash_string(bam_get_qname(rec)));
std::size_t hv = hash_pair(rec);
if (rec->core.tid == rec->core.mtid) {
qualities[hv] = rec->core.qual;
clip[hv] = hasSoftClip;
} else {
qualitiestra[hv] = rec->core.qual;
cliptra[hv] = hasSoftClip;
}
} else {
// Second read
std::size_t hv = hash_pair_mate(rec);
uint8_t pairQuality = 0;
bool pairClip = false;
if (rec->core.tid == rec->core.mtid) {
if (qualities.find(hv) == qualities.end()) continue; // Mate discarded
pairQuality = std::min((uint8_t) qualities[hv], (uint8_t) rec->core.qual);
if ((clip[hv]) || (hasSoftClip)) pairClip = true;
qualities[hv] = 0;
clip[hv] = false;
} else {
if (qualitiestra.find(hv) == qualitiestra.end()) continue; // Mate discarded
pairQuality = std::min((uint8_t) qualitiestra[hv], (uint8_t) rec->core.qual);
if ((cliptra[hv]) || (hasSoftClip)) pairClip = true;
qualitiestra[hv] = 0;
cliptra[hv] = false;
}
// Pair quality
if (pairQuality < c.minGenoQual) continue; // Low quality pair
// Read-depth fragment counting
if (rec->core.tid == rec->core.mtid) {
// Count mid point (fragment counting)
int32_t midPoint = rec->core.pos + halfAlignmentLength(rec);
if ((midPoint < (int32_t) hdr[file_c]->target_len[refIndex]) && (covFragment[midPoint] < maxCoverage - 1)) ++covFragment[midPoint];
}
// Spanning counting
int32_t outerISize = 0;
if (rec->core.pos < rec->core.mpos) outerISize = rec->core.mpos + rec->core.l_qseq - rec->core.pos;
else outerISize = rec->core.pos + rec->core.l_qseq - rec->core.mpos;
// Get the library information
if (sampleLib[file_c].median == 0) continue; // Single-end library or invalid library
// Normal spanning pair
if ((!pairClip) && (getSVType(rec) == 2) && (outerISize >= sampleLib[file_c].minNormalISize) && (outerISize <= sampleLib[file_c].maxNormalISize) && (rec->core.tid==rec->core.mtid)) {
// Take 80% of the outer insert size as the spanned interval
int32_t spanlen = 0.8 * outerISize;
int32_t pbegin = std::min((int32_t) rec->core.pos, (int32_t) rec->core.mpos);
int32_t st = pbegin + (outerISize - spanlen) / 2;
bool spanvalid = false;
for(int32_t i = st; ((i < (st + spanlen)) && (i < (int32_t) hdr[file_c]->target_len[refIndex])); ++i) {
if (spanBp[i]) {
spanvalid = true;
break;
}
}
if (spanvalid) {
// Fetch all relevant SVs
typename TSpanPoint::iterator itSpan = std::lower_bound(spanPoint.begin(), spanPoint.end(), SpanPoint(st), SortBp<SpanPoint>());
for(; ((itSpan != spanPoint.end()) && (st + spanlen >= itSpan->bppos)); ++itSpan) {
// Account for reference bias
if (++refAlignedSpanCount[file_c][itSpan->id] % 2) {
uint8_t* hpptr = bam_aux_get(rec, "HP");
#pragma omp critical
{
spanMap[file_c][itSpan->id].ref.push_back(pairQuality);
if (hpptr) {
c.isHaplotagged = true;
int hap = bam_aux2i(hpptr);
if (hap == 1) ++spanMap[file_c][itSpan->id].refh1;
else ++spanMap[file_c][itSpan->id].refh2;
}
}
}
}
}
}
// Abnormal spanning coverage
if ((getSVType(rec) != 2) || (outerISize < sampleLib[file_c].minNormalISize) || (outerISize > sampleLib[file_c].maxNormalISize) || (rec->core.tid!=rec->core.mtid)) {
// SV type
int32_t svt = _isizeMappingPos(rec, sampleLib[file_c].maxISizeCutoff);
if (svt == -1) continue;
// Spanning a breakpoint?
bool spanvalid = false;
int32_t pbegin = rec->core.pos;
int32_t pend = std::min((int32_t) rec->core.pos + sampleLib[file_c].maxNormalISize, (int32_t) hdr[file_c]->target_len[refIndex]);
if (rec->core.flag & BAM_FREVERSE) {
pbegin = std::max(0, (int32_t) rec->core.pos + rec->core.l_qseq - sampleLib[file_c].maxNormalISize);
pend = std::min((int32_t) rec->core.pos + rec->core.l_qseq, (int32_t) hdr[file_c]->target_len[refIndex]);
}
for(int32_t i = pbegin; i < pend; ++i) {
if (spanBp[i]) {
spanvalid = true;
break;
}
}
if (spanvalid) {
// Fetch all relevant SVs
typename TSpanPoint::iterator itSpan = std::lower_bound(spanPoint.begin(), spanPoint.end(), SpanPoint(pbegin), SortBp<SpanPoint>());
for(; ((itSpan != spanPoint.end()) && (pend >= itSpan->bppos)); ++itSpan) {
if (svt == itSpan->svt) {
// Make sure, mate is correct
if (rec->core.mtid == itSpan->chr2) {
if (std::abs((int32_t) rec->core.mpos - itSpan->otherBppos) < sampleLib[file_c].maxNormalISize) {
uint8_t* hpptr = bam_aux_get(rec, "HP");
#pragma omp critical
{
if (c.hasDumpFile) {
std::string svid(_addID(itSpan->svt));
std::string padNumber = boost::lexical_cast<std::string>(itSpan->id);
padNumber.insert(padNumber.begin(), 8 - padNumber.length(), '0');
svid += padNumber;
dumpOut << svid << "\t" << c.files[file_c].string() << "\t" << bam_get_qname(rec) << "\t" << hdr[file_c]->target_name[rec->core.tid] << "\t" << rec->core.pos << "\t" << hdr[file_c]->target_name[rec->core.mtid] << "\t" << rec->core.mpos << "\t" << (int32_t) rec->core.qual << "\tPE" << std::endl;
}
spanMap[file_c][itSpan->id].alt.push_back(pairQuality);
if (hpptr) {
c.isHaplotagged = true;
int hap = bam_aux2i(hpptr);
if (hap == 1) ++spanMap[file_c][itSpan->id].alth1;
else ++spanMap[file_c][itSpan->id].alth2;
}
}
}
}
}
}
}
}
}
}
// Clean-up
bam_destroy1(rec);
hts_itr_destroy(iter);
qualities.clear();
clip.clear();
// Assign fragment and base counts to SVs
for(uint32_t i = 0; i < svs.size(); ++i) {
if (svs[i].chr == refIndex) {
// Small or large SV
bool smallSV = false;
int32_t halfSize = (svs[i].svEnd - svs[i].svStart)/2;
if ((_translocation(svs[i].svt)) || (svs[i].svt == 4)) {
halfSize = 500;
smallSV = true;
} else {
if ((svs[i].svEnd - svs[i].svStart) <= c.indelsize) smallSV = true;
}
// Left region
int32_t lstart = std::max(svs[i].svStart - halfSize, 0);
int32_t lend = svs[i].svStart;
int32_t covbase = 0;
for(uint32_t k = lstart; ((k < (uint32_t) lend) && (k < hdr[0]->target_len[refIndex])); ++k) {
if (smallSV) covbase += covBases[k];
else covbase += covFragment[k];
}
covCount[file_c][svs[i].id].leftRC = covbase;
// Actual SV
covbase = 0;
int32_t mstart = svs[i].svStart;
int32_t mend = svs[i].svEnd;
if ((_translocation(svs[i].svt)) || (svs[i].svt == 4)) {
mstart = std::max(svs[i].svStart - halfSize, 0);
mend = std::min(svs[i].svStart + halfSize, (int32_t) hdr[0]->target_len[refIndex]);
}
for(uint32_t k = mstart; ((k < (uint32_t) mend) && (k < hdr[0]->target_len[refIndex])); ++k) {
if (smallSV) covbase += covBases[k];
else covbase += covFragment[k];
}
covCount[file_c][svs[i].id].rc = covbase;
// Right region
covbase = 0;
int32_t rstart = svs[i].svEnd;
int32_t rend = std::min(svs[i].svEnd + halfSize, (int32_t) hdr[0]->target_len[refIndex]);
if ((_translocation(svs[i].svt)) || (svs[i].svt == 4)) {
rstart = svs[i].svStart;
rend = std::min(svs[i].svStart + halfSize, (int32_t) hdr[0]->target_len[refIndex]);
}
for(uint32_t k = rstart; ((k < (uint32_t) rend) && (k < hdr[0]->target_len[refIndex])); ++k) {
if (smallSV) covbase += covBases[k];
else covbase += covFragment[k];
}
covCount[file_c][svs[i].id].rightRC = covbase;
}
}
}
}
// Clean-up
for(unsigned int file_c = 0; file_c < c.files.size(); ++file_c) {
bam_hdr_destroy(hdr[file_c]);
hts_idx_destroy(idx[file_c]);
sam_close(samfile[file_c]);
}
}
}
#endif
|
UniOP.h | #ifndef UNIOP_H_
#define UNIOP_H_
/*
* UniOP.h:
* a simple feed forward neural operation, unary input.
*
* Created on: Apr 22, 2017
* Author: mszhang
*/
#include "Param.h"
#include "MyLib.h"
#include "Node.h"
#include "Graph.h"
#include "ModelUpdate.h"
class UniParams {
public:
Param W;
Param b;
bool bUseB;
public:
UniParams() {
bUseB = true;
}
inline void exportAdaParams(ModelUpdate& ada) {
ada.addParam(&W);
if (bUseB) {
ada.addParam(&b);
}
}
inline void initial(int nOSize, int nISize, bool useB = true) {
W.initial(nOSize, nISize);
bUseB = useB;
if (bUseB) {
b.initial(nOSize, 1);
}
}
inline void save(std::ofstream &os) const {
os << bUseB << std::endl;
W.save(os);
if (bUseB) {
b.save(os);
}
}
inline void load(std::ifstream &is) {
is >> bUseB;
W.load(is);
if (bUseB) {
b.load(is);
}
}
};
// non-linear feed-forward node
// input nodes should be specified by the forward function
// for input variables, we use column vectors,
// i.e. a concrete input vector x_i is represented by x(0, i), x(1, i), ..., x(n, i)
class UniNode : public Node {
public:
PNode in;
UniParams* param;
dtype(*activate)(const dtype&); // activation function
dtype(*derivate)(const dtype&, const dtype&); // derivative of the activation function
Tensor1D ty, lty;
public:
UniNode() : Node() {
in = NULL;
activate = ftanh;
derivate = dtanh;
param = NULL;
node_type = "uni";
}
~UniNode() {
in = NULL;
}
inline void init(int ndim, dtype dropout) {
Node::init(ndim, dropout);
ty.init(ndim);
lty.init(ndim);
}
inline void setParam(UniParams* paramInit) {
param = paramInit;
}
inline void clearValue() {
Node::clearValue();
in = NULL;
ty = 0;
lty = 0;
}
// set the activation function and its derivative
inline void setFunctions(dtype(*f)(const dtype&), dtype(*f_deri)(const dtype&, const dtype&)) {
activate = f;
derivate = f_deri;
}
public:
void forward(Graph *cg, PNode x) {
in = x;
degree = 0;
in->addParent(this);
cg->addNode(this);
}
public:
inline void compute() {
ty.mat() = param->W.val.mat() * in->val.mat();
if (param->bUseB) {
ty.vec() += param->b.val.vec();
}
val.vec() = ty.vec().unaryExpr(ptr_fun(activate));
}
inline void backward() {
lty.vec() = loss.vec() * ty.vec().binaryExpr(val.vec(), ptr_fun(derivate));
param->W.grad.mat() += lty.mat() * in->val.tmat();
if (param->bUseB) {
param->b.grad.vec() += lty.vec();
}
in->loss.mat() += param->W.val.mat().transpose() * lty.mat();
}
public:
inline PExecute generate(bool bTrain, dtype cur_drop_factor);
// better to rewrite for deep understanding
inline bool typeEqual(PNode other) {
bool result = Node::typeEqual(other);
if (!result) return false;
UniNode* conv_other = (UniNode*)other;
if (param != conv_other->param) {
return false;
}
if (activate != conv_other->activate || derivate != conv_other->derivate) {
return false;
}
return true;
}
};
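// A minimal usage sketch for UniNode (hedged: the Graph 'cg' and the input
// node 'x' are assumed to be created elsewhere, and the sizes are illustrative):
//   UniParams p;
//   p.initial(/*nOSize=*/50, /*nISize=*/100);  // y = activate(W*x + b)
//   UniNode h;
//   h.init(/*ndim=*/50, /*dropout=*/0.0);
//   h.setParam(&p);
//   h.forward(&cg, x);  // registers the node in the computation graph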
// linear feed-forward node (affine transform, no activation)
// input nodes should be specified by the forward function
// for input variables, we use column vectors,
// i.e. a concrete input vector x_i is represented by x(0, i), x(1, i), ..., x(n, i)
class LinearUniNode : public Node {
public:
PNode in;
UniParams* param;
public:
LinearUniNode() : Node() {
in = NULL;
param = NULL;
node_type = "linear_uni";
}
inline void setParam(UniParams* paramInit) {
param = paramInit;
}
inline void clearValue() {
Node::clearValue();
in = NULL;
}
public:
void forward(Graph *cg, PNode x) {
in = x;
degree = 0;
in->addParent(this);
cg->addNode(this);
}
public:
inline void compute() {
val.mat() = param->W.val.mat() * in->val.mat();
if (param->bUseB) {
val.vec() += param->b.val.vec();
}
}
inline void backward() {
param->W.grad.mat() += loss.mat() * in->val.tmat();
if (param->bUseB) {
param->b.grad.vec() += loss.vec();
}
in->loss.mat() += param->W.val.mat().transpose() * loss.mat();
}
public:
inline PExecute generate(bool bTrain, dtype cur_drop_factor);
// better to rewrite for deep understanding
inline bool typeEqual(PNode other) {
bool result = Node::typeEqual(other);
if (!result) return false;
LinearUniNode* conv_other = (LinearUniNode*)other;
if (param != conv_other->param) {
return false;
}
return true;
}
};
// linear feed-forward node (matrix multiplication only; no bias, no activation)
// input nodes should be specified by the forward function
// for input variables, we use column vectors,
// i.e. a concrete input vector x_i is represented by x(0, i), x(1, i), ..., x(n, i)
class LinearNode : public Node {
public:
PNode in;
UniParams* param;
public:
LinearNode() : Node() {
in = NULL;
param = NULL;
node_type = "linear";
}
inline void setParam(UniParams* paramInit) {
param = paramInit;
}
inline void clearValue() {
Node::clearValue();
in = NULL;
}
public:
void forward(Graph *cg, PNode x) {
in = x;
degree = 0;
in->addParent(this);
cg->addNode(this);
}
public:
inline void compute() {
val.mat() = param->W.val.mat() * in->val.mat();
}
inline void backward() {
param->W.grad.mat() += loss.mat() * in->val.tmat();
in->loss.mat() += param->W.val.mat().transpose() * loss.mat();
}
public:
inline PExecute generate(bool bTrain, dtype cur_drop_factor);
// better to rewrite for deep understanding
inline bool typeEqual(PNode other) {
bool result = Node::typeEqual(other);
if (!result) return false;
LinearNode* conv_other = (LinearNode*)other;
if (param != conv_other->param) {
return false;
}
return true;
}
};
class UniExecute :public Execute {
public:
bool bTrain;
public:
inline void forward() {
int count = batch.size();
//#pragma omp parallel for
for (int idx = 0; idx < count; idx++) {
batch[idx]->compute();
batch[idx]->forward_drop(bTrain, drop_factor);
}
}
inline void backward() {
int count = batch.size();
//#pragma omp parallel for
for (int idx = 0; idx < count; idx++) {
batch[idx]->backward_drop();
batch[idx]->backward();
}
}
};
inline PExecute UniNode::generate(bool bTrain, dtype cur_drop_factor) {
UniExecute* exec = new UniExecute();
exec->batch.push_back(this);
exec->bTrain = bTrain;
exec->drop_factor = cur_drop_factor;
return exec;
};
class LinearUniExecute :public Execute {
public:
bool bTrain;
public:
inline void forward() {
int count = batch.size();
//#pragma omp parallel for
for (int idx = 0; idx < count; idx++) {
batch[idx]->compute();
batch[idx]->forward_drop(bTrain, drop_factor);
}
}
inline void backward() {
int count = batch.size();
//#pragma omp parallel for
for (int idx = 0; idx < count; idx++) {
batch[idx]->backward_drop();
batch[idx]->backward();
}
}
};
inline PExecute LinearUniNode::generate(bool bTrain, dtype cur_drop_factor) {
LinearUniExecute* exec = new LinearUniExecute();
exec->batch.push_back(this);
exec->bTrain = bTrain;
exec->drop_factor = cur_drop_factor;
return exec;
};
class LinearExecute :public Execute {
public:
bool bTrain;
public:
inline void forward() {
int count = batch.size();
//#pragma omp parallel for
for (int idx = 0; idx < count; idx++) {
batch[idx]->compute();
batch[idx]->forward_drop(bTrain, drop_factor);
}
}
inline void backward() {
int count = batch.size();
//#pragma omp parallel for
for (int idx = 0; idx < count; idx++) {
batch[idx]->backward_drop();
batch[idx]->backward();
}
}
};
inline PExecute LinearNode::generate(bool bTrain, dtype cur_drop_factor) {
LinearExecute* exec = new LinearExecute();
exec->batch.push_back(this);
exec->bTrain = bTrain;
exec->drop_factor = cur_drop_factor;
return exec;
};
#endif /* UNIOP_H_ */
|
common.c | /*******************************************************************************
Collective Matrix Factorization
-------------------------------
This is a module for multi-way factorization of sparse and dense matrices
intended to be used for recommender system with explicit feedback data plus
side information about users and/or items.
The reference papers are:
(a) Cortes, David.
"Cold-start recommendations in Collective Matrix Factorization."
arXiv preprint arXiv:1809.00366 (2018).
(b) Singh, Ajit P., and Geoffrey J. Gordon.
"Relational learning via collective matrix factorization."
Proceedings of the 14th ACM SIGKDD international conference on
Knowledge discovery and data mining. 2008.
(c) Hu, Yifan, Yehuda Koren, and Chris Volinsky.
"Collaborative filtering for implicit feedback datasets."
2008 Eighth IEEE International Conference on Data Mining.
Ieee, 2008.
(d) Takacs, Gabor, Istvan Pilaszy, and Domonkos Tikk.
"Applications of the conjugate gradient method for
implicit feedback collaborative filtering."
Proceedings of the fifth ACM conference on
Recommender systems. 2011.
(e) Rendle, Steffen, Li Zhang, and Yehuda Koren.
"On the difficulty of evaluating baselines:
A study on recommender systems."
arXiv preprint arXiv:1905.01395 (2019).
(f) Franc, Vojtech, Vaclav Hlavac, and Mirko Navara.
"Sequential coordinate-wise algorithm for the
non-negative least squares problem."
International Conference on Computer Analysis of Images
and Patterns. Springer, Berlin, Heidelberg, 2005.
(g) Zhou, Yunhong, et al.
"Large-scale parallel collaborative filtering for
the netflix prize."
International conference on algorithmic applications in management.
Springer, Berlin, Heidelberg, 2008.
For information about the models offered here and how they are fit to
the data, see the files 'collective.c' and 'offsets.c'.
Written for C99 standard and OpenMP version 2.0 or higher, and aimed to be
used either as a stand-alone program, or wrapped into scripting languages
such as Python and R.
<https://www.github.com/david-cortes/cmfrec>
MIT License:
Copyright (c) 2020-2021 David Cortes
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
*******************************************************************************/
#include "cmfrec.h"
/* Note: the descriptions about input parameters of the functions might be
outdated and might not match the actual code anymore. */
/*******************************************************************************
Function and Gradient for canonical form
----------------------------------------
This function computes the gradients in the canonical-form problem:
min 0.5 * scaling * ||M . W . (X - A*t(B) - bias1 - bias2 - mu)||^2
(Note that it will not add regularization into the formula)
See the files "collective.c" and "offsets.c" for details on what the
formula represents.
The gradients are given as:
E = scaling * (M . W . (A*t(B) - X))
grad(bias1) = sum_cols(E)
grad(bias2) = sum_rows(E)
grad(A) = E * B
grad(B) = t(E) * A
Since the formulas here ignore missing entries, when the matrices are
sparse, the gradients can be calculated more easily as follows:
grad(A) := 0
grad(B) := 0
For i..nnz:
err = <A[row], B[col]> - X[row,col]
grad(A[row]) += B[col] * err
grad(B[col]) += A[row] * err
In order to speed things up, this loop can be parallelized in two ways:
- Let each thread/worker have separate matrices grad(A), grad(B), to which
each adds its own portions based on the non-zero entries it is assigned,
then sum them up into a common matrix (this part can also be parallelized
by rows). This approach is the fastest, but letting each thread/worker
have a full copy of grad(A), grad(B) can require a lot of memory.
- Have copies of X in CSR and CSC formats, then iterate **twice** over it:
(a) first by rows (using the CSR matrix), letting each thread write into
a different row at a time (no race conditions).
(b) then by columns (using the CSC matrix), letting each thread write into
a different column at a time.
This approach is slower, as it requires iterating twice, thus computing
twice as many dot products, but it requires less memory as each thread
writes into the final matrices grad(A), grad(B).
This module implements both approaches, but it is suggested to use the
first one if memory constraints allow for it.
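
    As a hedged sketch of the first strategy (illustrative only: the
    buffer names and the calloc/reduction steps are hypothetical, while
    cblas_tdot/cblas_taxpy are the same typed wrappers used below), with
    k-contiguous rows assumed (lda = ldb = k) and no biases or weights:

        real_t *g_A_t = calloc((size_t)nthreads*m*k, sizeof(real_t));
        real_t *g_B_t = calloc((size_t)nthreads*n*k, sizeof(real_t));
        #pragma omp parallel for
        for (size_t ix = 0; ix < nnz; ix++) {
            size_t tid = omp_get_thread_num();
            size_t ia = ixA[ix], ib = ixB[ix];
            real_t err = cblas_tdot(k, A + ia*k, 1, B + ib*k, 1) - X[ix];
            cblas_taxpy(k, err, B + ib*k, 1, g_A_t + (tid*m + ia)*k, 1);
            cblas_taxpy(k, err, A + ia*k, 1, g_B_t + (tid*n + ib)*k, 1);
        }
        for (int t = 0; t < nthreads; t++)     // sum per-thread copies
            for (size_t i = 0; i < (size_t)m*k; i++)
                g_A[i] += g_A_t[(size_t)t*m*k + i];
        // ... same reduction for g_B, then free both buffers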
If passing a dense matrix X as input, need to pass a temporary buffer
of dimensions m * n regardless.
Note that the arrays for the gradients must be passed already initialized:
that is, they must be set to all-zeros to obtain the full gradient, or to
the already-obtained gradients from the main factorization if using it for
the collective model.
Parameters
----------
A[m * lda]
Array with the A parameters. Only the portion A(:m,:k) will be taken.
lda
Leading dimension of the array A (>= k).
B[n * ldb]
Array with the B parameters. Only the portion B(:n,:k) will be taken.
ldb
Leading dimension of the array B (>= k).
g_A[m * lda], g_B[n * ldb] (out)
Arrays to which to sum the computed gradients for A and B.
m
Number of rows in X.
n
Number of columns in X.
k
Dimensionality of the A and B matrices (a.k.a. latent factors)
ixA[nnz], ixB[nnz], X[nnz], nnz
Matrix X in triplets format (row,col,x). Should only pass one of
'X', 'Xfull', 'Xcsr/Xcsc'. If 'Xfull' is passed, it will be preferred.
Pass NULL if 'X' will be passed in a different format.
Xfull[m * n]
Matrix X in dense form, with missing entries as NAN. Should only pass
one of 'X', 'Xfull', 'Xcsr/Xcsc'. If 'Xfull' is passed, it will be
preferred. Pass NULL if 'X' will be passed in a different format.
full_dense
Whether the X matrix contains no missing values.
Xcsr_p[m+1], Xcsr_i[nnz], Xcsr[nnz], Xcsc_p[n+1], Xcsc_i[nnz], Xcsc[nnz]
Matrix X in both CSR and CSC formats, in which array 'p' indicates the
starting position of a row for CSR and column for CSC in the Xcsr array,
and array 'i' indicates the corresponding column for CSR and row for CSC
for that entry. Should only pass one of 'X', 'Xfull', 'Xcsr/Xcsc'. If
'Xfull' is passed, it will be preferred. Pass NULL if 'X' will be passed
in a different format.
user_bias, item_bias
Whether user/row and/or item/column biases are to be used. Ignored if
passing 'overwrite_grad' = 'false'.
biasA[m], biasB[n]
Arrays containing the row/column biases. Pass NULL if not used.
g_biasA[m], g_biasB[n] (out)
Arrays in which to sum the gradient calculations for the biases.
weight[nnz or m*n], weightR[nnz], weightC[nnz]
Observation weights for X. Must match the shape of X - that is, either
'nnz' for sparse, or 'm*n' for dense. If passing CSR/CSC matrices for X,
must pass these in variables 'weightR' and 'weightC', otherwise, should
be passed in 'weight'. Pass NULL if not used.
scaling
Scaling to add to the objective function. Pass '1' for no scaling.
buffer_real_t[m*n]
If passing a dense matrix, temporary array which will be overwritten.
Not required for sparse matrices.
buffer_mt[nthreads * k * (m+n+1)]
If passing X and nthreads > 1, will be used as a temporary array into
which to copy thread-local values for the one-pass parallelization
strategy described at the top. Not required if passing Xcsr/Xcsc,
Xfull, or nthreads = 1.
overwrite_grad
Whether it is allowed to overwrite the gradient arrays. If passing
'false', will ignore the biases. If passing 'true', will assume that
all the gradient arrays are contiguous in memory. The reasoning behind
this is to allow for a more numerically precise calculation when there
is some scaling parameter.
nthreads
Number of parallel threads to use. Note that BLAS and LAPACK threads
are not controlled through this function.
Returns
-------
f : The evaluated function value.
*******************************************************************************/
real_t fun_grad_cannonical_form
(
real_t *restrict A, int_t lda, real_t *restrict B, int_t ldb,
real_t *restrict g_A, real_t *restrict g_B,
int_t m, int_t n, int_t k,
int_t ixA[], int_t ixB[], real_t *restrict X, size_t nnz,
real_t *restrict Xfull, bool full_dense,
size_t Xcsr_p[], int_t Xcsr_i[], real_t *restrict Xcsr,
size_t Xcsc_p[], int_t Xcsc_i[], real_t *restrict Xcsc,
bool user_bias, bool item_bias,
real_t *restrict biasA, real_t *restrict biasB,
real_t *restrict g_biasA, real_t *restrict g_biasB,
real_t *restrict weight, real_t *restrict weightR, real_t *restrict weightC,
real_t scaling,
real_t *restrict buffer_real_t,
real_t *restrict buffer_mt,
int nthreads
)
{
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long ix, ia, ib;
#else
size_t ia = 0, ib = 0;
#endif
double f = 0;
double corr = 0;
double tempf = 0;
double fsum = 0;
/* these will be de-referenced, but not updated */
if (!user_bias)
{
if (!item_bias)
g_biasA = g_A;
else if (g_A == g_biasB + n)
g_biasA = g_biasB;
else
g_biasA = g_A;
}
if (!item_bias)
{
if (user_bias && (g_A == g_biasA + m || g_biasA == g_A))
g_biasB = g_biasA;
else if (!user_bias &&
g_B == g_A + (size_t)m*(size_t)lda - (size_t)(lda-k))
g_biasB = g_A;
else
g_biasB = g_B;
}
real_t err;
size_t m_by_n = (size_t)m * (size_t)n;
bool parallel_onepass = (Xfull == NULL && nthreads > 1 &&
Xcsr == NULL && buffer_mt != NULL);
if (parallel_onepass)
set_to_zero_(buffer_mt, (size_t)nthreads
* ( (size_t)(k + (int)user_bias) * (size_t)m
+(size_t)(k + (int)item_bias) * (size_t)n),
nthreads);
if (Xfull == NULL) /* sparse input with NAs - this is the expected case */
{
/* This is the loop as explained at the top.
Note about the differentiation here: if using it for the main
factorization (which allows biases), it's not a problem to overwrite
the gradient matrices over their full leading dimension, and these
are expected to be all continuous in a memory layout. When there is
some scaling applied, it is more numerically precise to apply the
scaling after all the variables have already been summed, but if the
gradients already have some information from a previous factorization
to which these should add instead of overwrite them, it's necessary
to apply the scaling observation-by-observation instead */
#ifdef _OPENMP
if ( nthreads <= 1 ||
(Xcsr == NULL && !parallel_onepass) ||
(Xcsr != NULL && nthreads <= 2) )
#endif
{
for (size_t ix = 0; ix < nnz; ix++)
{
ia = (size_t)ixA[ix];
ib = (size_t)ixB[ix];
err = cblas_tdot(k, A + ia*(size_t)lda, 1,
B + ib*(size_t)ldb, 1)
- X[ix];
err += (user_bias? biasA[ia] : 0.)
+ (item_bias? biasB[ib] : 0.);
tempf = square(err)*((weight==NULL)? 1. : weight[ix])-corr;
fsum = f + tempf;
corr = (fsum - f) - tempf;
f = fsum;
err *= ((weight == NULL)? 1. : weight[ix]);
g_biasA[ia] += user_bias? err : 0.;
g_biasB[ib] += item_bias? err : 0.;
cblas_taxpy(k, err, B + ib*(size_t)ldb, 1,
g_A + ia*(size_t)lda, 1);
cblas_taxpy(k, err, A + ia*(size_t)lda, 1,
g_B + ib*(size_t)ldb, 1);
}
}
#ifdef _OPENMP
else if (parallel_onepass)
{
size_t thr_szA = (size_t)m*(size_t)k;
size_t thr_szB = (size_t)n*(size_t)k;
real_t *restrict g_A_t = buffer_mt;
real_t *restrict g_B_t = g_A_t + (size_t)nthreads*thr_szA;
real_t *restrict g_biasA_t = g_B_t + (size_t)nthreads*thr_szB;
real_t *restrict g_biasB_t = g_biasA_t
+ (user_bias?
((size_t)nthreads * (size_t)m)
: (0));
#pragma omp parallel for schedule(static) num_threads(nthreads)\
reduction(+:f) private(ia, ib, err) \
shared(ixA, ixB, X, nnz, A, B, lda, ldb, k, weight, \
scaling, thr_szA, thr_szB, g_biasA_t, g_biasB_t,\
biasA, biasB, user_bias, item_bias, m, n)
for (size_t_for ix = 0; ix < nnz; ix++)
{
ia = (size_t)ixA[ix];
ib = (size_t)ixB[ix];
err = cblas_tdot(k, A + ia*(size_t)lda, 1,
B + ib*(size_t)ldb, 1)
+ (user_bias? biasA[ia] : 0.)
+ (item_bias? biasB[ib] : 0.)
- X[ix];
f += square(err) * ((weight == NULL)? 1. : weight[ix]);
err *= ((weight == NULL)? 1. : weight[ix]);
g_biasA_t[ia + (size_t)m*(size_t)(omp_get_thread_num())]
+= user_bias? err : 0.;
g_biasB_t[ib + (size_t)n*(size_t)(omp_get_thread_num())]
+= item_bias? err : 0.;
cblas_taxpy(k, err, B + ib*(size_t)ldb, 1,
g_A_t + (size_t)(omp_get_thread_num())*thr_szA
+ ia*(size_t)lda, 1);
cblas_taxpy(k, err, A + ia*(size_t)lda, 1,
g_B_t + (size_t)(omp_get_thread_num())*thr_szB
+ ib*(size_t)ldb, 1);
}
reduce_mat_sum(g_A, lda, g_A_t,
m, k, nthreads);
reduce_mat_sum(g_B, ldb, g_B_t,
n, k, nthreads);
if (user_bias)
reduce_mat_sum(g_biasA, 1, g_biasA_t,
m, 1, nthreads);
if (item_bias)
reduce_mat_sum(g_biasB, 1, g_biasB_t,
n, 1, nthreads);
}
else
{
#pragma omp parallel for schedule(dynamic) reduction(+:f) \
num_threads(nthreads) private(err, ib, tempf) \
shared(m, k, A, B, lda, ldb, Xcsr, Xcsr_p, Xcsr_i, \
scaling, weightR, g_A, user_bias, item_bias, \
biasA, biasB, g_biasA)
for (ia = 0; ia < (size_t)m; ia++)
{
tempf = 0;
for (size_t ix = (size_t)Xcsr_p[ia];
ix < (size_t)Xcsr_p[ia+(size_t)1]; ix++)
{
ib = (size_t)Xcsr_i[ix];
err = cblas_tdot(k, A + ia*(size_t)lda, 1,
B + ib*(size_t)ldb, 1)
+ (user_bias? biasA[ia] : 0.)
+ (item_bias? biasB[ib] : 0.)
- Xcsr[ix];
tempf += square(err)
* ((weightR == NULL)? 1. : weightR[ix]);
err *= ((weightR == NULL)? 1. : weightR[ix]);
g_biasA[ia] += user_bias? err : 0.;
cblas_taxpy(k, err, B + ib*(size_t)ldb, 1,
g_A + ia*(size_t)lda, 1);
}
f += tempf;
}
#pragma omp parallel for schedule(dynamic) \
num_threads(nthreads) private(err, ia) \
shared(n, k, A, B, lda, ldb, Xcsc, Xcsc_p, Xcsc_i, \
scaling, weightC, g_B, user_bias, item_bias, \
biasA, biasB, g_biasB)
for (ib = 0; ib < (size_t)n; ib++)
{
for (size_t ix = (size_t)Xcsc_p[ib];
ix < (size_t)Xcsc_p[ib+(size_t)1]; ix++)
{
ia = (size_t)Xcsc_i[ix];
err = cblas_tdot(k, A + ia*(size_t)lda, 1,
B + ib*(size_t)ldb, 1)
+ (user_bias? biasA[ia] : 0.)
+ (item_bias? biasB[ib] : 0.)
- Xcsc[ix];
err *= ((weightC == NULL)? 1. : weightC[ix]);
g_biasB[ib] += item_bias? err : 0.;
cblas_taxpy(k, err, A + ia*(size_t)lda, 1,
g_B + ib*(size_t)ldb, 1);
}
}
}
#endif
#pragma omp barrier
if (scaling != 1.)
{
/* Note: the gradients should be contiguous in memory in the
following order: biasA, biasB, A, B - hence these conditions.
If passing discontiguous arrays (e.g. when the gradient is
a zeroed-out array and later summed to the previous
gradient), there should be no biases, and the biases
should already be assigned to the same memory locations as
the gradient arrays. Otherwise should pass
'overwrite_grad=false', which should not be used within this
module */
if (g_B == g_biasA
+ (size_t)(user_bias? m : 0)
+ (size_t)(item_bias? n : 0)
+ ((size_t)m*(size_t)lda - (size_t)(lda-k)))
{
tscal_large(g_biasA, scaling,
((size_t)m*(size_t)lda + (size_t)n*(size_t)ldb)
+ (size_t)(user_bias? m : 0)
+ (size_t)(item_bias? n : 0)
- (size_t)(lda-k),
nthreads);
}
else if (!user_bias && !item_bias &&
g_B == g_A + (size_t)m*(size_t)lda- (size_t)(lda-k))
{
tscal_large(g_A, scaling,
((size_t)m*(size_t)lda + (size_t)n*(size_t)ldb),
nthreads);
}
else
{
if (user_bias)
cblas_tscal(m, scaling, g_biasA, 1);
if (item_bias)
cblas_tscal(n, scaling, g_biasB, 1);
tscal_large(g_A, scaling,
((size_t)m*(size_t)lda) - (size_t)(lda-k),
nthreads);
tscal_large(g_B, scaling,
((size_t)n*(size_t)ldb) - (size_t)(ldb-k),
nthreads);
}
}
}
else /* dense input - this is usually not optimal, but still supported */
{
/* Buffer = X */
copy_arr_(Xfull, buffer_real_t, m_by_n, nthreads);
/* Buffer = A*t(B) - Buffer */
cblas_tgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
m, n, k,
1., A, lda, B, ldb,
-1., buffer_real_t, n);
/* Buffer += biasA[m,1] + biasB[1,n] */
if (user_bias)
mat_plus_rowvec(buffer_real_t, biasA, m, n, nthreads);
if (item_bias)
mat_plus_colvec(buffer_real_t, biasB, 1., m, n, (size_t)n,nthreads);
/* Buffer *= W (Now buffer becomes E without the scaling) */
if (full_dense) {
if (weight != NULL)
mult_elemwise(buffer_real_t, weight, m_by_n, nthreads);
} else {
if (weight == NULL)
nan_to_zero(buffer_real_t, Xfull, m_by_n, nthreads);
else
mult_if_non_nan(buffer_real_t, Xfull, weight, m_by_n, nthreads);
}
/* f = ||E||^2 */
if (weight == NULL)
f = sum_squares(buffer_real_t, m_by_n, nthreads);
else
f = sum_sq_div_w(buffer_real_t, weight, m_by_n, true, nthreads);
/* grad(bias1) = scaling * sum_rows(E) */
if (user_bias) {
sum_by_rows(buffer_real_t, g_biasA, m, n, nthreads);
if (scaling != 1)
cblas_tscal(m, scaling, g_biasA, 1);
}
/* grad(bias2) = scaling * sum_cols(E) */
if (item_bias) {
sum_by_cols(buffer_real_t, g_biasB, m, n, (size_t)n, nthreads);
if (scaling != 1)
cblas_tscal(n, scaling, g_biasB, 1);
}
/* grad(A) = scaling * E * B */
cblas_tgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
m, k, n,
scaling, buffer_real_t, n, B, ldb,
0., g_A, lda);
/* grad(B) = scaling * t(E) * A */
cblas_tgemm(CblasRowMajor, CblasTrans, CblasNoTrans,
n, k, m,
scaling, buffer_real_t, n, A, lda,
0., g_B, ldb);
/* Note: don't apply the scaling earlier as otherwise it
loses precision, even if it manages to save some operations */
}
return (real_t)((scaling / 2.) * f);
}
/*******************************************************************************
Closed-form solution for cannonical form
----------------------------------------
This function uses the closed form to obtain the least-squares minimizer
for a single row of the A matrix:
min ||M . W . (X - A*t(B)) ||^2
The formula is given as follows:
Aopt[1,k] = inv(t(B'*W) * B' + diag(lambda)) * (t(B' * W) * Xa[1,n])
Where B'[n,k] is a matrix constructed from B which has zeros in every row
for which the corresponding entry for row 'a' in Xa[1,n] is missing.
Since the matrix whose inverse is required is symmetric positive-definite,
it's possible to solve this procedure quite efficiently with specialized
methods.
Note that this function can accomodate weights and regulatization, but not
biases. In order to determine the bias for the given row 'a' of X, the B
matrix should get a column of all-ones appended at the end. Doing this
also requires passing a matrix 'X' with the bias already-subtracted from
it when solving for B.
This function is not meant to exploit multi-threading, but it still calls
BLAS and LAPACK functions, which set their number of threads externally.
Parameters
----------
a_vec[k] (out)
The optimal optimal values for A[1,k].
k
The dimensionality of the factorization (a.k.a. latent factors).
B[n*k]
The B matrix with which A is multiplied to approximate X.
n
Number of rows in B, number of columns in X.
ldb
Leading dimension of matrix B (>= k).
Xa_dense[n]
Row of X in dense format. Missing values should appear as NAN.
Will be modified in-place if there are any missing entries. Pass NULL
if X is sparse.
full_dense
Whether the Xa_dense matrix has only non-missing entries.
Xa[nnz], ixB[nnz], nnz
Row of the X matrix in sparse format, with ixB denoting the indices
of the non-missing entries and Xa the values. Pass NULL if X is
given in dense format.
weight[nnz or n]
Observation weights for each non-missing entry in X. Must match the
shape of X passed - that is, if Xa_dense is passed, must have lenght
'n', if Xa is passed, must have length 'nnz'. Pass NULL if the weights
are uniform.
buffer_real_t[k^2 or k^2 + n*k]
Array in which to write temporary values. For sparse X and dense X
with full_dense=true, must have space for k^2 elements. For dense X
with full_dense=false, must have space for k^2 + n*k elements.
lam
Regularization parameter applied on the A matrix.
precomputedTransBtBinvBt
Precomputed matrix inv(t(B)*B + diag(lam))*t(B). Can only be used
if passing 'Xa_dense' + 'full_dense=true' + 'weight=NULL'. This is
used in order to speed up computations, but if not passed, will not be
used. When passed, there is no requirement for any buffer.
precomputedBtBw
Precomputed matrix t(B)*W*B + diag(lam). Will only be used if either:
(a) passing 'Xa_dense' + 'full_dense=false', and the proportion of
missing entries in 'Xa_dense' is <= 10% of the total; or
(b) passing 'Xa_dense=NULL' + 'NA_as_zero=true'.
This is only used to speed up computations, and if not passed, will
not be used, unless passing 'NA_as_zero=true'.
cnt_NA
Number of missing entries in 'Xa_dense'. Only used if passing
'precomputedBtBw' and 'full_dense=false'.
NA_as_zero
Whether to take missing entries from 'Xa' as zero, and assume that
there are no missing entries. This implies a different model in which
the squared error is computed over all values of X. If passing 'true',
must also pass 'precomputedBtBw'. This is ignored if passing 'Xa_dense'.
*******************************************************************************/
#ifdef AVOID_BLAS_SYR
#undef cblas_tsyr
#define cblas_tsyr(order, Uplo, N, alpha, X, incX, A, lda) \
custom_syr(N, alpha, X, A, lda)
#endif
void factors_closed_form
(
real_t *restrict a_vec, int_t k,
real_t *restrict B, int_t n, int_t ldb,
real_t *restrict Xa_dense, bool full_dense,
real_t *restrict Xa, int_t ixB[], size_t nnz,
real_t *restrict weight,
real_t *restrict buffer_real_t,
real_t lam, real_t lam_last,
real_t l1_lam, real_t l1_lam_last,
bool scale_lam, bool scale_bias_const, real_t wsum,
real_t *restrict precomputedTransBtBinvBt,
real_t *restrict precomputedBtB, int_t cnt_NA, int_t ld_BtB,
bool BtB_has_diag, bool BtB_is_scaled, real_t scale_BtB, int_t n_BtB,
real_t *restrict precomputedBtBchol, bool NA_as_zero,
bool use_cg, int_t max_cg_steps,/* <- 'cg' should not be used for new data*/
bool nonneg, int_t max_cd_steps,
real_t *restrict bias_BtX, real_t *restrict bias_X, real_t bias_X_glob,
real_t multiplier_bias_BtX,
bool force_add_diag
)
{
/* TODO: here should add a parameter 'incX' for dense vectors */
real_t *restrict bufferBtB = buffer_real_t;
if (ldb == 0) ldb = k;
bool add_diag = true;
char lo = 'L';
int_t one = 1;
int_t ignore;
if (n_BtB == 0) n_BtB = n;
bool prefer_BtB = (cnt_NA + (n_BtB-n) < 2*k) ||
(nnz > (size_t)(2*k));
if (precomputedBtB == NULL)
prefer_BtB = false;
/* Note: if passing 'NA_as_zero', 'n' and 'n_BtB' cannot be different */
/* Potential bad inputs */
if (( (Xa_dense != NULL && cnt_NA == n) ||
(Xa_dense == NULL && nnz == 0) )
&& !(NA_as_zero && bias_BtX != NULL))
{
zero_out:
set_to_zero(a_vec, k);
return;
}
if (scale_lam)
{
real_t multiplier_lam = 1.;
if (weight == NULL)
{
if (Xa_dense != NULL)
multiplier_lam = (real_t)(n - (full_dense? 0 : cnt_NA));
else if (NA_as_zero)
multiplier_lam = (real_t)n;
else
multiplier_lam = (real_t)nnz;
}
else
{
if (wsum <= 0)
{
wsum = 0.;
if (Xa_dense != NULL) {
for (int_t ix = 0; ix < n; ix++)
wsum += isnan(Xa_dense[ix])? 0. : weight[ix];
}
else {
for (size_t ix = 0; ix < nnz; ix++)
wsum += weight[ix];
}
if (NA_as_zero && Xa_dense == NULL)
wsum += (real_t)(n - (int_t)nnz);
}
if (fabs_t(wsum) < EPSILON_T && bias_BtX == NULL) goto zero_out;
multiplier_lam = wsum;
}
lam *= multiplier_lam;
l1_lam *= multiplier_lam;
if (!scale_bias_const) {
lam_last *= multiplier_lam;
l1_lam_last *= multiplier_lam;
}
}
if (nonneg) use_cg = false;
#ifdef TEST_CG
if (l1_lam || l1_lam_last)
use_cg = false;
#endif
/* If inv(t(B)*B + diag(lam))*B is already given, use it as a shortcut.
The intended use-case for this is for cold-start recommendations
for the collective model with no missing values, given that the
C matrix is already fixed and is the same for all users. */
if (precomputedTransBtBinvBt != NULL && weight == NULL && !nonneg &&
((full_dense && Xa_dense != NULL && n_BtB == n) ||
(Xa_dense == NULL && NA_as_zero && bias_BtX == NULL)) &&
(l1_lam == 0. && l1_lam_last == 0.))
{
if (Xa_dense != NULL)
cblas_tgemv(CblasRowMajor, CblasTrans,
n, k,
1., precomputedTransBtBinvBt, k,
Xa_dense, 1,
0., a_vec, 1);
else
{
set_to_zero(a_vec, k);
tgemv_dense_sp(
n, k, /* <- 'n' doesn't get used*/
1., precomputedTransBtBinvBt, k,
ixB, Xa, nnz,
a_vec
);
}
return;
}
/* If t(B*w)*B + diag(lam) is given, and there are very few mising
values, can still be used as a shortcut by substracting from it */
else if (Xa_dense != NULL && precomputedBtB != NULL && weight == NULL &&
prefer_BtB)
{
add_diag = false;
copy_mat(k, k,
precomputedBtB, ld_BtB,
bufferBtB, k);
if (BtB_is_scaled && scale_BtB != 1.)
cblas_tscal(square(k), 1./scale_BtB, bufferBtB, 1);
add_diag = !BtB_has_diag;
set_to_zero(a_vec, k);
for (size_t ix = 0; ix < (size_t)n; ix++)
{
if (isnan(Xa_dense[ix]))
cblas_tsyr(CblasRowMajor, CblasUpper, k,
-1., B + ix*(size_t)ldb, 1,
bufferBtB, k);
else
cblas_taxpy(k, Xa_dense[ix], B + ix*(size_t)ldb, 1, a_vec, 1);
}
for (size_t ix = (size_t)n; ix < (size_t)n_BtB; ix++)
cblas_tsyr(CblasRowMajor, CblasUpper, k,
-1., B + ix*(size_t)ldb, 1,
bufferBtB, k);
}
/* If the input is sparse and it's assumed that the non-present
entries are zero, with no missing values, or if it is full dense,
it's still possible to use the precomputed and pre-factorized matrix. */
else if ( ( (Xa_dense == NULL && NA_as_zero) ||
(Xa_dense != NULL && full_dense) ) &&
weight == NULL && !nonneg &&
precomputedBtBchol != NULL && l1_lam == 0. && l1_lam_last == 0.)
{
set_to_zero(a_vec, k);
if (Xa_dense == NULL) {
tgemv_dense_sp(n, k,
1., B, (size_t)ldb,
ixB, Xa, nnz,
a_vec);
if (bias_BtX != NULL)
cblas_taxpy(k, 1./multiplier_bias_BtX, bias_BtX, 1, a_vec, 1);
}
else {
cblas_tgemv(CblasRowMajor, CblasTrans,
n, k,
1., B, ldb,
Xa_dense, 1,
0., a_vec, 1);
}
tpotrs_(&lo, &k, &one,
precomputedBtBchol, &k,
a_vec, &k,
&ignore);
return;
}
/* In some cases, the sparse matrices might hold zeros instead
of NAs - here the already-factorized BtBchol should be passed,
but if for some reason it wasn't, will construct the usual
matrices on-the-fly.
If it has weights, could still use the precomputed matrix,
then adjust by substracting from it again. */
else if (Xa_dense == NULL && NA_as_zero && !use_cg)
{
set_to_zero(a_vec, k);
set_to_zero(bufferBtB, square(k));
if (weight != NULL)
{
for (size_t ix = 0; ix < nnz; ix++)
{
cblas_tsyr(CblasRowMajor, CblasUpper,
k, (weight[ix] - 1.),
B + (size_t)ixB[ix]*(size_t)ldb, 1,
bufferBtB, k);
cblas_taxpy(k,
(weight[ix] * Xa[ix])
-
(weight[ix]-1.)
*
(bias_X_glob + ((bias_X == NULL)?
0 : bias_X[ixB[ix]])),
B + (size_t)ixB[ix]*(size_t)ldb, 1,
a_vec, 1);
}
}
else {
tgemv_dense_sp(n, k,
1., B, (size_t)ldb,
ixB, Xa, nnz,
a_vec);
}
if (precomputedBtB == NULL)
cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
k, max2(n, n_BtB),
1., B, ldb,
1., bufferBtB, k);
else {
if (BtB_is_scaled && scale_BtB != 1.)
cblas_taxpy(square(k), 1./scale_BtB, precomputedBtB, 1,
bufferBtB, 1);
else
sum_mat(k, k,
precomputedBtB, ld_BtB,
bufferBtB, k);
add_diag = !BtB_has_diag;
}
}
/* If none of the above apply, it's faster to get an approximate
solution using the conjugate gradient method.
In this case, will exit the function afterwards as it will
not need to calculate the Cholesky. */
/* Note: 'multiplier_bias_BtX' will only be encountered when solving for
'A' given 'U' alone in the prediction functions, thus it's not necessary
to include here. */
else if (use_cg)
{
if (Xa_dense != NULL)
factors_explicit_cg_dense(
a_vec, k,
B, n, ldb,
Xa_dense, cnt_NA,
weight,
precomputedBtB, ld_BtB,
buffer_real_t,
lam, lam_last,
max_cg_steps
);
else if (NA_as_zero && weight != NULL)
factors_explicit_cg_NA_as_zero_weighted(
a_vec, k,
B, n, ldb,
Xa, ixB, nnz,
weight,
precomputedBtB, ld_BtB,
bias_BtX, bias_X, bias_X_glob,
multiplier_bias_BtX,
buffer_real_t,
lam, lam_last,
max_cg_steps
);
else
factors_explicit_cg(
a_vec, k,
B, n, ldb,
Xa, ixB, nnz,
weight,
buffer_real_t,
lam, lam_last,
max_cg_steps
);
return;
}
/* In more general cases, need to construct the following matrices:
- t(B)*B + diag(lam), with only the rows of B which have a
non-missing entry in X.
- t(B)*t(X), with missing entries in X set to zero.
If X is dense, this can be accomplished by following the steps verbatim.
If X is sparse, it's more efficient to obtain the first one through
SR1 updates to an empty matrix, while the second one can be obtained
with a dense-sparse matrix multiplication */
else if (Xa_dense == NULL)
{
/* Sparse X - obtain t(B)*B through SR1 updates, avoiding a full
matrix-matrix multiplication. This is the expected scenario for
most use-cases. */
/* First obtain t(B)*t(X) from a dense_matrix-sparse_vector product,
avoid again a full matrix-vector multiply. Note that this is
stored in 'a_vec', despite the name */
set_to_zero(a_vec, k);
/* Note: in theory, should pass max2(n, n_BtB) to these functions,
but 'n' is not used so it doesn't matter. */
if (weight == NULL) {
tgemv_dense_sp(n, k,
1., B, (size_t)ldb,
ixB, Xa, nnz,
a_vec);
}
else {
tgemv_dense_sp_weighted(n, k,
weight, B, (size_t)ldb,
ixB, Xa, nnz,
a_vec);
}
/* Now t(B)*B */
set_to_zero(bufferBtB, square(k));
for (size_t ix = 0; ix < nnz; ix++)
cblas_tsyr(CblasRowMajor, CblasUpper, k,
(weight == NULL)? (1.) : (weight[ix]),
B + (size_t)ixB[ix]*(size_t)ldb, 1,
bufferBtB, k);
}
else
{
/* Dense X - in this case, re-construct t(B)*B without the missing
entries - at once if possible, or one-by-one if not.
Note that, if B0 is the B matrix with the rows set to zero
when the entries of X are missing, then the following
equalities will apply:
t(B0)*B == t(B)*B0 == t(B0)*B0 */
if (full_dense && weight == NULL)
{
/* this will only be encountered when calculating factors
after having called 'fit_*'. */
cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
k, n,
1., B, ldb,
0., bufferBtB, k);
cblas_tgemv(CblasRowMajor, CblasTrans,
n, k,
1., B, ldb,
Xa_dense, 1,
0., a_vec, 1);
}
else
{
set_to_zero(a_vec, k);
set_to_zero(bufferBtB, square(k));
for (size_t ix = 0; ix < (size_t)n; ix++) {
if (!isnan(Xa_dense[ix])) {
cblas_tsyr(CblasRowMajor, CblasUpper,
k, (weight == NULL)? (1.) : (weight[ix]),
B + ix*(size_t)ldb, 1,
bufferBtB, k);
cblas_taxpy(k,
((weight == NULL)? (1.) : (weight[ix]))
* Xa_dense[ix],
B + ix*(size_t)ldb, 1,
a_vec, 1);
}
}
}
}
/* Finally, obtain closed-form through Cholesky factorization,
exploiting the fact that t(B)*B+diag(lam) is symmetric PSD */
if (add_diag || force_add_diag) {
add_to_diag(bufferBtB, lam, k);
if (lam_last != lam) bufferBtB[square(k)-1] += (lam_last - lam);
}
if (bias_BtX != NULL && NA_as_zero)
cblas_taxpy(k, 1./multiplier_bias_BtX, bias_BtX, 1, a_vec, 1);
if (!nonneg && l1_lam == 0. && l1_lam_last == 0.)
tposv_(&lo, &k, &one,
bufferBtB, &k,
a_vec, &k,
&ignore);
else if (!nonneg)
solve_elasticnet(
bufferBtB,
a_vec,
buffer_real_t + square(k),
k,
l1_lam, l1_lam_last,
max_cd_steps,
true
);
else
solve_nonneg(
bufferBtB,
a_vec,
buffer_real_t + square(k),
k,
l1_lam, l1_lam_last,
max_cd_steps,
true
);
/* Note: Function 'posv' is taken from LAPACK for FORTRAN.
If using LAPACKE for C with with Row-Major parameter,
some implementations will copy the matrix to transpose it
and pass it to FORTRAN-posv. */
}
/* https://en.wikipedia.org/wiki/Conjugate_gradient_method */
void factors_explicit_cg
(
real_t *restrict a_vec, int_t k,
real_t *restrict B, int_t n, int_t ldb,
real_t *restrict Xa, int_t ixB[], size_t nnz,
real_t *restrict weight,
real_t *restrict buffer_real_t,
real_t lam, real_t lam_last,
int_t max_cg_steps
)
{
real_t *restrict Ap = buffer_real_t;
real_t *restrict p = Ap + k;
real_t *restrict r = p + k;
set_to_zero(r, k);
real_t coef;
real_t a;
real_t r_old, r_new;
for (size_t ix = 0; ix < nnz; ix++) {
coef = cblas_tdot(k, B + (size_t)ixB[ix]*(size_t)ldb, 1, a_vec, 1);
coef -= Xa[ix];
coef *= (weight == NULL)? 1. : weight[ix];
cblas_taxpy(k, -coef, B + (size_t)ixB[ix]*ldb, 1, r, 1);
}
cblas_taxpy(k, -lam, a_vec, 1, r, 1);
if (lam != lam_last)
r[k-1] -= (lam_last-lam) * a_vec[k-1];
copy_arr(r, p, k);
r_old = cblas_tdot(k, r, 1, r, 1);
#ifdef TEST_CG
if (r_old <= 1e-15)
return;
#else
if (r_old <= 1e-12)
return;
#endif
for (int_t cg_step = 0; cg_step < max_cg_steps; cg_step++)
{
set_to_zero(Ap, k);
for (size_t ix = 0; ix < nnz; ix++) {
coef = cblas_tdot(k, B + (size_t)ixB[ix]*(size_t)ldb, 1, p, 1);
coef *= (weight == NULL)? 1. : weight[ix];
cblas_taxpy(k, coef, B + (size_t)ixB[ix]*ldb, 1, Ap, 1);
}
cblas_taxpy(k, lam, p, 1, Ap, 1);
if (lam != lam_last)
Ap[k-1] += (lam_last-lam) * p[k-1];
a = r_old / cblas_tdot(k, p, 1, Ap, 1);
cblas_taxpy(k, a, p, 1, a_vec, 1);
cblas_taxpy(k, -a, Ap, 1, r, 1);
r_new = cblas_tdot(k, r, 1, r, 1);
#ifdef TEST_CG
if (r_new <= 1e-15)
break;
#else
if (r_new <= 1e-8)
break;
#endif
cblas_tscal(k, r_new / r_old, p, 1);
cblas_taxpy(k, 1., r, 1, p, 1);
r_old = r_new;
}
}
void factors_explicit_cg_NA_as_zero_weighted
(
real_t *restrict a_vec, int_t k,
real_t *restrict B, int_t n, int_t ldb,
real_t *restrict Xa, int_t ixB[], size_t nnz,
real_t *restrict weight,
real_t *restrict precomputedBtB, int_t ld_BtB,
real_t *restrict bias_BtX, real_t *restrict bias_X, real_t bias_X_glob,
real_t multiplier_bias_BtX,
real_t *restrict buffer_real_t,
real_t lam, real_t lam_last,
int_t max_cg_steps
)
{
real_t *restrict Ap = buffer_real_t;
real_t *restrict p = Ap + k;
real_t *restrict r = p + k;
real_t *restrict wr = r + k; /* length is 'n' */
set_to_zero(r, k);
real_t a;
real_t r_old, r_new, coef;
bool prefer_BtB = k < n;
if (precomputedBtB == NULL)
prefer_BtB = false;
if (prefer_BtB)
{
cblas_tsymv(CblasRowMajor, CblasUpper, k,
-1., precomputedBtB, ld_BtB,
a_vec, 1,
0., r, 1);
for (size_t ix = 0; ix < nnz; ix++)
{
coef = cblas_tdot(k, B + (size_t)ixB[ix]*(size_t)ldb, 1, a_vec, 1);
cblas_taxpy(k,
-(weight[ix]-1.)
*
(coef + bias_X_glob + ((bias_X == NULL)?
0: bias_X[ixB[ix]]))
+
(weight[ix] * Xa[ix]),
B + (size_t)ixB[ix]*(size_t)ldb, 1,
r, 1);
}
}
else
{
cblas_tgemv(CblasRowMajor, CblasNoTrans,
n, k,
-1., B, ldb,
a_vec, 1,
0., wr, 1);
for (size_t ix = 0; ix < nnz; ix++)
wr[ixB[ix]] = weight[ix] * (wr[ixB[ix]] + Xa[ix]);
if (bias_X != NULL) {
for (size_t ix = 0; ix < nnz; ix++)
wr[ixB[ix]] -= (weight[ix] - 1.)
*
(bias_X[ixB[ix]] + bias_X_glob);
}
else if (bias_X_glob) {
for (size_t ix = 0; ix < nnz; ix++)
wr[ixB[ix]] -= (weight[ix] - 1.) * bias_X_glob;
}
cblas_tgemv(CblasRowMajor, CblasTrans,
n, k,
1., B, ldb,
wr, 1,
1., r, 1);
}
if (bias_BtX != NULL)
{
cblas_taxpy(k, 1./multiplier_bias_BtX, bias_BtX, 1, r, 1);
}
cblas_taxpy(k, -lam, a_vec, 1, r, 1);
if (lam != lam_last)
r[k-1] -= (lam_last-lam) * a_vec[k-1];
copy_arr(r, p, k);
r_old = cblas_tdot(k, r, 1, r, 1);
#ifdef TEST_CG
if (r_old <= 1e-15)
return;
#else
if (r_old <= 1e-12)
return;
#endif
for (int_t cg_step = 0; cg_step < max_cg_steps; cg_step++)
{
if (precomputedBtB != NULL && prefer_BtB)
{
cblas_tsymv(CblasRowMajor, CblasUpper, k,
1., precomputedBtB, ld_BtB,
p, 1,
0., Ap, 1);
for (size_t ix = 0; ix < nnz; ix++) {
coef = cblas_tdot(k,
B + (size_t)ixB[ix]*(size_t)ldb, 1,
p, 1);
cblas_taxpy(k, (weight[ix] - 1.) * coef,
B + (size_t)ixB[ix]*(size_t)ldb, 1,
Ap, 1);
}
}
else
{
cblas_tgemv(CblasRowMajor, CblasNoTrans,
n, k,
1., B, ldb,
p, 1,
0., wr, 1);
for (size_t ix = 0; ix < nnz; ix++)
wr[ixB[ix]] *= weight[ix];
cblas_tgemv(CblasRowMajor, CblasTrans,
n, k,
1., B, ldb,
wr, 1,
0., Ap, 1);
}
cblas_taxpy(k, lam, p, 1, Ap, 1);
if (lam != lam_last)
Ap[k-1] += (lam_last-lam) * p[k-1];
a = r_old / cblas_tdot(k, p, 1, Ap, 1);
cblas_taxpy(k, a, p, 1, a_vec, 1);
cblas_taxpy(k, -a, Ap, 1, r, 1);
r_new = cblas_tdot(k, r, 1, r, 1);
#ifdef TEST_CG
if (r_new <= 1e-15)
break;
#else
if (r_new <= 1e-8)
break;
#endif
cblas_tscal(k, r_new / r_old, p, 1);
cblas_taxpy(k, 1., r, 1, p, 1);
r_old = r_new;
}
}
void factors_explicit_cg_dense
(
real_t *restrict a_vec, int_t k,
real_t *restrict B, int_t n, int_t ldb,
real_t *restrict Xa_dense, int_t cnt_NA,
real_t *restrict weight,
real_t *restrict precomputedBtB,
int_t ld_BtB,
real_t *restrict buffer_real_t,
real_t lam, real_t lam_last,
int_t max_cg_steps
)
{
real_t *restrict Ap = buffer_real_t;
real_t *restrict p = Ap + k;
real_t *restrict r = p + k;
real_t r_new, r_old;
real_t a, coef, w_this;
bool prefer_BtB = cnt_NA < k && precomputedBtB != NULL && weight == NULL;
if (!prefer_BtB)
set_to_zero(r, k);
if (prefer_BtB)
{
cblas_tsymv(CblasRowMajor, CblasUpper, k,
-1., precomputedBtB, ld_BtB,
a_vec, 1,
0., r, 1);
for (size_t ix = 0; ix < (size_t)n; ix++)
{
if (isnan(Xa_dense[ix])) {
coef = cblas_tdot(k, B + ix*(size_t)ldb, 1, a_vec, 1);
cblas_taxpy(k, coef, B + ix*(size_t)ldb, 1, r, 1);
}
else {
cblas_taxpy(k, Xa_dense[ix], B + ix*(size_t)ldb, 1, r, 1);
}
}
}
else
{
for (size_t ix = 0; ix < (size_t)n; ix++)
{
if (!isnan(Xa_dense[ix]))
{
coef = cblas_tdot(k, B + ix*(size_t)ldb, 1, a_vec, 1);
cblas_taxpy(k,
(-coef + Xa_dense[ix])
*
((weight == NULL)? 1. : weight[ix]),
B + ix*(size_t)ldb, 1, r, 1);
}
}
}
cblas_taxpy(k, -lam, a_vec, 1, r, 1);
if (lam != lam_last)
r[k-1] -= (lam_last-lam) * a_vec[k-1];
copy_arr(r, p, k);
r_old = cblas_tdot(k, r, 1, r, 1);
#ifdef TEST_CG
if (r_old <= 1e-15)
return;
#else
if (r_old <= 1e-12)
return;
#endif
for (int_t cg_step = 0; cg_step < max_cg_steps; cg_step++)
{
if (prefer_BtB)
{
cblas_tsymv(CblasRowMajor, CblasUpper, k,
1., precomputedBtB, ld_BtB,
p, 1,
0., Ap, 1);
for (size_t ix = 0; ix < (size_t)n; ix++)
if (isnan(Xa_dense[ix])) {
coef = cblas_tdot(k, B + ix*(size_t)ldb, 1, p, 1);
cblas_taxpy(k, -coef, B + ix*(size_t)ldb, 1, Ap, 1);
}
}
else
{
set_to_zero(Ap, k);
for (size_t ix = 0; ix < (size_t)n; ix++)
{
if (!isnan(Xa_dense[ix]))
{
w_this = (weight == NULL)? 1. : weight[ix];
coef = cblas_tdot(k, B + ix*(size_t)ldb, 1, p, 1);
cblas_taxpy(k, w_this * coef, B + ix*(size_t)ldb, 1, Ap, 1);
}
}
}
cblas_taxpy(k, lam, p, 1, Ap, 1);
if (lam != lam_last)
Ap[k-1] += (lam_last-lam) * p[k-1];
a = r_old / cblas_tdot(k, p, 1, Ap, 1);
cblas_taxpy(k, a, p, 1, a_vec, 1);
cblas_taxpy(k, -a, Ap, 1, r, 1);
r_new = cblas_tdot(k, r, 1, r, 1);
#ifdef TEST_CG
if (r_new <= 1e-15)
break;
#else
if (r_new <= 1e-8)
break;
#endif
cblas_tscal(k, r_new / r_old, p, 1);
cblas_taxpy(k, 1., r, 1, p, 1);
r_old = r_new;
}
}
/* https://www.benfrederickson.com/fast-implicit-matrix-factorization/ */
void factors_implicit_cg
(
real_t *restrict a_vec, int_t k,
real_t *restrict B, size_t ldb,
real_t *restrict Xa, int_t ixB[], size_t nnz,
real_t lam,
real_t *restrict precomputedBtB, int_t ld_BtB,
int_t max_cg_steps,
real_t *restrict buffer_real_t
)
{
real_t *restrict Ap = buffer_real_t;
real_t *restrict r = Ap + k;
real_t *restrict p = r + k;
real_t coef;
real_t r_old, r_new;
real_t a;
cblas_tsymv(CblasRowMajor, CblasUpper, k,
-1., precomputedBtB, ld_BtB,
a_vec, 1,
0., r, 1);
for (size_t ix = 0; ix < nnz; ix++) {
coef = cblas_tdot(k, B + (size_t)ixB[ix]*ldb, 1, a_vec, 1);
cblas_taxpy(k,
-(coef - 1.) * Xa[ix] - coef,
B + (size_t)ixB[ix]*ldb, 1,
r, 1);
}
cblas_taxpy(k, -lam, a_vec, 1, r, 1);
copy_arr(r, p, k);
r_old = cblas_tdot(k, r, 1, r, 1);
#ifdef TEST_CG
if (r_old <= 1e-15)
return;
#else
if (r_old <= 1e-12)
return;
#endif
for (int_t cg_step = 0; cg_step < max_cg_steps; cg_step++)
{
cblas_tsymv(CblasRowMajor, CblasUpper, k,
1., precomputedBtB, ld_BtB,
p, 1,
0., Ap, 1);
for (size_t ix = 0; ix < nnz; ix++) {
coef = cblas_tdot(k, B + (size_t)ixB[ix]*ldb, 1, p, 1);
cblas_taxpy(k,
coef * (Xa[ix] - 1.) + coef,
B + (size_t)ixB[ix]*ldb, 1,
Ap, 1);
}
cblas_taxpy(k, lam, p, 1, Ap, 1);
a = r_old / cblas_tdot(k, Ap, 1, p, 1);
cblas_taxpy(k, a, p, 1, a_vec, 1);
cblas_taxpy(k, -a, Ap, 1, r, 1);
r_new = cblas_tdot(k, r, 1, r, 1);
#ifdef TEST_CG
if (r_new <= 1e-15)
break;
#else
if (r_new <= 1e-8)
break;
#endif
cblas_tscal(k, r_new / r_old, p, 1);
cblas_taxpy(k, 1., r, 1, p, 1);
r_old = r_new;
}
}
void factors_implicit_chol
(
real_t *restrict a_vec, int_t k,
real_t *restrict B, size_t ldb,
real_t *restrict Xa, int_t ixB[], size_t nnz,
real_t lam, real_t l1_lam,
real_t *restrict precomputedBtB, int_t ld_BtB,
bool nonneg, int_t max_cd_steps,
real_t *restrict buffer_real_t
)
{
char lo = 'L';
int_t one = 1;
int_t ignore;
if (nnz == 0) {
set_to_zero(a_vec, k);
return;
}
for (size_t ix = 0; ix < nnz; ix++) {
cblas_taxpy(k, Xa[ix] + 1.,
B + (size_t)ixB[ix]*ldb, 1, a_vec, 1);
}
real_t *restrict BtB = buffer_real_t;
buffer_real_t += square(k);
set_to_zero(BtB, square(k));
for (size_t ix = 0; ix < nnz; ix++) {
cblas_tsyr(CblasRowMajor, CblasUpper, k,
Xa[ix], B + (size_t)ixB[ix]*ldb, 1,
BtB, k);
}
sum_mat(k, k,
precomputedBtB, ld_BtB,
BtB, k);
if (!nonneg && !l1_lam)
tposv_(&lo, &k, &one,
BtB, &k,
a_vec, &k,
&ignore);
else if (!nonneg)
solve_elasticnet(
BtB,
a_vec,
buffer_real_t,
k,
l1_lam, l1_lam,
max_cd_steps,
false
);
else
solve_nonneg(
BtB,
a_vec,
buffer_real_t,
k,
l1_lam, l1_lam,
max_cd_steps,
true
);
}
/* TODO: this could benefit from a non-zero starting point, could copy this:
https://github.com/rexyai/rsparse/blob/
08609e6f3638e7bccd08bf54a3d0f6109951226b/inst/include/nnls.hpp#L38 */
void solve_nonneg
(
real_t *restrict BtB,
real_t *restrict BtX, /* <- solution will be here */
real_t *restrict buffer_real_t,
int_t k,
real_t l1_lam, real_t l1_lam_last,
size_t max_cd_steps,
bool fill_lower
)
{
real_t diff_iter = 0.;
real_t diff_val = 0.;
real_t newval = 0;
if (fill_lower)
fill_lower_triangle(BtB, k, k);
if (l1_lam != 0.)
{
for (int_t ix = 0; ix < k; ix++)
BtX[ix] -= l1_lam;
if (l1_lam_last != l1_lam)
BtX[k-1] -= (l1_lam_last - l1_lam);
}
real_t *restrict a_prev = buffer_real_t;
set_to_zero(a_prev, k);
if (max_cd_steps == 0) max_cd_steps = INT_MAX;
size_t incr = max_cd_steps > 0;
for (size_t iter = 0; iter < max_cd_steps; iter += incr)
{
diff_iter = 0;
for (int_t ix = 0; ix < k; ix++)
{
newval = a_prev[ix] + BtX[ix] / BtB[ix + ix*k];
newval = max2(newval, 0.);
diff_val = newval - a_prev[ix];
if (fabs_t(diff_val) > 1e-8) {
diff_iter += fabs_t(diff_val);
cblas_taxpy(k, -diff_val, BtB + ix*k, 1, BtX, 1);
a_prev[ix] = newval;
}
}
if (isnan(diff_iter) || !isfinite(diff_iter) || diff_iter < 1e-8)
break;
}
copy_arr(a_prev, BtX, k);
}
void solve_nonneg_batch
(
real_t *restrict BtB,
real_t *restrict BtX, /* <- solution will be here */
real_t *restrict buffer_real_t,
int_t m, int_t k, size_t lda,
real_t l1_lam, real_t l1_lam_last,
size_t max_cd_steps,
int nthreads
)
{
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long ix;
#endif
fill_lower_triangle(BtB, k, k);
#pragma omp parallel for schedule(dynamic) num_threads(nthreads) \
shared(BtB, BtX, buffer_real_t, m, lda, k, \
l1_lam, l1_lam_last, max_cd_steps)
for (size_t_for ix = 0; ix < (size_t)m; ix++)
{
for (size_t ii = 0; ii < (size_t)k; ii++) {
if (isnan(BtX[ii + ix*lda]))
continue;
}
solve_nonneg(
BtB,
BtX + ix*lda,
buffer_real_t + (size_t)k*(size_t)omp_get_thread_num(),
k,
l1_lam, l1_lam_last,
max_cd_steps,
false
);
}
}
void solve_elasticnet
(
real_t *restrict BtB,
real_t *restrict BtX, /* <- solution will be here */
real_t *restrict buffer_real_t,
int_t k,
real_t l1_lam, real_t l1_lam_last,
size_t max_cd_steps,
bool fill_lower
)
{
real_t diff_iter = 0.;
real_t diff_val = 0.;
real_t newval = 0;
if (fill_lower)
fill_lower_triangle(BtB, k, k);
real_t *restrict BtX_neg = buffer_real_t;
buffer_real_t += k;
real_t *restrict a_prev = buffer_real_t;
buffer_real_t += k;
real_t *restrict a_neg = buffer_real_t;
set_to_zero(a_prev, (size_t)2*(size_t)k);
for (int_t ix = 0; ix < k; ix++)
BtX_neg[ix] = -BtX[ix] - l1_lam;
for (int_t ix = 0; ix < k; ix++)
BtX[ix] -= l1_lam;
if (l1_lam != l1_lam_last) {
BtX[k-1] -= (l1_lam_last - l1_lam);
BtX_neg[k-1] -= (l1_lam_last - l1_lam);
}
if (max_cd_steps == 0) max_cd_steps = INT_MAX;
size_t incr = max_cd_steps > 0;
for (size_t iter = 0; iter < max_cd_steps; iter += incr)
{
diff_iter = 0;
for (int_t ix = 0; ix < k; ix++)
{
newval = a_prev[ix] + BtX[ix] / BtB[ix + ix*k];
newval = max2(newval, 0.);
diff_val = newval - a_prev[ix];
if (fabs_t(diff_val) > 1e-8) {
diff_iter += fabs_t(diff_val);
cblas_taxpy(k, diff_val, BtB + ix*k, 1, BtX_neg, 1);
cblas_taxpy(k, -diff_val, BtB + ix*k, 1, BtX, 1);
a_prev[ix] = newval;
}
}
for (int_t ix = 0; ix < k; ix++)
{
newval = a_neg[ix] + BtX_neg[ix] / BtB[ix + ix*k];
newval = max2(newval, 0.);
diff_val = newval - a_neg[ix];
if (fabs_t(diff_val) > 1e-8) {
diff_iter += fabs_t(diff_val);
cblas_taxpy(k, diff_val, BtB + ix*k, 1, BtX, 1);
cblas_taxpy(k, -diff_val, BtB + ix*k, 1, BtX_neg, 1);
a_neg[ix] = newval;
}
}
if (isnan(diff_iter) || !isfinite(diff_iter) || diff_iter < 1e-8)
break;
}
for (int_t ix = 0; ix < k; ix++)
BtX[ix] = a_prev[ix] - a_neg[ix];
}
void solve_elasticnet_batch
(
real_t *restrict BtB,
real_t *restrict BtX, /* <- solution will be here */
real_t *restrict buffer_real_t,
int_t m, int_t k, size_t lda,
real_t l1_lam, real_t l1_lam_last,
size_t max_cd_steps,
int nthreads
)
{
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long ix;
#endif
fill_lower_triangle(BtB, k, k);
#pragma omp parallel for schedule(dynamic) num_threads(nthreads) \
shared(BtB, BtX, buffer_real_t, m, lda, k, \
l1_lam, l1_lam_last, max_cd_steps)
for (size_t_for ix = 0; ix < (size_t)m; ix++)
{
for (size_t ii = 0; ii < (size_t)k; ii++) {
if (isnan(BtX[ii + ix*lda]))
continue;
}
solve_elasticnet(
BtB,
BtX + ix*lda,
buffer_real_t + (size_t)3*(size_t)k*(size_t)omp_get_thread_num(),
k,
l1_lam, l1_lam_last,
max_cd_steps,
false
);
}
}
/* TODO: these functions for Adense or Bdense are no longer used,
but should be used in 'factors_multiple' so do not delete yet. */
real_t fun_grad_Adense
(
real_t *restrict g_A,
real_t *restrict A, int_t lda,
real_t *restrict B, int_t ldb,
int_t m, int_t n, int_t k,
real_t *restrict Xfull, real_t *restrict weight,
real_t lam, real_t w, real_t lam_last,
bool do_B, bool reset_grad,
int nthreads,
real_t *restrict buffer_real_t
)
{
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long ix;
#endif
real_t *g_B = NULL;
if (do_B) g_B = g_A;
real_t f = 0.;
size_t m_by_n = (size_t)m * (size_t)n;
copy_arr_(Xfull, buffer_real_t, m_by_n, nthreads);
cblas_tgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
m, n, k,
1., A, lda, B, ldb,
-1., buffer_real_t, n);
if (weight == NULL) {
nan_to_zero(buffer_real_t, Xfull, m_by_n, nthreads);
f = w * sum_squares(buffer_real_t, m_by_n, nthreads);
}
else {
/* TODO: make it compensated summation */
#pragma omp parallel for schedule(static) num_threads(nthreads) \
shared(buffer_real_t, m, n, weight) reduction(+:f)
for (size_t_for ix = 0; ix < m_by_n; ix++)
f += (!isnan(buffer_real_t[ix]))?
(square(buffer_real_t[ix]) * w*weight[ix]) : (0);
mult_if_non_nan(buffer_real_t, Xfull, weight, m_by_n, nthreads);
}
if (!do_B)
cblas_tgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
m, k, n,
w, buffer_real_t, n, B, ldb,
reset_grad? 0. : 1., g_A, lda);
else
cblas_tgemm(CblasRowMajor, CblasTrans, CblasNoTrans,
n, k, m,
w, buffer_real_t, n, A, lda,
reset_grad? 0. : 1., g_B, ldb);
if (lam != 0)
{
if (!do_B)
add_lam_to_grad_and_fun(&f, g_A, A, m, k, lda, lam, nthreads);
else
add_lam_to_grad_and_fun(&f, g_B, B, n, k, ldb, lam, nthreads);
}
if (lam != 0. && lam_last != lam && k >= 1) {
if (!do_B) {
cblas_taxpy(m, lam_last-lam, A + k-1, lda, g_A + k-1, lda);
f += (lam_last-lam) * cblas_tdot(m, A + k-1, lda, A + k-1, lda);
}
else {
cblas_taxpy(n, lam_last-lam, B + k-1, ldb, g_B + k-1, ldb);
f += (lam_last-lam) * cblas_tdot(n, B + k-1, ldb, B + k-1, ldb);
}
}
return f / 2.;
}
void add_lam_to_grad_and_fun
(
real_t *restrict fun,
real_t *restrict grad,
real_t *restrict A,
int_t m, int_t k, int_t lda,
real_t lam, int nthreads
)
{
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long row;
#endif
if (lda == k)
{
taxpy_large(A, lam, grad, (size_t)m*(size_t)k, nthreads);
*fun += lam * sum_squares(A, (size_t)m*(size_t)k, nthreads);
}
else
{
#pragma omp parallel for schedule(static) num_threads(nthreads) \
shared(m, k, A, grad, lam, lda)
for (size_t_for row = 0; row < (size_t)m; row++)
for (size_t col = 0; col < (size_t)k; col++)
grad[col + row*lda] += lam * A[col + row*lda];
real_t reg = 0;
#pragma omp parallel for schedule(static) num_threads(nthreads) \
shared(m, k, A, lda) reduction(+:reg)
for (size_t_for row = 0; row < (size_t)m; row++)
for (size_t col = 0; col < (size_t)k; col++)
reg += square(A[col + row*lda]);
*fun += lam * reg;
}
}
real_t wrapper_fun_grad_Adense
(
void *instance,
real_t *x,
real_t *g,
const size_t n,
const real_t step
)
{
data_fun_grad_Adense *data = (data_fun_grad_Adense*)instance;
return fun_grad_Adense(
g,
x, data->lda,
data->B, data->ldb,
data->m, data->n, data->k,
data->Xfull, data->weight,
data->lam, data->w, data->lam_last,
false, true,
data->nthreads,
data->buffer_real_t
);
}
real_t wrapper_fun_grad_Bdense
(
void *instance,
real_t *x,
real_t *g,
const size_t n,
const real_t step
)
{
data_fun_grad_Bdense *data = (data_fun_grad_Bdense*)instance;
return fun_grad_Adense(
g,
data->A, data->lda,
x, data->ldb,
data->m, data->n, data->k,
data->Xfull, data->weight,
data->lam, data->w, data->lam_last,
true, true,
data->nthreads,
data->buffer_real_t
);
}
size_t buffer_size_optimizeA
(
size_t n, bool full_dense, bool near_dense, bool some_full, bool do_B,
bool has_dense, bool has_weights, bool NA_as_zero,
bool nonneg, bool has_l1,
size_t k, size_t nthreads,
bool has_bias_static,
bool pass_allocated_BtB, bool keep_precomputedBtB,
bool use_cg, bool finalize_chol
)
{
if (finalize_chol && use_cg)
{
return max2(
buffer_size_optimizeA(
n, full_dense, near_dense, some_full, do_B,
has_dense, has_weights, NA_as_zero,
nonneg, has_l1,
k, nthreads,
has_bias_static,
pass_allocated_BtB, keep_precomputedBtB,
true, false
),
buffer_size_optimizeA(
n, full_dense, near_dense, some_full, do_B,
has_dense, has_weights, NA_as_zero,
nonneg, has_l1,
k, nthreads,
has_bias_static,
pass_allocated_BtB, keep_precomputedBtB,
false, false
)
);
}
if (!has_dense)
do_B = false;
if (has_l1 || nonneg)
use_cg = false;
size_t buffer_size = 0;
bool assigned_to_BtB_copy = false;
/* case 1 */
if (has_dense && (full_dense || near_dense) && !has_weights)
{
if (near_dense)
{
if (pass_allocated_BtB && !keep_precomputedBtB)
{
assigned_to_BtB_copy = true;
buffer_size += 0;
}
else {
buffer_size += square(k);
}
}
if (pass_allocated_BtB && !keep_precomputedBtB && !near_dense &&
!assigned_to_BtB_copy)
buffer_size += 0;
else {
buffer_size += square(k);
}
if (do_B)
buffer_size += n*nthreads;
if (!full_dense)
{
size_t size_thread_buffer = square(k);
if (use_cg)
size_thread_buffer = max2(size_thread_buffer, (3 * k));
if (nonneg)
size_thread_buffer += k;
else if (has_l1)
size_thread_buffer += 3*k;
buffer_size += nthreads * size_thread_buffer;
}
else {
if (nonneg)
buffer_size += k*nthreads;
else if (has_l1)
buffer_size += (size_t)3*k*nthreads;
}
return buffer_size;
}
/* case 2 */
else if (has_dense)
{
if (!has_weights) {
if (pass_allocated_BtB && !keep_precomputedBtB)
buffer_size += 0;
else {
buffer_size += square(k);
}
}
if (do_B) {
buffer_size += n * nthreads;
}
if (do_B && has_weights) {
buffer_size += n * nthreads;
}
if (!has_weights) {
if (some_full && !nonneg && !has_l1)
buffer_size += square(k);
}
size_t size_thread_buffer = square(k) + (use_cg? (3*k) : 0);
if (nonneg)
size_thread_buffer += k;
else if (has_l1)
size_thread_buffer += (size_t)3*k;
return buffer_size + nthreads * size_thread_buffer;
}
/* case 3 */
else if (!has_dense && NA_as_zero && !has_weights)
{
if (pass_allocated_BtB && !keep_precomputedBtB)
buffer_size += 0;
else
buffer_size += square(k);
if (nonneg)
buffer_size += k*nthreads;
else if (has_l1)
buffer_size += (size_t)3*k*nthreads;
if (has_bias_static)
buffer_size += k;
return buffer_size;
}
/* case 4 */
else
{
bool add_diag_to_BtB = !(use_cg && !has_dense && NA_as_zero);
if (!has_dense && NA_as_zero && (!use_cg || has_weights))
{
if (pass_allocated_BtB &&
(!keep_precomputedBtB || !add_diag_to_BtB))
{
buffer_size += 0;
}
else
{
buffer_size += square(k);
}
}
size_t size_thread_buffer = square(k);
if (use_cg)
size_thread_buffer = max2(size_thread_buffer, (3 * k));
if (use_cg && !has_dense)
size_thread_buffer = (3 * k) + ((NA_as_zero && k >= n)? n : 0);
if (nonneg)
size_thread_buffer += k;
else if (has_l1)
size_thread_buffer += (size_t)3*k;
return buffer_size + nthreads * size_thread_buffer;
}
}
size_t buffer_size_optimizeA_implicit
(
size_t k, size_t nthreads,
bool pass_allocated_BtB,
bool nonneg, bool has_l1,
bool use_cg, bool finalize_chol
)
{
if (finalize_chol && use_cg)
{
return max2(
buffer_size_optimizeA_implicit(
k, nthreads,
pass_allocated_BtB,
nonneg, has_l1,
false, false
),
buffer_size_optimizeA_implicit(
k, nthreads,
pass_allocated_BtB,
nonneg, has_l1,
true, false
)
);
}
size_t size_thread_buffer = use_cg? (3 * k) : (square(k));
if (nonneg)
size_thread_buffer += k;
else if (has_l1)
size_thread_buffer += (size_t)3*k;
return (pass_allocated_BtB? (size_t)0 : square(k))
+ nthreads * size_thread_buffer;
}
void optimizeA
(
real_t *restrict A, int_t lda,
real_t *restrict B, int_t ldb,
int_t m, int_t n, int_t k,
size_t Xcsr_p[], int_t Xcsr_i[], real_t *restrict Xcsr,
real_t *restrict Xfull, int_t ldX,
bool full_dense, bool near_dense, bool some_full,
int_t cnt_NA[], real_t *restrict weight, bool NA_as_zero,
real_t lam, real_t lam_last,
real_t l1_lam, real_t l1_lam_last,
bool scale_lam, bool scale_bias_const, real_t *restrict wsumA,
bool do_B, int nthreads,
bool use_cg, int_t max_cg_steps,
bool nonneg, int_t max_cd_steps,
real_t *restrict bias_restore,
real_t *restrict bias_BtX, real_t *restrict bias_X, real_t bias_X_glob,
real_t *restrict bias_static, real_t multiplier_bias_BtX,
bool keep_precomputedBtB,
real_t *restrict precomputedBtB, bool *filled_BtB,
real_t *restrict buffer_real_t
)
{
/* Note: the BtB produced here has diagonal, the one from
collective doesn't. */
/* TODO: handle case of all-missing when the values are not reset to zero */
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long ix;
#endif
char uplo = 'L';
int_t ignore;
*filled_BtB = false;
if (Xfull == NULL) do_B = false;
if (l1_lam || l1_lam_last || nonneg) use_cg = false;
/* TODO: in many cases here, it's possible to shrink the buffer
size for the threads when using 'use_cg'. */
/* Case 1: X is full dense with few or no missing values.
Here can apply the closed-form solution with only
one multiplication by B for all rows at once.
If there is a small amount of observations with missing
values, can do a post-hoc pass over them to obtain their
solutions individually. */
if (Xfull != NULL && (full_dense || near_dense) && weight == NULL)
{
real_t *restrict bufferBtBcopy = NULL;
if (near_dense)
{
if (precomputedBtB != NULL && !keep_precomputedBtB)
bufferBtBcopy = precomputedBtB;
else {
bufferBtBcopy = buffer_real_t;
buffer_real_t += square(k);
}
}
real_t *restrict bufferBtB = NULL;
if (precomputedBtB != NULL && !keep_precomputedBtB && !near_dense &&
bufferBtBcopy != precomputedBtB)
{
bufferBtB = precomputedBtB;
}
else {
bufferBtB = buffer_real_t;
buffer_real_t += square(k);
}
/* TODO: this function should do away with 'bufferX', replace it instead
with an 'incX' parameter. */
real_t *restrict bufferX = buffer_real_t;
if (do_B)
buffer_real_t += (size_t)n*(size_t)nthreads;
/* t(B)*B + diag(lam) */
cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
k, n,
1., B, ldb,
0., bufferBtB, k);
if (precomputedBtB != NULL && keep_precomputedBtB) {
copy_arr(bufferBtB, precomputedBtB, square(k));
*filled_BtB = true;
}
add_to_diag(bufferBtB, scale_lam? (lam*(real_t)n) : (lam), k);
if (lam_last != lam)
{
if (!scale_lam)
bufferBtB[square(k)-1] += (lam_last - lam);
else
bufferBtB[square(k)-1] += (lam_last - lam) * (real_t)n;
}
/* Here will also need t(B)*B + diag(lambda) alone (no Cholesky) */
if (bufferBtBcopy != NULL)
memcpy(bufferBtBcopy, bufferBtB, (size_t)square(k)*sizeof(real_t));
/* t(B)*t(X)
Note: this will be passed to LAPACK function which assumes
column-major order, thus must pass the transpose.
Note2: if passing 'do_B=true', the inputs will all be
swapped, so what here says 'A' will be 'B', what says
'm' will be 'n', and so on. But the matrix 'X' remains
the same, so the inputs need to be transposed for B. */
if (!do_B)
cblas_tgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
m, k, n,
1., Xfull, n, B, ldb,
0., A, lda);
else
cblas_tgemm(CblasRowMajor, CblasTrans, CblasNoTrans,
m, k, n,
1., Xfull, ldX, B, ldb,
0., A, lda);
#ifdef FORCE_NO_NAN_PROPAGATION
if (!full_dense && !nonneg && !l1_lam && !l1_lam_last)
#pragma omp parallel for schedule(static) \
num_threads(min2(4, nthreads)) \
shared(A, m, lda)
for (size_t_for ix = 0;
ix < (size_t)m*(size_t)lda - (size_t)(lda-k);
ix++)
A[ix] = isnan(A[ix])? 0. : A[ix];
#endif
/* A = t( inv(t(B)*B + diag(lam)) * t(B)*t(X) )
Note: don't try to flip the equation as the 'posv'
function assumes only the LHS is symmetric. */
if (!nonneg && !l1_lam && !l1_lam_last)
tposv_(&uplo, &k, &m,
bufferBtB, &k,
A, &lda,
&ignore);
else if (!nonneg)
solve_elasticnet_batch(
bufferBtB,
A,
buffer_real_t,
m, k, lda,
scale_lam? (l1_lam*n) : (l1_lam),
scale_lam? (l1_lam_last*n) : (l1_lam_last),
max_cd_steps,
nthreads
);
else
solve_nonneg_batch(
bufferBtB,
A,
buffer_real_t,
m, k, lda,
scale_lam? (l1_lam*n) : (l1_lam),
scale_lam? (l1_lam_last*n) : (l1_lam_last),
max_cd_steps,
nthreads
);
/* If there are some few rows with missing values, now do a
post-hoc pass over them only */
if (!full_dense)
{
size_t size_buffer = square(k);
if (use_cg)
size_buffer = max2(size_buffer, (size_t)(3 * k));
if (nonneg)
size_buffer += k;
else if (l1_lam || l1_lam_last)
size_buffer += (size_t)3*(size_t)k;
int nthreads_restore = 1;
set_blas_threads(1, &nthreads_restore);
#pragma omp parallel for schedule(dynamic) num_threads(nthreads) \
shared(A, lda, B, ldb, ldX, m, n, k, \
scale_lam, scale_bias_const, \
lam, lam_last, l1_lam, l1_lam_last, weight,\
cnt_NA, Xfull, buffer_real_t, bufferBtBcopy, \
nthreads, use_cg, nonneg, max_cd_steps) \
firstprivate(bufferX)
for (size_t_for ix = 0; ix < (size_t)m; ix++)
if (cnt_NA[ix])
{
if (cnt_NA[ix] == n) {
set_to_zero(A + ix*(size_t)lda, k);
continue;
}
if (!do_B)
bufferX = Xfull + ix*(size_t)n;
else
cblas_tcopy(n, Xfull + ix, ldX,
bufferX
+ ((size_t)n*(size_t)omp_get_thread_num()), 1);
if (use_cg)
{
set_to_zero(A + ix*(size_t)lda, k);
if (bias_restore != NULL)
A[ix*(size_t)lda + (size_t)(k-1)] =bias_restore[ix];
}
factors_closed_form(
A + ix*(size_t)lda, k,
B, n, ldb,
bufferX + (do_B?
((size_t)n*(size_t)omp_get_thread_num())
:
((size_t)0)), false,
(real_t*)NULL, (int_t*)NULL, (size_t)0,
(real_t*)NULL,
buffer_real_t
+ size_buffer * (size_t)omp_get_thread_num(),
lam, lam_last,
l1_lam, l1_lam_last,
scale_lam, scale_bias_const, 0.,
(real_t*)NULL,
bufferBtBcopy, cnt_NA[ix], k,
true, false, 1., n,
(real_t*)NULL, false,
use_cg, k, /* <- A was reset to zero, need more steps */
nonneg, max_cd_steps,
(real_t*)NULL, (real_t*)NULL, 0., 1.,
false
);
}
set_blas_threads(nthreads_restore, (int*)NULL);
}
}
/* Case 2: X is dense, but has many missing values or has weights.
Here will do them all individually, pre-calculating only
t(B)*B + diag(lam)
in case some rows have few missing values. */
else if (Xfull != NULL)
{
real_t *restrict bufferBtB = NULL;
if (weight == NULL) {
if (precomputedBtB != NULL && !keep_precomputedBtB)
bufferBtB = precomputedBtB;
else {
bufferBtB = buffer_real_t;
buffer_real_t += square(k);
}
}
real_t *restrict bufferX = NULL;
if (do_B) {
bufferX = buffer_real_t;
buffer_real_t += (size_t)n * (size_t)nthreads;
}
real_t *restrict bufferW = NULL;
if (do_B && weight != NULL) {
bufferW = buffer_real_t;
buffer_real_t += (size_t)n * (size_t)nthreads;
}
real_t *restrict bufferBtBchol = NULL;
if (bufferBtB != NULL)
{
cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
k, n,
1., B, ldb,
0., bufferBtB, k);
if (keep_precomputedBtB && precomputedBtB != NULL)
{
copy_arr(bufferBtB, precomputedBtB, square(k));
*filled_BtB = true;
}
add_to_diag(bufferBtB, scale_lam? (lam*(real_t)n) : (lam), k);
if (lam_last != lam)
{
if (!scale_lam)
bufferBtB[square(k)-1] += (lam_last - lam);
else
bufferBtB[square(k)-1] += (lam_last-lam)*(real_t)n;
}
if (some_full && !nonneg && !l1_lam && !l1_lam_last)
{
bufferBtBchol = buffer_real_t;
buffer_real_t += square(k);
copy_arr(bufferBtB, bufferBtBchol, square(k));
tpotrf_(&uplo, &k, bufferBtBchol, &k, &ignore);
}
}
real_t *restrict buffer_remainder = buffer_real_t;
int nthreads_restore = 1;
set_blas_threads(1, &nthreads_restore);
size_t size_buffer = (size_t)square(k) + (size_t)(use_cg? (3*k) : 0);
#pragma omp parallel for schedule(dynamic) num_threads(nthreads) \
shared(Xfull, weight, do_B, m, n, k, A, lda, B, ldb, ldX, \
lam, lam_last, l1_lam, l1_lam_last, \
scale_lam, scale_bias_const, wsumA, \
bufferBtB, cnt_NA, buffer_remainder, \
use_cg, max_cg_steps, nonneg, max_cd_steps, \
bufferBtBchol) \
firstprivate(bufferX, bufferW)
for (size_t_for ix = 0; ix < (size_t)m; ix++)
{
if (!do_B) {
bufferX = Xfull + ix*(size_t)n;
if (weight != NULL)
bufferW = weight + ix*(size_t)n;
}
else {
cblas_tcopy(n, Xfull + ix, ldX,
bufferX
+ ((size_t)n*(size_t)omp_get_thread_num()), 1);
if (weight != NULL)
cblas_tcopy(n, weight + ix, ldX,
bufferW
+ ((size_t)n*(size_t)omp_get_thread_num()), 1);
}
factors_closed_form(
A + ix*(size_t)lda, k,
B, n, ldb,
bufferX + (do_B? ((size_t)n*(size_t)omp_get_thread_num())
: ((size_t)0)),
cnt_NA[ix] == 0,
(real_t*)NULL, (int_t*)NULL, (size_t)0,
(weight != NULL)?
(bufferW + (do_B? ((size_t)n*(size_t)omp_get_thread_num())
: ((size_t)0)))
:
((real_t*)NULL),
buffer_remainder + size_buffer*(size_t)omp_get_thread_num(),
lam, lam_last,
l1_lam, l1_lam_last,
scale_lam, scale_bias_const,
(weight == NULL || wsumA == NULL)? 0. : wsumA[ix],
(real_t*)NULL,
bufferBtB, cnt_NA[ix], k,
true, false, 1., n,
bufferBtBchol, false,
use_cg, max_cg_steps,
nonneg, max_cd_steps,
(real_t*)NULL, (real_t*)NULL, 0., 1.,
false
);
}
set_blas_threads(nthreads_restore, (int*)NULL);
}
/* Case 3: X is sparse, with missing-as-zero, and no weights.
Here can also use one Cholesky for all rows at once. */
else if (Xfull == NULL && NA_as_zero && weight == NULL)
{
real_t *restrict bufferBtB = NULL;
if (precomputedBtB != NULL && !keep_precomputedBtB)
bufferBtB = precomputedBtB;
else {
bufferBtB = buffer_real_t;
buffer_real_t += square(k);
}
cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
k, n,
1., B, ldb,
0., bufferBtB, k);
if (keep_precomputedBtB && precomputedBtB != NULL)
{
copy_arr(bufferBtB, precomputedBtB, square(k));
*filled_BtB = true;
}
add_to_diag(bufferBtB, scale_lam? (lam*(real_t)n) : (lam), k);
if (lam_last != lam)
{
if (!scale_lam)
bufferBtB[square(k)-1] += (lam_last - lam);
else
bufferBtB[square(k)-1] += (lam_last - lam) * (real_t)n;
}
if (lda == k)
set_to_zero_(A, (size_t)m*(size_t)k, nthreads);
else
for (size_t row = 0; row < (size_t)m; row++)
memset(A + row*(size_t)lda, 0, (size_t)k*sizeof(real_t));
tgemm_sp_dense(
m, k, 1.,
Xcsr_p, Xcsr_i, Xcsr,
B, (size_t)ldb,
A, (size_t)lda,
nthreads
);
if (bias_BtX != NULL) {
multiplier_bias_BtX = 1./multiplier_bias_BtX;
for (size_t row = 0; row < (size_t)m; row++)
cblas_taxpy(k, multiplier_bias_BtX,
bias_BtX, 1, A + row*(size_t)lda, 1);
}
else if (bias_static != NULL) {
/* Note: this should only be encountered when optimizing the side
info matrices, thus there are no extra routes for the other
cases, as it can only happen in this particular situation. */
real_t *restrict buffer_colsumsB = buffer_real_t;
buffer_real_t += k;
set_to_zero(buffer_colsumsB, k);
sum_by_cols(B, buffer_colsumsB, n, k, ldb, nthreads);
cblas_tger(CblasRowMajor, m, k,
1., bias_static, 1,
buffer_colsumsB, 1,
A, lda);
}
if (!nonneg && !l1_lam && !l1_lam_last)
tposv_(&uplo, &k, &m,
bufferBtB, &k,
A, &lda,
&ignore);
else if (!nonneg)
solve_elasticnet_batch(
bufferBtB,
A,
buffer_real_t,
m, k, lda,
scale_lam? (l1_lam*(real_t)n) : (l1_lam),
scale_lam? (l1_lam_last*(real_t)n) : (l1_lam_last),
max_cd_steps,
nthreads
);
else
solve_nonneg_batch(
bufferBtB,
A,
buffer_real_t,
m, k, lda,
scale_lam? (l1_lam*(real_t)n) : (l1_lam),
scale_lam? (l1_lam_last*(real_t)n) : (l1_lam_last),
max_cd_steps,
nthreads
);
}
/* Case 4: X is sparse, with non-present as NA, or with weights.
This is the expected case for most situations. */
else
{
/* When NAs are treated as zeros, can use a precomputed t(B)*B */
real_t *restrict bufferBtB = NULL;
bool add_diag_to_BtB = !(use_cg && Xfull == NULL && NA_as_zero) &&
!scale_lam;
if (Xfull == NULL && NA_as_zero && (!use_cg || weight != NULL))
{
if (precomputedBtB != NULL &&
(!keep_precomputedBtB || !add_diag_to_BtB))
{
bufferBtB = precomputedBtB;
}
else
{
bufferBtB = buffer_real_t;
buffer_real_t += square(k);
}
cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
k, n,
1., B, ldb,
0., bufferBtB, k);
if (precomputedBtB != NULL && keep_precomputedBtB &&
bufferBtB != precomputedBtB)
{
copy_arr(bufferBtB, precomputedBtB, square(k));
}
if (add_diag_to_BtB)
{
add_to_diag(bufferBtB, lam, k);
if (lam_last != lam) bufferBtB[square(k)-1] += (lam_last - lam);
}
if (precomputedBtB != NULL)
*filled_BtB = true;
}
size_t size_buffer = square(k);
if (use_cg)
size_buffer = max2(size_buffer, (size_t)(3 * k));
if (use_cg && Xfull == NULL)
size_buffer = (size_t)(3 * k)
+ (size_t)((NA_as_zero && k >= n)? n : 0);
if (nonneg)
size_buffer += k;
else if (l1_lam || l1_lam_last)
size_buffer += (size_t)3*(size_t)k;
int nthreads_restore = 1;
set_blas_threads(1, &nthreads_restore);
#pragma omp parallel for schedule(dynamic) num_threads(nthreads) \
shared(A, lda, B, ldb, m, n, k, \
scale_lam, scale_bias_const, wsumA, \
lam, lam_last, l1_lam, l1_lam_last, weight, cnt_NA, \
Xcsr_p, Xcsr_i, Xcsr, buffer_real_t, NA_as_zero, \
bufferBtB, size_buffer, use_cg, \
nonneg, max_cd_steps, bias_BtX, bias_X, \
bias_X_glob, multiplier_bias_BtX)
for (size_t_for ix = 0; ix < (size_t)m; ix++)
{
if ((Xcsr_p[ix+(size_t)1] > Xcsr_p[ix]) ||
(NA_as_zero && bias_BtX != NULL))
{
factors_closed_form(
A + ix*(size_t)lda, k,
B, n, ldb,
(real_t*)NULL, false,
Xcsr + Xcsr_p[ix], Xcsr_i + Xcsr_p[ix],
Xcsr_p[ix+(size_t)1] - Xcsr_p[ix],
(weight == NULL)? ((real_t*)NULL) : (weight + Xcsr_p[ix]),
buffer_real_t + ((size_t)omp_get_thread_num()*size_buffer),
lam, lam_last,
l1_lam, l1_lam_last,
scale_lam, scale_bias_const,
(weight == NULL || wsumA == NULL)? 0. : wsumA[ix],
(real_t*)NULL,
bufferBtB, 0, k,
add_diag_to_BtB, false, 1., n,
(real_t*)NULL, NA_as_zero,
use_cg, max_cg_steps,
nonneg, max_cd_steps,
bias_BtX, bias_X, bias_X_glob, multiplier_bias_BtX,
false
);
}
}
set_blas_threads(nthreads_restore, (int*)NULL);
}
}
void optimizeA_implicit
(
real_t *restrict A, size_t lda,
real_t *restrict B, size_t ldb,
int_t m, int_t n, int_t k,
size_t Xcsr_p[], int_t Xcsr_i[], real_t *restrict Xcsr,
real_t lam, real_t l1_lam,
int nthreads,
bool use_cg, int_t max_cg_steps,
bool nonneg, int_t max_cd_steps,
real_t *restrict precomputedBtB, /* <- will be calculated if not passed */
real_t *restrict buffer_real_t
)
{
if (nonneg || l1_lam) use_cg = false;
if (precomputedBtB == NULL)
{
precomputedBtB = buffer_real_t;
buffer_real_t += square(k);
}
/* TODO: keep it as a parameter whether the BtB comes precomputed or not,
so as to incorporate this function later into the 'factors_multiple'. */
cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
k, n,
1., B, ldb,
0., precomputedBtB, k);
if (!use_cg) {
add_to_diag(precomputedBtB, lam, k);
set_to_zero_(A, (size_t)m*(size_t)k - (lda-(size_t)k), nthreads);
}
size_t size_buffer = use_cg? (3 * k) : (square(k));
if (nonneg)
size_buffer += k;
else if (l1_lam)
size_buffer += (size_t)3*(size_t)k;
int nthreads_restore = 1;
set_blas_threads(1, &nthreads_restore);
int_t ix = 0;
if (use_cg)
{
#pragma omp parallel for schedule(dynamic) num_threads(nthreads) \
shared(A, B, lda, ldb, m, n, k, lam, \
Xcsr, Xcsr_i, Xcsr_p, precomputedBtB, buffer_real_t, \
max_cg_steps, size_buffer)
for (ix = 0; ix < m; ix++)
if (Xcsr_p[ix+(size_t)1] > Xcsr_p[ix])
factors_implicit_cg(
A + (size_t)ix*lda, k,
B, ldb,
Xcsr + Xcsr_p[ix], Xcsr_i + Xcsr_p[ix],
Xcsr_p[ix+(size_t)1] - Xcsr_p[ix],
lam,
precomputedBtB, k,
max_cg_steps,
buffer_real_t + ((size_t)omp_get_thread_num() * size_buffer)
);
}
else
{
#pragma omp parallel for schedule(dynamic) num_threads(nthreads) \
shared(A, B, lda, ldb, m, n, k, lam, l1_lam, \
Xcsr, Xcsr_i, Xcsr_p, precomputedBtB, buffer_real_t, \
size_buffer, nonneg, max_cd_steps)
for (ix = 0; ix < m; ix++)
if (Xcsr_p[ix+(size_t)1] > Xcsr_p[ix])
factors_implicit_chol(
A + (size_t)ix*lda, k,
B, ldb,
Xcsr + Xcsr_p[ix], Xcsr_i + Xcsr_p[ix],
Xcsr_p[ix+(size_t)1] - Xcsr_p[ix],
lam, l1_lam,
precomputedBtB, k,
nonneg, max_cd_steps,
buffer_real_t + ((size_t)omp_get_thread_num() * size_buffer)
);
}
set_blas_threads(nthreads_restore, (int*)NULL);
}
int_t calc_mean_and_center
(
int_t ixA[], int_t ixB[], real_t *restrict *X_, size_t nnz,
real_t *restrict *Xfull_, real_t *restrict Xtrans,
int_t m, int_t n,
size_t Xcsr_p[], int_t Xcsr_i[], real_t *restrict Xcsr,
size_t Xcsc_p[], int_t Xcsc_i[], real_t *restrict Xcsc,
real_t *restrict weight,
bool NA_as_zero, bool nonneg, bool center, int nthreads,
real_t *restrict glob_mean,
bool *modified_X, bool *modified_Xfull,
bool allow_overwrite_X
)
{
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long ix;
#endif
if (!allow_overwrite_X) {
*modified_X = false;
*modified_Xfull = false;
}
if (glob_mean == NULL)
return 0;
if (!center)
{
*glob_mean = 0;
return 0;
}
real_t *restrict X = (X_ == NULL)? NULL : (*X_);
real_t *restrict Xfull = (Xfull_ == NULL)? NULL : (*Xfull_);
size_t m_by_n = (Xfull == NULL)? 0 : ((size_t)m * (size_t)n);
double xsum = 0.;
double wsum = 0.;
size_t cnt = 0;
if (weight == NULL)
{
if (Xfull != NULL)
{
#ifdef _OPENMP
if (nthreads >= 8)
{
#pragma omp parallel for schedule(static) \
num_threads(nthreads) \
reduction(+:xsum,cnt) shared(Xfull, m_by_n)
for (size_t_for ix = 0; ix < m_by_n; ix++) {
xsum += (!isnan(Xfull[ix]))? Xfull[ix] : 0;
cnt += !isnan(Xfull[ix]);
}
*glob_mean = (real_t)(xsum / (double)cnt);
}
else
#endif
{
for (size_t ix = 0; ix < m_by_n; ix++) {
xsum += isnan(Xfull[ix])?
0 : ((Xfull[ix] - xsum) / (double)(++cnt));
}
*glob_mean = xsum;
}
if (!cnt)
fprintf(stderr, "Warning: 'X' has all entries missing.\n");
}
else
{
#ifdef _OPENMP
if (nthreads >= 8)
{
#pragma omp parallel for schedule(static) \
num_threads(nthreads) \
reduction(+:xsum) shared(X, nnz)
for (size_t_for ix = 0; ix < nnz; ix++)
xsum += X[ix];
*glob_mean = (real_t)(xsum / (double)nnz);
}
else
#endif
{
for (size_t ix = 0; ix < nnz; ix++)
xsum += (X[ix] - xsum) / (double)(++cnt);
*glob_mean = xsum;
}
if (!xsum)
fprintf(stderr, "Warning: 'X' has only zeros.\n");
if (NA_as_zero)
*glob_mean = (long double)(*glob_mean)
*
((long double)nnz
/
((long double)m * (long double)n));
}
}
else /* <- has weights */
{
if (Xfull != NULL)
{
#ifdef _OPENMP
if (nthreads >= 8)
{
#pragma omp parallel for schedule(static) \
num_threads(nthreads) \
reduction(+:xsum,wsum) shared(Xfull, weight, m_by_n)
for (size_t_for ix = 0; ix < m_by_n; ix++) {
xsum += (!isnan(Xfull[ix]))? (Xfull[ix]) : (0);
wsum += isnan(Xfull[ix])? 0. : weight[ix];
}
*glob_mean = (real_t)(xsum / wsum);
}
else
#endif
{
wsum = DBL_EPSILON;
for (size_t ix = 0; ix < m_by_n; ix++) {
xsum += isnan(Xfull[ix])?
0 : (((Xfull[ix] - xsum) * weight[ix])
/
(wsum += weight[ix]));
}
*glob_mean = xsum;
}
}
else
{
#ifdef _OPENMP
if (nthreads >= 8)
{
#pragma omp parallel for schedule(static) \
num_threads(nthreads) \
reduction(+:xsum,wsum) shared(X, weight, nnz)
for (size_t_for ix = 0; ix < nnz; ix++) {
xsum += X[ix];
wsum += weight[ix];
}
*glob_mean = (real_t)(xsum / wsum);
}
else
#endif
{
wsum = DBL_EPSILON;
for (size_t ix = 0; ix < nnz; ix++) {
xsum += ((X[ix] - xsum) * weight[ix])
/
(wsum += weight[ix]);
}
*glob_mean = xsum;
}
if (NA_as_zero)
{
long double wsum_l = compensated_sum(weight, nnz);
*glob_mean = (long double)(*glob_mean)
/
(wsum_l / (wsum_l + ((long double)m*(long double)n
- (long double)nnz)));
}
}
if (wsum <= 0)
fprintf(stderr, "Warning: weights are not positive.\n");
}
if (nonneg)
*glob_mean = fmax_t(*glob_mean, 0);
/* If the obtained mean is too small, simply ignore it */
if (fabs_t(*glob_mean) < sqrt_t(EPSILON_T))
*glob_mean = 0;
/* Now center X in-place */
if (*glob_mean != 0 && !(Xfull == NULL && NA_as_zero))
{
if (Xfull != NULL) {
if (!allow_overwrite_X) {
Xfull = (real_t*)malloc(m_by_n*sizeof(real_t));
if (Xfull == NULL) return 1;
copy_arr_(*Xfull_, Xfull, m_by_n, nthreads);
*Xfull_ = Xfull;
*modified_Xfull = true;
}
for (size_t_for ix = 0; ix < m_by_n; ix++)
Xfull[ix] = isnan(Xfull[ix])?
(NAN_) : (Xfull[ix] - (*glob_mean));
if (Xtrans != NULL) {
for (size_t_for ix = 0; ix < m_by_n; ix++)
Xtrans[ix] = isnan(Xtrans[ix])?
(NAN_) : (Xtrans[ix] - (*glob_mean));
}
} else if (Xcsr != NULL) {
for (size_t_for ix = 0; ix < nnz; ix++) {
Xcsr[ix] -= *glob_mean;
Xcsc[ix] -= *glob_mean;
}
} else if (X != NULL && nnz) {
if (!allow_overwrite_X) {
X = (real_t*)malloc(nnz*sizeof(real_t));
if (X == NULL) return 1;
copy_arr_(*X_, X, nnz, nthreads);
*X_ = X;
*modified_X = true;
}
for (size_t_for ix = 0; ix < nnz; ix++)
X[ix] -= *glob_mean;
}
}
return 0;
}
/* TODO: factor out this function */
int_t initialize_biases
(
real_t *restrict glob_mean, real_t *restrict biasA, real_t *restrict biasB,
bool user_bias, bool item_bias, bool center,
real_t lam_user, real_t lam_item,
bool scale_lam, bool scale_bias_const,
bool force_calc_user_scale, bool force_calc_item_scale,
real_t *restrict scaling_biasA, real_t *restrict scaling_biasB,
int_t m, int_t n,
int_t m_bias, int_t n_bias,
int_t ixA[], int_t ixB[], real_t *restrict *X_, size_t nnz,
real_t *restrict *Xfull_, real_t *restrict Xtrans,
size_t Xcsr_p[], int_t Xcsr_i[], real_t *restrict Xcsr,
size_t Xcsc_p[], int_t Xcsc_i[], real_t *restrict Xcsc,
real_t *restrict weight, real_t *restrict Wtrans,
real_t *restrict weightR, real_t *restrict weightC,
bool nonneg,
int nthreads,
bool *modified_X, bool *modified_Xfull,
bool allow_overwrite_X
)
{
int_t retval = 0;
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long row, col;
#endif
if (!allow_overwrite_X) {
*modified_X = false;
*modified_Xfull = false;
}
real_t *restrict X = (X_ == NULL)? NULL : (*X_);
real_t *restrict Xfull = (Xfull_ == NULL)? NULL : (*Xfull_);
size_t m_by_n = (Xfull == NULL)? (size_t)0 : ((size_t)m * (size_t)n);
size_t *restrict buffer_cnt = NULL;
double *restrict buffer_w = NULL;
size_t cnt = 0;
long double wsum = 0;
if (user_bias || item_bias)
{
size_t dim_buffer = (user_bias && item_bias)?
max2(m_bias, n_bias)
:
(user_bias? m_bias : n_bias);
if (weight == NULL)
{
buffer_cnt = (size_t*)calloc(dim_buffer, sizeof(size_t));
if (buffer_cnt == NULL) goto throw_oom;
}
else
{
buffer_w = (double*)calloc(dim_buffer, sizeof(double));
if (buffer_w == NULL) goto throw_oom;
}
}
/* First calculate the global mean */
if (center)
{
calc_mean_and_center(
ixA, ixB, &X, nnz,
&Xfull, Xtrans,
m, n,
Xcsr_p, Xcsr_i, Xcsr,
Xcsc_p, Xcsc_i, Xcsc,
weight,
false, nonneg, center, nthreads,
glob_mean, modified_X, modified_Xfull,
allow_overwrite_X
);
if (!allow_overwrite_X) {
if (*modified_X && X != NULL) *X_ = X;
if (*modified_Xfull && Xfull != NULL) *Xfull_ = Xfull;
}
}
/* If not centering, might still need to know the number of non-missing
entries in order to calculate the scaling of the biases. */
else if ((Xfull != NULL || weight != NULL) &&
(user_bias || item_bias ||
force_calc_user_scale || force_calc_item_scale) &&
scale_lam && scale_bias_const)
{
if (weight == NULL)
{
if (Xfull != NULL)
for (size_t ix = 0; ix < m_by_n; ix++)
cnt += !isnan(Xfull[ix]);
}
else
{
if (Xfull != NULL)
for (size_t ix = 0; ix < m_by_n; ix++)
wsum += isnan(Xfull[ix])? 0. : weight[ix];
else
for (size_t ix = 0; ix < nnz; ix++)
wsum += weight[ix];
}
}
if ((user_bias || item_bias ||
force_calc_user_scale || force_calc_item_scale) &&
scale_lam && scale_bias_const)
{
if (weight == NULL)
{
if (Xfull != NULL)
{
if (user_bias || force_calc_user_scale)
*scaling_biasA = (long double)cnt / (long double)m;
if (item_bias || force_calc_item_scale)
*scaling_biasB = (long double)cnt / (long double)n;
}
else
{
if (user_bias || force_calc_user_scale)
*scaling_biasA = (long double)nnz / (long double)m;
if (item_bias || force_calc_item_scale)
*scaling_biasB = (long double)nnz / (long double)n;
}
}
else
{
if (user_bias || force_calc_user_scale)
*scaling_biasA = (long double)wsum / (long double)m;
if (item_bias || force_calc_item_scale)
*scaling_biasB = (long double)wsum / (long double)n;
}
if (user_bias || force_calc_user_scale)
lam_user *= *scaling_biasA;
if (item_bias || force_calc_item_scale)
lam_item *= *scaling_biasB;
scale_lam = false;
scale_bias_const = false;
}
/* Note: the original papers suggested starting these values by
obtaining user biases first, then item biases, but I've found
that doing it the other way around leads to better results
with the ALS method **when** the ALS method updates the main matrices
in that same order (which this software does, unlike most other
implementations). Thus, both the ALS procedure and this function
make updates over items first and users later. */
if (user_bias) set_to_zero(biasA, m_bias);
if (item_bias) set_to_zero(biasA, n_bias);
/* Calculate item biases, but don't apply them to X */
if (item_bias)
{
if (Xtrans != NULL)
{
if (weight == NULL)
{
#pragma omp parallel for schedule(static) \
num_threads(cap_to_4(nthreads)) \
shared(m, n_bias, Xtrans, biasB, buffer_cnt)
for (size_t_for col = 0; col < (size_t)n_bias; col++)
{
double bsum = 0;
size_t cnt = 0;
for (size_t row = 0; row < (size_t)m; row++)
{
bsum += (!isnan(Xtrans[row + col*(size_t)m]))?
Xtrans[row + col*(size_t)m] : 0.;
cnt += !isnan(Xtrans[row + col*(size_t)m]);
}
biasB[col] = bsum;
buffer_cnt[col] = cnt;
}
}
else
{
#pragma omp parallel for schedule(static) \
num_threads(cap_to_4(nthreads)) \
shared(m, n_bias, Xtrans, Wtrans, biasB, buffer_w)
for (size_t_for col = 0; col < (size_t)n_bias; col++)
{
double bsum = 0;
double wsum = 0;
for (size_t row = 0; row < (size_t)m; row++)
{
bsum += (!isnan(Xtrans[row + col*(size_t)m]))?
Xtrans[row + col*(size_t)m] : 0.;
wsum += (!isnan(Xtrans[row + col*(size_t)m]))?
Wtrans[row + col*(size_t)m] : 0.;
}
biasB[col] = bsum;
buffer_w[col] = wsum;
}
}
}
else if (Xfull != NULL)
{
if (weight == NULL)
{
for (size_t row = 0; row < (size_t)m; row++)
for (size_t col = 0; col < (size_t)n_bias; col++) {
biasB[col] += (!isnan(Xfull[col + row*(size_t)n]))?
(Xfull[col + row*(size_t)n]) : (0.);
buffer_cnt[col] += !isnan(Xfull[col + row*(size_t)n]);
}
}
else
{
for (size_t row = 0; row < (size_t)m; row++)
for (size_t col = 0; col < (size_t)n_bias; col++) {
biasB[col] += (!isnan(Xfull[col + row*(size_t)n]))?
(Xfull[col + row*(size_t)n]) : (0.);
buffer_w[col] += (!isnan(Xfull[col + row*(size_t)n]))?
(weight[col + row*(size_t)n]) : (0.);
}
}
}
else if (Xcsc != NULL)
{
if (weight == NULL)
{
#pragma omp parallel for schedule(dynamic) \
num_threads(cap_to_4(nthreads)) \
shared(n_bias, Xcsc_p, Xcsc, biasB, buffer_cnt)
for (size_t_for col = 0; col < (size_t)n_bias; col++)
{
buffer_cnt[col] = Xcsc_p[col+(size_t)1] - Xcsc_p[col];
double bsum = 0;
for (size_t ix=Xcsc_p[col]; ix < Xcsc_p[col+(size_t)1];ix++)
{
bsum += Xcsc[ix];
}
biasB[col] = bsum;
}
}
else
{
#pragma omp parallel for schedule(dynamic) \
num_threads(cap_to_4(nthreads)) \
shared(n_bias, Xcsc_p, Xcsc, biasB, weightC, buffer_w)
for (size_t_for col = 0; col < (size_t)n_bias; col++)
{
double bsum = 0;
double wsum = 0;
for (size_t ix=Xcsc_p[col]; ix < Xcsc_p[col+(size_t)1];ix++)
{
bsum += Xcsc[ix];
wsum += weightC[ix];
}
biasB[col] = bsum;
buffer_w[col] = wsum;
}
}
}
else
{
if (weight == NULL)
{
for (size_t ix = 0; ix < nnz; ix++) {
biasB[ixB[ix]] += X[ix];
buffer_cnt[ixB[ix]]++;
}
}
else
{
for (size_t ix = 0; ix < nnz; ix++) {
biasB[ixB[ix]] += X[ix];
buffer_w[ixB[ix]] += weight[ix];
}
}
}
if (weight == NULL)
{
for (int_t ix = 0; ix < n_bias; ix++)
biasB[ix] /= ((double)buffer_cnt[ix]
+ (lam_item
*
(scale_lam? (double)buffer_cnt[ix] : 1.)));
}
else
{
for (int_t ix = 0; ix < n_bias; ix++)
biasB[ix] /= (buffer_w[ix]
+ (lam_item
*
(scale_lam? buffer_w[ix] : 1.)));
}
for (int_t ix = 0; ix < n_bias; ix++)
biasB[ix] = (!isnan(biasB[ix]))? biasB[ix] : 0.;
if (nonneg)
{
for (int_t ix = 0; ix < n_bias; ix++)
biasB[ix] = (biasB[ix] >= 0.)? biasB[ix] : 0.;
}
}
/* Finally, user biases */
if (user_bias)
{
if (item_bias)
{
if (buffer_cnt != NULL)
memset(buffer_cnt, 0, (size_t)m_bias*sizeof(size_t));
if (buffer_w != NULL)
memset(buffer_w, 0, (size_t)m_bias*sizeof(double));
}
if (Xfull != NULL)
{
if (weight == NULL)
{
#pragma omp parallel for schedule(static) \
num_threads(cap_to_4(nthreads)) \
shared(m_bias, n, Xfull, biasA, biasB, buffer_cnt)
for (size_t_for row = 0; row < (size_t)m_bias; row++)
{
double asum = 0;
size_t cnt = 0;
for (size_t col = 0; col < (size_t)n; col++) {
asum += (!isnan(Xfull[col + row*(size_t)n]))?
(Xfull[col + row*(size_t)n]
- (item_bias? biasB[col] : 0.))
: (0.);
cnt += !isnan(Xfull[col + row*(size_t)n]);
}
biasA[row] = asum;
buffer_cnt[row] = cnt;
}
}
else
{
#pragma omp parallel for schedule(static) \
num_threads(cap_to_4(nthreads)) \
shared(m_bias, n, Xfull, biasA, biasB, buffer_w)
for (size_t_for row = 0; row < (size_t)m_bias; row++)
{
double asum = 0;
double wsum = 0;
for (size_t col = 0; col < (size_t)n; col++) {
asum += (!isnan(Xfull[col + row*(size_t)n]))?
(Xfull[col + row*(size_t)n]
- (item_bias? biasB[col] : 0.))
: (0.);
wsum += (!isnan(Xfull[col + row*(size_t)n]))?
weight[col + row*(size_t)n] : 0.;
}
biasA[row] = asum;
buffer_w[row] = wsum;
}
}
}
else if (Xcsr != NULL)
{
if (weight == NULL)
{
#pragma omp parallel for schedule(dynamic) \
num_threads(nthreads) \
shared(m_bias, Xcsr_p, Xcsr_i, Xcsr, biasA, biasB, \
item_bias, buffer_cnt)
for (size_t_for row = 0; row < (size_t)m_bias; row++)
{
buffer_cnt[row] = Xcsr_p[row+(size_t)1] - Xcsr_p[row];
double asum = 0;
for (size_t ix=Xcsr_p[row]; ix < Xcsr_p[row+(size_t)1];ix++)
{
asum += Xcsr[ix] - (item_bias? biasB[Xcsr_i[ix]] : 0.);
}
biasA[row] = asum;
}
}
else
{
#pragma omp parallel for schedule(dynamic) \
num_threads(cap_to_4(nthreads)) \
shared(m_bias, Xcsr_p, Xcsr_i, Xcsr, biasA, biasB, \
item_bias, buffer_w, weightR)
for (size_t_for row = 0; row < (size_t)m_bias; row++)
{
double asum = 0;
double wsum = 0;
for (size_t ix=Xcsr_p[row]; ix < Xcsr_p[row+(size_t)1];ix++)
{
asum += Xcsr[ix] - (item_bias? biasB[Xcsr_i[ix]] : 0.);
wsum += weightR[ix];
}
biasA[row] = asum;
buffer_w[row] = wsum;
}
}
}
else
{
if (weight == NULL)
{
for (size_t ix = 0; ix < nnz; ix++) {
biasA[ixA[ix]]
+=
X[ix] - (item_bias? (biasB[ixB[ix]]) : (0.));
buffer_cnt[ixA[ix]]++;
}
}
else
{
for (size_t ix = 0; ix < nnz; ix++) {
biasA[ixA[ix]]
+=
X[ix] - (item_bias? (biasB[ixB[ix]]) : (0.));
buffer_w[ixA[ix]] += weight[ix];
}
}
}
if (weight == NULL)
{
for (int_t ix = 0; ix < m_bias; ix++)
biasA[ix] /= ((double)buffer_cnt[ix]
+ (lam_user
*
(scale_lam? (double)buffer_cnt[ix] : 1.)));
}
else
{
for (int_t ix = 0; ix < m_bias; ix++)
biasA[ix] /= (buffer_w[ix]
+ (lam_user
*
(scale_lam? buffer_w[ix] : 1.)));
}
for (int_t ix = 0; ix < m_bias; ix++)
biasA[ix] = (!isnan(biasA[ix]))? biasA[ix] : 0.;
if (nonneg)
{
for (int_t ix = 0; ix < m_bias; ix++)
biasA[ix] = (biasA[ix] >= 0.)? biasA[ix] : 0.;
}
}
cleanup:
free(buffer_cnt);
free(buffer_w);
return retval;
throw_oom:
retval = 1;
goto cleanup;
}
/* Here, 'X' should already be centered, unless using 'NA_as_zero'. */
int_t initialize_biases_onesided
(
real_t *restrict Xfull, int_t m, int_t n, bool do_B, int_t *restrict cnt_NA,
size_t Xcsr_p[], int_t Xcsr_i[], real_t *restrict Xcsr,
real_t *restrict weight, real_t *restrict weightR,
real_t glob_mean, bool NA_as_zero, bool nonneg,
real_t lam, bool scale_lam,
real_t *restrict wsumA,
real_t *restrict biasA,
int nthreads
)
{
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long row;
#endif
if (fabs_t(lam) < EPSILON_T)
lam = EPSILON_T;
if (Xfull != NULL && weight == NULL)
{
if (!do_B)
{
#pragma omp parallel for schedule(static) \
num_threads(cap_to_4(nthreads)) \
shared(Xfull, m, n, biasA, wsumA, scale_lam, lam)
for (size_t_for row = 0; row < (size_t)m; row++)
{
double bmean = 0;
int_t cnt = 0;
for (size_t col = 0; col < (size_t)n; col++)
{
bmean += (isnan(Xfull[col + row*n]))?
(0) : ((Xfull[col + row*n] - bmean)
/
(double)(++cnt));
}
bmean *= (double)cnt
/
((double)cnt
+
(lam * ((wsumA != NULL)?
((double)wsumA[row])
:
(scale_lam? (double)max2(cnt,1) : 1.))));
biasA[row] = bmean;
}
}
else /* <- in this case, X is transposed */
{
set_to_zero(biasA, m);
int_t tmp = m; m = n; n = tmp;
real_t *restrict biasB = biasA;
for (size_t row = 0; row < (size_t)m; row++)
for (size_t col = 0; col < (size_t)n; col++)
biasB[col] += (isnan(Xfull[col + row*n]))?
0 : Xfull[col + row*n];
for (int_t col = 0; col < n; col++)
biasB[col] /= (double)(n - cnt_NA[col])
+
(lam * ((wsumA != NULL)?
((double)wsumA[col])
:
(scale_lam?
(double)max2(n - cnt_NA[col], 1) : 1.)));
}
}
else if (Xfull != NULL) /* <- has weights */
{
if (!do_B)
{
#pragma omp parallel for schedule(static) \
num_threads(cap_to_4(nthreads)) \
shared(Xfull, m, n, biasA, weight, wsumA, scale_lam, lam)
for (size_t_for row = 0; row < (size_t)m; row++)
{
double bmean = 0;
double wsum = DBL_EPSILON;
for (size_t col = 0; col < (size_t)n; col++)
{
bmean += (isnan(Xfull[col + row*n]))?
(0) : ( ( (Xfull[col + row*n] - bmean)
* weight[col + row*n])
/
(double)(wsum += weight[col + row*n]) );
}
if (m > cnt_NA[row]) {
bmean *= wsum
/
(wsum
+
(lam * ((wsumA != NULL)?
(wsumA[row])
:
(scale_lam? wsum : (real_t)1.))));
}
biasA[row] = bmean;
}
}
else /* <- in this case, X is transposed */
{
set_to_zero(biasA, m);
int_t tmp = m; m = n; n = tmp;
real_t *restrict biasB = biasA;
real_t *restrict wsum = (real_t*)calloc(n, sizeof(real_t));
if (wsum == NULL) return 1;
for (size_t row = 0; row < (size_t)m; row++)
{
for (size_t col = 0; col < (size_t)n; col++)
{
biasB[col] += (isnan(Xfull[col + row*n]))?
0 : Xfull[col + row*n];
wsum[col] += (isnan(Xfull[col + row*n]))?
0 : weight[col + row*n];
}
}
for (int_t col = 0; col < n; col++)
wsum[col] = (cnt_NA[col] < n)? wsum[col] : 1;
for (int_t col = 0; col < n; col++)
biasB[col] /= wsum[col]
+
(lam * ((wsumA != NULL)?
((double)wsumA[col])
:
(scale_lam? wsum[col] : (real_t)1.)));
free(wsum);
}
}
else if (weightR == NULL && !NA_as_zero)
{
#pragma omp parallel for schedule(dynamic) \
num_threads(cap_to_4(nthreads)) \
shared(Xcsr_p, Xcsr, m, biasA, wsumA, scale_lam, lam)
for (size_t_for row = 0; row < (size_t)m; row++)
{
double bmean = 0;
for (size_t ix = Xcsr_p[row]; ix < Xcsr_p[row+1]; ix++)
bmean += (Xcsr[ix] - bmean) / (double)(ix - Xcsr_p[row] + 1);
bmean *= (double)(Xcsr_p[row+1] - Xcsr_p[row])
/
((double)(Xcsr_p[row+1] - Xcsr_p[row])
+ lam * ((wsumA != NULL)?
(double)wsumA[row]
:
(scale_lam?
(double)max2(
Xcsr_p[row+1] - Xcsr_p[row],
1)
:
1.)));
biasA[row] = bmean;
}
}
else if (!NA_as_zero) /* <- has weights */
{
#pragma omp parallel for schedule(dynamic) \
num_threads(cap_to_4(nthreads)) \
shared(Xcsr_p, Xcsr, weightR, m, biasA, wsumA, scale_lam, lam)
for (size_t_for row = 0; row < (size_t)m; row++)
{
double bmean = 0;
double wsum = DBL_EPSILON;
for (size_t ix = Xcsr_p[row]; ix < Xcsr_p[row+1]; ix++)
bmean += ((Xcsr[ix] - bmean) * weightR[ix])
/
(wsum += weightR[ix]);
wsum = (Xcsr_p[row+1] > Xcsr_p[row])? wsum : 0;
bmean *= wsum
/
(wsum
+ lam * ((wsumA != NULL)?
(double)wsumA[row]
:
(scale_lam? max2(wsum, DBL_EPSILON) : 1.)));
biasA[row] = bmean;
}
}
else if (weightR == NULL) /* <- has NA_as_zero */
{
#pragma omp parallel for schedule(dynamic) \
num_threads(cap_to_4(nthreads)) \
shared(Xcsr_p, Xcsr, m, biasA, wsumA, n, \
scale_lam, lam, glob_mean)
for (size_t_for row = 0; row < (size_t)m; row++)
{
double bmean = 0;
for (size_t ix = Xcsr_p[row]; ix < Xcsr_p[row+1]; ix++)
bmean += (Xcsr[ix] - bmean) / (double)(ix - Xcsr_p[row] + 1);
bmean -= glob_mean
/
((double)(Xcsr_p[row+1] - Xcsr_p[row]) / (double)n);
bmean *= (double)(Xcsr_p[row+1] - Xcsr_p[row])
/
((double)n
+ lam * ((wsumA != NULL)?
(double)wsumA[row]
:
(scale_lam? (double)n : 1.)));
biasA[row] = (Xcsr_p[row+1] > Xcsr_p[row])?
bmean
:
(-glob_mean
/
((double)n
/
((double)n + lam * ((wsumA != NULL)?
(double)wsumA[row]
:
(scale_lam?
(double)n : 1.)))));
}
}
else /* <- has NA_as_zero and weights */
{
#pragma omp parallel for schedule(dynamic) \
num_threads(cap_to_4(nthreads)) \
shared(Xcsr_p, Xcsr, m, biasA, wsumA, n, scale_lam, \
weightR, glob_mean)
for (size_t_for row = 0; row < (size_t)m; row++)
{
double bmean = 0;
double wsum = DBL_EPSILON;
for (size_t ix = Xcsr_p[row]; ix < Xcsr_p[row+1]; ix++)
bmean += ((Xcsr[ix] - bmean) * weightR[ix])
/
(wsum += weightR[ix]);
wsum = (Xcsr_p[row+1] > Xcsr_p[row])? wsum : 0;
bmean -= glob_mean
/
(wsum / ((double)(n
- (int_t)(Xcsr_p[row+1] - Xcsr_p[row]))
+ wsum));
bmean *= wsum
/
(((double)(n - (Xcsr_p[row+1] - Xcsr_p[row])) + wsum)
+ lam * ((wsumA != NULL)?
(double)wsumA[row]
:
(scale_lam?
((double)(n
-(int_t)(Xcsr_p[row+1]
-Xcsr_p[row]))
+ wsum)
:
1.)));
biasA[row] = (Xcsr_p[row+1] > Xcsr_p[row])?
bmean
:
(-glob_mean
/
((double)n
/
((double)n + lam * ((wsumA != NULL)?
(double)wsumA[row]
:
(scale_lam?
(double)n : 1.)))));
}
}
if (nonneg)
{
for (int_t row = 0; row < m; row++)
biasA[row] = (biasA[row] >= 0)? biasA[row] : 0;
}
return 0;
}
int_t initialize_biases_twosided
(
real_t *restrict Xfull, real_t *restrict Xtrans,
int_t *restrict cnt_NA_byrow, int_t *restrict cnt_NA_bycol,
int_t m, int_t n,
bool NA_as_zero, bool nonneg, double glob_mean,
size_t *restrict Xcsr_p, int_t *restrict Xcsr_i, real_t *Xcsr,
size_t *restrict Xcsc_p, int_t *restrict Xcsc_i, real_t *Xcsc,
real_t *restrict weight, real_t *restrict Wtrans,
real_t *restrict weightR, real_t *restrict weightC,
real_t lam_user, real_t lam_item, bool scale_lam,
real_t *restrict wsumA, real_t *restrict wsumB,
real_t *restrict biasA, real_t *restrict biasB,
int nthreads
)
{
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long row, col;
#endif
if (fabs_t(lam_user) < EPSILON_T)
lam_user = EPSILON_T;
if (fabs_t(lam_item) < EPSILON_T)
lam_item = EPSILON_T;
int_t retval = 0;
double *restrict meanA = NULL;
double *restrict meanB = NULL;
double *restrict adjA = NULL;
double *restrict adjB = NULL;
real_t *restrict wsum_B = NULL;
int niter = nonneg? 15 : 5;
if (NA_as_zero)
{
meanA = (double*)malloc((size_t)m*sizeof(double));
meanB = (double*)malloc((size_t)n*sizeof(double));
if (meanA == NULL || meanB == NULL) goto throw_oom;
if (weight != NULL)
{
adjA = (double*)malloc((size_t)m*sizeof(double));
adjB = (double*)malloc((size_t)n*sizeof(double));
if (adjA == NULL || adjB == NULL) goto throw_oom;
}
if (weight == NULL)
{
for (int_t row = 0; row < m; row++) {
double xmean = 0;
for (size_t ix = Xcsr_p[row]; ix < Xcsr_p[row+1]; ix++)
xmean += (Xcsr[ix] - xmean) / (double)(ix - Xcsr_p[row] +1);
xmean *= (double)(Xcsr_p[row+1] - Xcsr_p[row]) / (double)n;
meanA[row] = xmean;
}
for (int_t col = 0; col < n; col++) {
double xmean = 0;
for (size_t ix = Xcsc_p[col]; ix < Xcsc_p[col+1]; ix++)
xmean += (Xcsc[ix] - xmean) / (double)(ix - Xcsc_p[col] +1);
xmean *= (double)(Xcsc_p[col+1] - Xcsc_p[col]) / (double)m;
meanB[col] = xmean;
}
}
else
{
#pragma omp parallel for schedule(dynamic) \
num_threads(cap_to_4(nthreads)) \
shared(Xcsr_p, Xcsr, weightR, m, n, meanA, \
adjA, scale_lam, wsumA, lam_user)
for (size_t_for row = 0; row < (size_t)m; row++)
{
double xmean = 0;
double wsum = DBL_EPSILON;
for (size_t ix = Xcsr_p[row]; ix < Xcsr_p[row+1]; ix++)
xmean += (weightR[ix] * (Xcsr[ix] - xmean))
/ (wsum += weightR[ix]);
wsum = (Xcsr_p[row+1] > Xcsr_p[row])? wsum : 0;
xmean *= wsum
/ (wsum + (double)(n - (Xcsr_p[row+1] - Xcsr_p[row])));
wsum += (double)(n - (Xcsr_p[row+1] - Xcsr_p[row]));
wsum /= (wsum + lam_user * ((wsumA != NULL)?
(wsumA[row])
:
(scale_lam? wsum : 1.)));
meanA[row] = xmean;
adjA[row] = wsum;
}
#pragma omp parallel for schedule(dynamic) \
num_threads(cap_to_4(nthreads)) \
shared(Xcsc_p, Xcsc, weightC, m, n, meanB, \
adjB, scale_lam, wsumB, lam_item)
for (size_t_for col = 0; col < (size_t)n; col++)
{
double xmean = 0;
double wsum = DBL_EPSILON;
for (size_t ix = Xcsc_p[col]; ix < Xcsc_p[col+1]; ix++)
xmean += (weightC[ix] * (Xcsc[ix] - xmean))
/ (wsum += weightC[ix]);
wsum = (Xcsc_p[col+1] > Xcsc_p[col])? wsum : 0;
xmean *= wsum
/ (wsum + (double)(m - (Xcsc_p[col+1] - Xcsc_p[col])));
wsum += (double)(m - (Xcsc_p[col+1] - Xcsc_p[col]));
wsum /= (wsum + lam_item * ((wsumB != NULL)?
(wsumB[col])
:
(scale_lam? wsum : 1.)));
meanB[col] = xmean;
adjB[col] = wsum;
}
}
}
if (Xtrans == NULL && Xfull != NULL && weight != NULL)
{
wsum_B = (real_t*)malloc((size_t)n*sizeof(int_t));
if (wsum_B == NULL) goto throw_oom;
}
set_to_zero(biasA, m);
set_to_zero(biasB, n);
for (int iter = 0; iter < niter; iter++)
{
/* First by items */
if (Xtrans != NULL && weight == NULL)
{
#pragma omp parallel for schedule(dynamic) \
num_threads(cap_to_4(nthreads)) \
shared(Xtrans, m, n, biasA, biasB, lam_item)
for (size_t_for col = 0; col < (size_t)n; col++)
{
double xmean = 0;
int_t cnt = 0;
for (size_t row = 0; row < (size_t)m; row++)
xmean += (isnan(Xtrans[row + col*(size_t)m]))?
0 : ((Xtrans[row + col*(size_t)m]
- biasA[row]
- xmean)
/ (double)(++cnt));
xmean *= (double)cnt
/ ((double)cnt
+ lam_item * ((wsumB != NULL)?
wsumB[col]
:
(scale_lam?
(double)max2(cnt,1) : 1.)));
biasB[col] = xmean;
}
}
else if (Xtrans != NULL) /* <- has weights */
{
#pragma omp parallel for schedule(dynamic) \
num_threads(cap_to_4(nthreads)) \
shared(Xtrans, m, n, biasA, biasB, Wtrans, lam_item)
for (size_t_for col = 0; col < (size_t)n; col++)
{
double xmean = 0;
double wsum = DBL_EPSILON;
for (size_t row = 0; row < (size_t)m; row++)
xmean += (isnan(Xtrans[row + col*(size_t)m]))?
0 : ((Wtrans[row + col*(size_t)m]
* (Xtrans[row + col*(size_t)m]
- biasA[row]
- xmean))
/ (wsum += Wtrans[row + col*(size_t)m]));
if (cnt_NA_bycol[col] < m)
xmean *= wsum
/ (wsum + lam_item * ((wsumB != NULL)?
wsumB[col]
:
(scale_lam? wsum :1.)));
biasB[col] = xmean;
}
}
else if (Xfull != NULL && weight == NULL)
{
set_to_zero(biasB, n);
for (size_t row = 0; row < (size_t)m; row++)
{
for (size_t col = 0; col < (size_t)n; col++)
{
biasB[col] += (isnan(Xfull[col + row*(size_t)n]))?
0 : Xfull[col + row*(size_t)n];
}
}
for (int_t col = 0; col < n; col++)
biasB[col] /= (real_t)(m - cnt_NA_bycol[col])
+ lam_item * ((wsumB != NULL)?
wsumB[col]
:
(scale_lam?
(real_t)max2(
m-cnt_NA_bycol[col],
1)
:
1.));
}
else if (Xfull != NULL) /* <- has weights */
{
set_to_zero(biasB, n);
set_to_zero(wsum_B, n);
for (size_t row = 0; row < (size_t)m; row++)
{
for (size_t col = 0; col < (size_t)n; col++)
{
biasB[col] += (isnan(Xfull[col + row*(size_t)n]))?
0 : Xfull[col + row*(size_t)n];
wsum_B[col] += (isnan(Xfull[col + row*(size_t)n]))?
0 : weight[col + row*(size_t)n];
}
}
for (int_t col = 0; col < n; col++)
wsum_B[col] = (cnt_NA_bycol[col] == m)? wsum_B[col] : 1;
for (int_t col = 0; col < n; col++)
biasB[col] /= wsum_B[col]
+ lam_item * ((wsumB != NULL)?
wsumB[col]
:
(scale_lam? wsum_B[col] : 1.));
}
else if (!NA_as_zero && weight == NULL)
{
#pragma omp parallel for schedule(dynamic) \
num_threads(cap_to_4(nthreads)) \
shared(Xcsc_p, Xcsc_i, Xcsc, biasA, biasB, \
n, wsumB, scale_lam, lam_item)
for (size_t_for col = 0; col < (size_t)n; col++)
{
double bmean = 0;
for (size_t ix = Xcsc_p[col]; ix < Xcsc_p[col+1]; ix++)
bmean += (Xcsc[ix] - biasA[Xcsc_i[ix]] - bmean)
/ (double)(ix - Xcsc_p[col] + 1);
bmean *= (double)(Xcsc_p[col+1] - Xcsc_p[col])
/
((double)(Xcsc_p[col+1] - Xcsc_p[col])
+ lam_item * ((wsumB != NULL)?
((double)wsumB[col])
:
(scale_lam?
(double)max2(
Xcsc_p[col+1]-Xcsc_p[col],
1)
:
1.)));
biasB[col] = bmean;
}
}
else if (!NA_as_zero) /* <- has weights */
{
#pragma omp parallel for schedule(dynamic) \
num_threads(cap_to_4(nthreads)) \
shared(Xcsc_p, Xcsc_i, Xcsc, weightC, biasA, biasB, \
m, n, wsumB, scale_lam, lam_item)
for (size_t_for col = 0; col < (size_t)n; col++)
{
double bmean = 0;
double wsum = DBL_EPSILON;
for (size_t ix = Xcsc_p[col]; ix < Xcsc_p[col+1]; ix++)
bmean += (weightC[ix] * (Xcsc[ix]-biasA[Xcsc_i[ix]]-bmean))
/ (wsum += weightC[ix]);
if (Xcsc_p[col+1] > Xcsc_p[col])
bmean *= wsum / (wsum + lam_item * ((wsumB != NULL)?
wsumB[col]
:
(scale_lam?
wsum : 1.)));
biasB[col] = bmean;
}
}
else if (weight == NULL) /* <- has 'NA_as_zero' */
{
double bmean = 0;
if (iter > 0) {
for (int_t row = 0; row < n; row++)
bmean += (biasA[row] - bmean) / (double)(row+1);
}
for (int_t col = 0; col < n; col++)
biasB[col] = (meanB[col] - bmean - glob_mean)
* ( (double)m
/
((double)m + lam_item * ((wsumB != NULL)?
wsumB[col]
:
(scale_lam?
(double)m
:
1.))) );
}
else /* <- has 'NA_as_zero' and weights */
{
double bmean = 0;
if (iter > 0) {
for (int_t row = 0; row < m; row++)
bmean += (biasA[row] - bmean) / (double)(row+1);
}
#pragma omp parallel for schedule(dynamic) \
num_threads(cap_to_4(nthreads)) \
shared(Xcsc_p, Xcsc_i, m, n, weightC, bmean, \
biasB, meanB, adjB, glob_mean)
for (size_t_for col = 0; col < (size_t)n; col++)
{
double wsum = m;
double bmean_this = bmean;
for (size_t ix = Xcsc_p[col]; ix < Xcsc_p[col+1]; ix++)
bmean_this += ( (weightC[ix] - 1)
* (biasB[Xcsc_i[ix]] - bmean_this) )
/ (wsum += (weightC[ix] - 1));
biasB[col] = (meanB[col] - bmean_this - glob_mean) * adjB[col];
}
}
if (nonneg)
for (int_t col = 0; col < n; col++)
biasB[col] = (biasB[col] >= (real_t)0)? biasB[col] : (real_t)0;
/* Then by users */
if (Xfull != NULL && weight == NULL)
{
#pragma omp parallel for schedule(dynamic) \
num_threads(cap_to_4(nthreads)) \
shared(Xfull, m, n, biasA, biasB, lam_user)
for (size_t_for row = 0; row < (size_t)m; row++)
{
double xmean = 0;
int_t cnt = 0;
for (size_t col = 0; col < (size_t)n; col++)
xmean += (isnan(Xfull[col + row*(size_t)n]))?
0 : ((Xfull[col + row*(size_t)n]
- biasB[col]
- xmean)
/ (double)(++cnt));
xmean *= (double)cnt
/ ((double)cnt + lam_user * ((wsumA != NULL)?
wsumA[row]
:
(scale_lam?
(double)max2(cnt,1)
:
1.)));
biasA[row] = xmean;
}
}
else if (Xfull != NULL) /* <- has weights */
{
#pragma omp parallel for schedule(dynamic) \
num_threads(cap_to_4(nthreads)) \
shared(Xfull, m, n, biasA, biasB, weight, lam_user)
for (size_t_for row = 0; row < (size_t)m; row++)
{
double xmean = 0;
double wsum = DBL_EPSILON;
for (size_t col = 0; col < (size_t)n; col++)
xmean += (isnan(Xfull[col + row*(size_t)n]))?
0 : ((weight[col + row*(size_t)n]
* (Xfull[col + row*(size_t)n]
- biasB[col]
- xmean))
/ (wsum += weight[col + row*(size_t)n]));
if (cnt_NA_byrow[row] < n)
xmean *= wsum
/ (wsum + lam_user * ((wsumA != NULL)?
wsumA[row]
:
(scale_lam? wsum :1.)));
biasA[row] = xmean;
}
}
else if (!NA_as_zero && weight == NULL)
{
#pragma omp parallel for schedule(dynamic) \
num_threads(cap_to_4(nthreads)) \
shared(Xcsr_p, Xcsr_i, Xcsr, biasA, biasB, \
m, wsumA, scale_lam, lam_user)
for (size_t_for row = 0; row < (size_t)m; row++)
{
double bmean = 0;
for (size_t ix = Xcsr_p[row]; ix < Xcsr_p[row+1]; ix++)
bmean += (Xcsr[ix] - biasB[Xcsr_i[ix]] - bmean)
/ (double)(ix - Xcsr_p[row] + 1);
if (Xcsr_p[row+1] > Xcsr_p[row])
bmean *= (double)(Xcsr_p[row+1] - Xcsr_p[row])
/
((double)(Xcsr_p[row+1] - Xcsr_p[row])
+ lam_user * ((wsumA != NULL)?
((double)wsumA[row])
:
(scale_lam?
(double)(Xcsr_p[row+1]
-Xcsr_p[row])
:
1.)));
biasA[row] = bmean;
}
}
else if (!NA_as_zero) /* <- has weights */
{
#pragma omp parallel for schedule(dynamic) \
num_threads(cap_to_4(nthreads)) \
shared(Xcsr_p, Xcsr_i, Xcsr, weightR, biasA, biasB, \
m, n, wsumA, scale_lam, lam_user)
for (size_t_for row = 0; row < (size_t)m; row++)
{
double bmean = 0;
double wsum = DBL_EPSILON;
for (size_t ix = Xcsr_p[row]; ix < Xcsr_p[row+1]; ix++)
bmean += (weightR[ix] * (Xcsr[ix]-biasB[Xcsr_i[ix]]-bmean))
/ (wsum += weightR[ix]);
if (Xcsr_p[row+1] > Xcsr_p[row])
bmean *= wsum
/ (wsum + lam_user * ((wsumA != NULL)?
wsumA[row]
:
(scale_lam? wsum :1.)));
biasA[row] = bmean;
}
}
else if (weight == NULL) /* <- has 'NA_as_zero' */
{
double bmean = 0;
if (iter > 0) {
for (int_t col = 0; col < n; col++)
bmean += (biasB[col] - bmean) / (double)(col+1);
}
for (int_t row = 0; row < m; row++)
biasA[row] = (meanA[row] - bmean - glob_mean)
* ( (double)n
/
((double)n + lam_user * ((wsumA != NULL)?
wsumA[row]
:
(scale_lam?
(double)n
:
1.))) );
}
else /* <- has weights and 'NA_as_zero' */
{
double bmean = 0;
for (int_t col = 0; col < n; col++)
bmean += (biasB[col] - bmean) / (double)(col+1);
#pragma omp parallel for schedule(dynamic) \
num_threads(cap_to_4(nthreads)) \
shared(Xcsr_p, Xcsr_i, m, n, weightR, bmean, \
biasA, meanA, adjA, glob_mean)
for (size_t_for row = 0; row < (size_t)m; row++)
{
double wsum = n;
double bmean_this = bmean;
for (size_t ix = Xcsr_p[row]; ix < Xcsr_p[row+1]; ix++)
bmean_this += ( (weightR[ix] - 1)
* (biasB[Xcsr_i[ix]] - bmean_this) )
/ (wsum += (weightR[ix] - 1));
biasA[row] = (meanA[row] - bmean_this - glob_mean) * adjA[row];
}
}
if (nonneg)
for (int_t row = 0; row < m; row++)
biasA[row] = (biasA[row] >= (real_t)0)? biasA[row] : (real_t)0;
}
cleanup:
free(meanA);
free(meanB);
free(adjA);
free(adjB);
free(wsum_B);
return retval;
throw_oom:
retval = 1;
goto cleanup;
}
int_t center_by_cols
(
real_t *restrict col_means,
real_t *restrict *Xfull_, int_t m, int_t n,
int_t ixA[], int_t ixB[], real_t *restrict *X_, size_t nnz,
size_t Xcsr_p[], int_t Xcsr_i[], real_t *restrict Xcsr,
size_t Xcsc_p[], int_t Xcsc_i[], real_t *restrict Xcsc,
int nthreads, bool *modified_X, bool *modified_Xfull
)
{
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long ix, ib;
#endif
int_t retval = 0;
real_t *restrict X = (X_ == NULL)? NULL : (*X_);
real_t *restrict Xfull = (Xfull_ == NULL)? NULL : (*Xfull_);
int_t *restrict cnt_by_col = NULL;
if (Xfull != NULL || Xcsc == NULL) {
cnt_by_col = (int_t*)calloc(n, sizeof(int_t));
if (cnt_by_col == NULL) goto throw_oom;
}
set_to_zero(col_means, n);
if (Xfull != NULL)
{
for (size_t row = 0; row < (size_t)m; row++)
for (size_t col = 0; col < (size_t)n; col++) {
col_means[col] += (!isnan(Xfull[col + row*(size_t)n]))?
(Xfull[col + row*(size_t)n]) : (0.);
cnt_by_col[col] += !isnan(Xfull[col + row*(size_t)n]);
}
}
else if (Xcsc != NULL)
{
#pragma omp parallel for schedule(dynamic) num_threads(nthreads) \
shared(n, Xcsc, Xcsc_p, col_means)
for (size_t_for ib = 0; ib < (size_t)n; ib++)
{
double csum = 0;
for (size_t ix = Xcsc_p[ib]; ix < Xcsc_p[ib+(size_t)1]; ix++)
csum += Xcsc[ix];
col_means[ib] = csum;
}
}
else if (Xcsr != NULL && X == NULL)
{
for (size_t ia = 0; ia < (size_t)m; ia++)
for (size_t ix = Xcsr_p[ia]; ix < Xcsr_p[ia+(size_t)1]; ix++)
col_means[Xcsr_i[ix]] += Xcsr[ix];
}
else
{
for (size_t ix = 0; ix < nnz; ix++) {
col_means[ixB[ix]] += X[ix];
cnt_by_col[ixB[ix]]++;
}
}
/* -------- */
if (Xfull != NULL || Xcsc == NULL)
for (size_t ix = 0; ix < (size_t)n; ix++)
col_means[ix] /= (double)cnt_by_col[ix];
else
for (size_t ix = 0; ix < (size_t)n; ix++)
col_means[ix] /= (double)(Xcsc_p[ix+(size_t)1] - Xcsc_p[ix]);
/* -------- */
if (Xfull != NULL)
{
Xfull = (real_t*)malloc((size_t)m*(size_t)n*sizeof(real_t));
if (Xfull == NULL) goto throw_oom;
copy_arr_(*Xfull_, Xfull, ((size_t)m*(size_t)n), nthreads);
*Xfull_ = Xfull;
*modified_Xfull = true;
for (size_t row = 0; row < (size_t)m; row++)
for (size_t col = 0; col < (size_t)n; col++)
Xfull[col + row*(size_t)n] -= col_means[col];
}
else if (Xcsc != NULL || Xcsr != NULL)
{
if (Xcsc != NULL)
{
#pragma omp parallel for schedule(dynamic) num_threads(nthreads) \
shared(Xcsc, Xcsc_p, n, col_means)
for (size_t_for ib = 0; ib < (size_t)n; ib++)
for (size_t ix = Xcsc_p[ib]; ix < Xcsc_p[ib+(size_t)1]; ix++)
Xcsc[ix] -= col_means[ib];
}
if (Xcsr != NULL)
{
for (size_t ix = 0; ix < Xcsr_p[m-1]; ix++)
Xcsr[ix] -= col_means[Xcsr_i[ix]];
}
}
else
{
X = (real_t*)malloc(nnz*sizeof(real_t));
if (X == NULL) goto throw_oom;
copy_arr_(*X_, X, nnz, nthreads);
*X_ = X;
*modified_X = true;
nthreads = cap_to_4(nthreads);
#pragma omp parallel for schedule(static) num_threads(nthreads) \
shared(nnz, X, col_means, ixB)
for (size_t_for ix = 0; ix < nnz; ix++)
X[ix] -= col_means[ixB[ix]];
}
cleanup:
free(cnt_by_col);
return retval;
throw_oom:
retval = 1;
goto cleanup;
}
bool check_sparse_indices
(
int_t n, int_t p,
real_t *restrict u_vec_sp, int_t u_vec_ixB[], size_t nnz_u_vec,
real_t *restrict Xa, int_t ixB[], size_t nnz
)
{
if (nnz) {
for (size_t ix = 0; ix < nnz; ix++) {
if (ixB[ix] < 0 || ixB[ix] >= ((n > 0)? n : INT_MAX)) {
return true;
}
}
}
if (nnz_u_vec) {
for (size_t ix = 0; ix < nnz_u_vec; ix++) {
if (u_vec_ixB[ix] < 0 || u_vec_ixB[ix] >= ((p > 0)? p : INT_MAX)) {
return true;
}
}
}
return false;
}
void predict_multiple
(
real_t *restrict A, int_t k_user,
real_t *restrict B, int_t k_item,
real_t *restrict biasA, real_t *restrict biasB,
real_t glob_mean,
int_t k, int_t k_main,
int_t m, int_t n,
int_t predA[], int_t predB[], size_t nnz,
real_t *restrict outp,
int nthreads
)
{
int nthreads_restore = 1;
if (nnz > 1 && nthreads > 1)
set_blas_threads(1, &nthreads_restore);
size_t lda = (size_t)k_user + (size_t)k + (size_t)k_main;
size_t ldb = (size_t)k_item + (size_t)k + (size_t)k_main;
A += k_user;
B += k_item;
if (m == 0) m = INT_MAX;
if (n == 0) n = INT_MAX;
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long ix;
#endif
#pragma omp parallel for schedule(static) num_threads(nthreads) \
shared(A, B, outp, nnz, predA, predB, lda, ldb, k)
for (size_t_for ix = 0; ix < nnz; ix++)
outp[ix] = (predA[ix] >= m || predA[ix] < 0 ||
predB[ix] >= n || predB[ix] < 0)?
(NAN_)
:
(cblas_tdot(k, A + (size_t)predA[ix]*lda, 1,
B + (size_t)predB[ix]*ldb, 1)
+ ((biasA != NULL)? biasA[predA[ix]] : 0.)
+ ((biasB != NULL)? biasB[predB[ix]] : 0.)
+ glob_mean);
if (nnz > 1 && nthreads > 1)
set_blas_threads(nthreads_restore, (int*)NULL);
}
int_t cmp_int(const void *a, const void *b)
{
return *((int_t*)a) - *((int_t*)b);
}
real_t *ptr_real_t_glob = NULL;
// #pragma omp threadprivate(ptr_real_t_glob)
// ptr_real_t_glob = NULL;
int_t cmp_argsort(const void *a, const void *b)
{
real_t v1 = ptr_real_t_glob[*((int_t*)a)];
real_t v2 = ptr_real_t_glob[*((int_t*)b)];
return (v1 == v2)? 0 : ((v1 < v2)? 1 : -1);
}
int_t topN
(
real_t *restrict a_vec, int_t k_user,
real_t *restrict B, int_t k_item,
real_t *restrict biasB,
real_t glob_mean, real_t biasA,
int_t k, int_t k_main,
int_t *restrict include_ix, int_t n_include,
int_t *restrict exclude_ix, int_t n_exclude,
int_t *restrict outp_ix, real_t *restrict outp_score,
int_t n_top, int_t n, int nthreads
)
{
int_t retval = 0;
if (include_ix != NULL && exclude_ix != NULL) {
fprintf(stderr, "Cannot pass both 'include_ix' and 'exclude_ix'.\n");
retval = 2;
}
if (n_top == 0) {
fprintf(stderr, "'n_top' must be greater than zero.\n");
retval = 2;
}
if (n_exclude > n-n_top) {
fprintf(stderr, "Number of rankeable entities is less than 'n_top'\n");
retval = 2;
}
if (n_include > n) {
fprintf(stderr, "Number of entities to include is larger than 'n'.\n");
retval = 2;
}
if (include_ix != NULL)
{
for (int_t ix = 0; ix < n_include; ix++)
if (include_ix[ix] < 0 || include_ix[ix] >= n)
{
fprintf(stderr, "'include_ix' contains invalid entries\n");
retval = 2;
break;
}
}
if (exclude_ix != NULL)
{
for (int_t ix = 0; ix < n_exclude; ix++)
if (exclude_ix[ix] < 0 || exclude_ix[ix] >= n)
{
fprintf(stderr, "'exclude_ix' contains invalid entries\n");
retval = 2;
break;
}
}
for (int_t ix = 0; ix < k_user+k+k_main; ix++)
{
if (isnan(a_vec[ix])) {
fprintf(stderr, "The latent factors contain NAN values\n");
retval = 2;
break;
}
}
if (isnan(biasA)) {
fprintf(stderr, "The bias is a NAN value\n");
retval = 2;
}
if (retval == 2)
{
fflush(stderr);
return retval;
}
int_t ix = 0;
int nthreads_restore = 1;
int_t k_pred = k + k_main;
int_t k_totB = k_item + k + k_main;
size_t n_take = (include_ix != NULL)?
(size_t)n_include :
((exclude_ix == NULL)? (size_t)n : (size_t)(n-n_exclude) );
real_t *restrict buffer_scores = NULL;
int_t *restrict buffer_ix = NULL;
int_t *restrict buffer_mask = NULL;
a_vec += k_user;
if (include_ix != NULL) {
buffer_ix = include_ix;
}
else {
buffer_ix = (int_t*)malloc((size_t)n*sizeof(int_t));
if (buffer_ix == NULL) goto throw_oom;
for (int_t ix = 0; ix < n; ix++) buffer_ix[ix] = ix;
}
if (exclude_ix != NULL)
{
int_t move_to = n-1;
int_t temp;
if (!check_is_sorted(exclude_ix, n_exclude))
qsort(exclude_ix, n_exclude, sizeof(int_t), cmp_int);
for (int_t ix = n_exclude-1; ix >= 0; ix--) {
temp = buffer_ix[move_to];
buffer_ix[move_to] = exclude_ix[ix];
buffer_ix[exclude_ix[ix]] = temp;
move_to--;
}
}
/* Case 1: there is a potentially small number of items to include.
Here can produce predictons only for those, then make
an argsort with doubly-masked indices. */
if (include_ix != NULL)
{
if (nthreads > 1)
set_blas_threads(1, &nthreads_restore);
buffer_scores = (real_t*)malloc((size_t)n_include*sizeof(real_t));
buffer_mask = (int_t*)malloc((size_t)n_include*sizeof(int_t));
if (buffer_scores == NULL || buffer_mask == NULL) goto throw_oom;
#pragma omp parallel for schedule(static) num_threads(nthreads) \
shared(a_vec, B, k_pred, k_item, n_include, k_totB, \
include_ix, biasB, buffer_scores)
for (ix = 0; ix < n_include; ix++) {
buffer_scores[ix] = cblas_tdot(k_pred, a_vec, 1,
B + k_item + (size_t)include_ix[ix]
* (size_t)k_totB, 1)
+ ((biasB != NULL)? biasB[include_ix[ix]] : 0.);
}
for (int_t ix = 0; ix < n_include; ix++)
buffer_mask[ix] = ix;
if (nthreads > 1)
set_blas_threads(nthreads_restore, (int*)NULL);
}
/* Case 2: there is a large number of items to exclude.
Here can also produce predictions only for the included ones
and then make a full or partial argsort. */
else if (exclude_ix != NULL && (double)n_exclude > (double)n/20)
{
if (nthreads > 1)
set_blas_threads(1, &nthreads_restore);
buffer_scores = (real_t*)malloc(n_take*sizeof(real_t));
buffer_mask = (int_t*)malloc(n_take*sizeof(int_t));
if (buffer_scores == NULL || buffer_mask == NULL) goto throw_oom;
#pragma omp parallel for schedule(static) num_threads(nthreads) \
shared(a_vec, B, k_pred, k_item, n_take, k_totB, \
buffer_ix, biasB, buffer_scores)
for (ix = 0; ix < (int_t)n_take; ix++)
buffer_scores[ix] = cblas_tdot(k_pred, a_vec, 1,
B + k_item + (size_t)buffer_ix[ix]
* (size_t)k_totB, 1)
+ ((biasB != NULL)? biasB[buffer_ix[ix]] : 0.);
for (int_t ix = 0; ix < (int_t)n_take; ix++)
buffer_mask[ix] = ix;
if (nthreads > 1)
set_blas_threads(nthreads_restore, (int*)NULL);
}
/* General case: make predictions for all the entries, then
a partial argsort (this is faster since it makes use of
optimized BLAS gemv, but it's not memory-efficient) */
else
{
buffer_scores = (real_t*)malloc((size_t)n*sizeof(real_t));
if (buffer_scores == NULL) goto throw_oom;
cblas_tgemv(CblasRowMajor, CblasNoTrans,
n, k_pred,
1., B + k_item, k_totB,
a_vec, 1,
0., buffer_scores, 1);
if (biasB != NULL)
cblas_taxpy(n, 1., biasB, 1, buffer_scores, 1);
}
/* If there is no double-mask for indices, do a partial argsort */
ptr_real_t_glob = buffer_scores;
if (buffer_mask == NULL)
{
/* If the number of elements is very small, it's faster to
make a full argsort, taking advantage of qsort's optimizations */
if (n_take <= 50 || n_take >= (double)n*0.75)
{
qsort(buffer_ix, n_take, sizeof(int_t), cmp_argsort);
}
/* Otherwise, do a proper partial sort */
else
{
qs_argpartition(buffer_ix, buffer_scores, n_take, n_top);
qsort(buffer_ix, n_top, sizeof(int_t), cmp_argsort);
}
memcpy(outp_ix, buffer_ix, (size_t)n_top*sizeof(int_t));
}
/* Otherwise, do a partial argsort with doubly-indexed arrays */
else
{
if (n_take <= 50 || n_take >= (double)n*0.75)
{
qsort(buffer_mask, n_take, sizeof(int_t), cmp_argsort);
}
else
{
qs_argpartition(buffer_mask, buffer_scores, n_take, n_top);
qsort(buffer_mask, n_top, sizeof(int_t), cmp_argsort);
}
for (int_t ix = 0; ix < n_top; ix++)
outp_ix[ix] = buffer_ix[buffer_mask[ix]];
}
ptr_real_t_glob = NULL;
/* If scores were requested, need to also output those */
if (outp_score != NULL)
{
glob_mean += biasA;
if (buffer_mask == NULL)
for (int_t ix = 0; ix < n_top; ix++)
outp_score[ix] = buffer_scores[outp_ix[ix]] + glob_mean;
else
for (int_t ix = 0; ix < n_top; ix++)
outp_score[ix] = buffer_scores[buffer_mask[ix]] + glob_mean;
}
cleanup:
free(buffer_scores);
if (include_ix == NULL)
free(buffer_ix);
free(buffer_mask);
return retval;
throw_oom:
{
retval = 1;
goto cleanup;
}
}
int_t fit_most_popular
(
real_t *restrict biasA, real_t *restrict biasB,
real_t *restrict glob_mean,
real_t lam_user, real_t lam_item,
bool scale_lam, bool scale_bias_const,
real_t alpha,
int_t m, int_t n,
int_t ixA[], int_t ixB[], real_t *restrict X, size_t nnz,
real_t *restrict Xfull,
real_t *restrict weight,
bool implicit, bool adjust_weight, bool apply_log_transf,
bool nonneg, bool NA_as_zero,
real_t *restrict w_main_multiplier,
int nthreads
)
{
if (implicit)
{
if (NA_as_zero) {
fprintf(stderr,
"Warning: 'NA_as_zero' ignored with 'implicit=true'.\n");
NA_as_zero = false;
}
if (scale_lam) {
fprintf(stderr,
"Warning: 'scale_lam' ignored with 'implicit=true'.\n");
scale_lam = false;
}
if (weight != NULL) {
fprintf(stderr,
"Warning: 'weight' ignored with 'implicit=true'.\n");
weight = NULL;
}
if (Xfull != NULL) {
fprintf(stderr,
"Error: cannot pass dense 'X' with 'implicit=true'.\n");
return 2;
}
}
else
{
if (adjust_weight) {
fprintf(stderr,
"Warning: 'adjust_weight' ignored with 'implicit=false'.\n");
adjust_weight = false;
}
if (apply_log_transf) {
fprintf(stderr,
"Warning: 'apply_log_transf' ignored with 'implicit=false'.\n");
apply_log_transf = false;
}
}
if (biasB == NULL)
{
fprintf(stderr, "Error: must pass 'biasB'.\n");
return 2;
}
if (!scale_lam) scale_bias_const = false;
int_t retval = 0;
if (glob_mean != NULL)
*glob_mean = 0;
size_t *restrict Xcsr_p = NULL;
int_t *restrict Xcsr_i = NULL;
real_t *restrict Xcsr = NULL;
size_t *restrict Xcsc_p = NULL;
int_t *restrict Xcsc_i = NULL;
real_t *restrict Xcsc = NULL;
real_t *restrict weightR = NULL;
real_t *restrict weightC = NULL;
real_t *restrict ones = NULL;
real_t *restrict wsumA = NULL;
real_t *restrict wsumB = NULL;
bool free_X = false;
bool free_Xfull = false;
if (NA_as_zero && Xfull == NULL)
{
if (glob_mean != NULL)
calc_mean_and_center(
ixA, ixB, &X, nnz,
(real_t**)NULL, (real_t*)NULL,
m, n,
(size_t*)NULL, (int_t*)NULL, (real_t*)NULL,
(size_t*)NULL, (int_t*)NULL, (real_t*)NULL,
weight,
NA_as_zero, nonneg, true, nthreads,
glob_mean, &free_X, &free_Xfull,
false
);
if (biasA != NULL)
{
retval = coo_to_csr_plus_alloc(
ixA, ixB, X,
weight,
m, n, nnz,
&Xcsr_p, &Xcsr_i, &Xcsr,
&weightR
);
if (retval == 1) goto throw_oom;
if (retval != 0) goto cleanup;
}
if (biasB != NULL)
{
retval = coo_to_csr_plus_alloc(
ixB, ixA, X,
weight,
n, m, nnz,
&Xcsc_p, &Xcsc_i, &Xcsc,
&weightC
);
if (retval == 1) goto throw_oom;
if (retval != 0) goto cleanup;
}
if (scale_lam && weight != NULL)
{
if (biasA != NULL)
{
wsumA = (real_t*)calloc(m, sizeof(real_t));
if (wsumA == NULL) goto throw_oom;
for (int_t row = 0; row < m; row++) {
for (size_t ix = Xcsr_p[row]; ix < Xcsr_p[row+1]; ix++)
wsumA[row] += weightR[ix];
wsumA[row] /= wsumA[row]
+ (real_t)(n - (Xcsr_p[row+1] - Xcsr_p[row]));
}
}
if (biasB != NULL)
{
wsumB = (real_t*)calloc(n, sizeof(real_t));
if (wsumB == NULL) goto throw_oom;
for (int_t col = 0; col < n; col++) {
for (size_t ix = Xcsc_p[col]; ix < Xcsc_p[col+1]; ix++)
wsumB[col] += weightC[ix];
wsumB[col] /= wsumB[col]
+ (real_t)(m - (Xcsc_p[col+1] - Xcsc_p[col]));
}
}
}
if (scale_bias_const)
{
scale_bias_const = false;
scale_lam = false;
if (weight == NULL)
{
lam_user *= n;
lam_item *= m;
}
else
{
double wmean = 0;
if (biasA != NULL)
{
for (int_t row = 0; row < m; row++)
wmean += (wsumA[row] - wmean) / (double)(row+1);
lam_user *= wmean;
}
wmean = 0;
if (biasB != NULL)
{
for (int_t col = 0; col < n; col++)
wmean += (wsumB[col] - wmean) / (double)(col+1);
lam_item *= wmean;
}
free(wsumA); wsumA = NULL;
free(wsumB); wsumB = NULL;
}
}
if (biasA != NULL && biasB != NULL)
retval = initialize_biases_twosided(
(real_t*)NULL, (real_t*)NULL,
(int_t*)NULL, (int_t*)NULL,
m, n,
NA_as_zero, nonneg, (glob_mean == NULL)? 0. : (*glob_mean),
Xcsr_p, Xcsr_i, Xcsr,
Xcsc_p, Xcsc_i, Xcsc,
weight, (real_t*)NULL,
weightR, weightC,
lam_user, lam_item, scale_lam,
wsumA, wsumB,
biasA, biasB,
nthreads
);
else
initialize_biases_onesided(
(real_t*)NULL, n, m, false, (int_t*)NULL,
Xcsc_p, Xcsc_i, Xcsc,
weight, weightC,
(glob_mean == NULL)? 0. : (*glob_mean), NA_as_zero, nonneg,
lam_item, scale_lam,
wsumB,
biasB,
nthreads
);
if (retval == 1) goto throw_oom;
goto cleanup;
}
if (implicit && biasA != NULL)
{
if (!free_X)
{
real_t *restrict temp = (real_t*)malloc(nnz*sizeof(real_t));
if (temp == NULL) goto throw_oom;
copy_arr_(X, temp, nnz, nthreads);
X = temp;
free_X = true;
}
for (size_t ix = 0; ix < nnz; ix++)
X[ix] += 1;
if (apply_log_transf)
for (size_t ix = 0; ix < nnz; ix++)
X[ix] = log_t(X[ix]);
retval = coo_to_csr_plus_alloc(
ixA, ixB, X,
(real_t*)NULL,
m, n, nnz,
&Xcsr_p, &Xcsr_i, &Xcsr,
(real_t**)NULL
);
if (retval == 1) goto throw_oom;
if (retval != 0) goto cleanup;
retval = coo_to_csr_plus_alloc(
ixB, ixA, X,
(real_t*)NULL,
n, m, nnz,
&Xcsc_p, &Xcsc_i, &Xcsc,
(real_t**)NULL
);
if (retval == 1) goto throw_oom;
if (retval != 0) goto cleanup;
ones = (real_t*)malloc(nnz*sizeof(real_t));
if (ones == NULL) goto throw_oom;
for (size_t ix = 0; ix < nnz; ix++) ones[ix] = 1;
if (adjust_weight)
{
*w_main_multiplier
=
(long double)nnz / ((long double)m * (long double)n);
lam_item /= *w_main_multiplier;
lam_user /= *w_main_multiplier;
}
retval = initialize_biases_twosided(
(real_t*)NULL, (real_t*)NULL,
(int_t*)NULL, (int_t*)NULL,
m, n,
true, nonneg, 0.,
Xcsr_p, Xcsr_i, ones,
Xcsc_p, Xcsc_i, ones,
X, (real_t*)NULL,
Xcsr, Xcsc,
lam_user, lam_item, false,
(real_t*)NULL, (real_t*)NULL,
biasA, biasB,
nthreads
);
if (retval == 1) goto throw_oom;
goto cleanup;
}
retval = fit_most_popular_internal(
biasA, biasB,
glob_mean, glob_mean != NULL,
lam_user, lam_item,
scale_lam, scale_bias_const,
alpha,
m, n,
ixA, ixB, &X, nnz,
&Xfull,
weight,
implicit, adjust_weight, apply_log_transf,
nonneg,
w_main_multiplier,
nthreads,
&free_X, &free_Xfull,
free_X || free_Xfull
);
if (retval == 1) goto throw_oom;
cleanup:
free(Xcsr_p);
free(Xcsr_i);
free(Xcsr);
free(Xcsc_p);
free(Xcsc_i);
free(Xcsc);
free(weightR);
free(weightC);
free(ones);
free(wsumA);
free(wsumB);
if (free_X)
free(X);
if (free_Xfull)
free(Xfull);
return retval;
throw_oom:
retval = 1;
print_oom_message();
goto cleanup;
}
/* TODO: factor out this function */
int_t fit_most_popular_internal
(
real_t *restrict biasA, real_t *restrict biasB,
real_t *restrict glob_mean, bool center,
real_t lam_user, real_t lam_item,
bool scale_lam, bool scale_bias_const,
real_t alpha,
int_t m, int_t n,
int_t ixA[], int_t ixB[], real_t *restrict *X_, size_t nnz,
real_t *restrict *Xfull_,
real_t *restrict weight,
bool implicit, bool adjust_weight, bool apply_log_transf,
bool nonneg,
real_t *restrict w_main_multiplier,
int nthreads,
bool *free_X, bool *free_Xfull,
bool allow_overwrite_X
)
{
int_t retval = 0;
int_t *restrict cnt_by_col = NULL;
int_t *restrict cnt_by_row = NULL;
real_t *restrict sum_by_col = NULL;
real_t *restrict sum_by_row = NULL;
int_t maxiter = 5;
real_t *restrict X = (X_ == NULL)? NULL : (*X_);
real_t *restrict Xfull = (Xfull_ == NULL)? NULL : (*Xfull_);
if (implicit)
{
cnt_by_col = (int_t*)calloc((size_t)n, sizeof(int_t));
sum_by_col = (real_t*)calloc((size_t)n, sizeof(real_t));
if (cnt_by_col == NULL || sum_by_col == NULL) goto throw_oom;
if (apply_log_transf)
{
if (Xfull != NULL)
{
size_t m_by_n = (size_t)m * (size_t)n;
if (!allow_overwrite_X)
{
Xfull = (real_t*)malloc(m_by_n*sizeof(real_t));
if (Xfull == NULL) goto throw_oom;
copy_arr_(*Xfull_, Xfull, m_by_n, nthreads);
*Xfull_ = Xfull;
*free_Xfull = true;
allow_overwrite_X = true;
}
for (size_t ix = 0; ix < m_by_n; ix++)
Xfull[ix] = log_t(Xfull[ix]);
}
else
{
if (!allow_overwrite_X)
{
X = (real_t*)malloc(nnz*sizeof(real_t));
if (X == NULL) goto throw_oom;
copy_arr_(*X_, X, nnz, nthreads);
*X_ = X;
*free_X = true;
allow_overwrite_X = true;
}
for (size_t ix = 0; ix < nnz; ix++)
X[ix] = log_t(X[ix]);
}
}
if (Xfull != NULL)
{
for (size_t row = 0; row < (size_t)m; row++) {
for (size_t col = 0; col < (size_t)n; col++) {
cnt_by_col[col] += !isnan(Xfull[col + row*(size_t)n]);
sum_by_col[col] += (!isnan(Xfull[col + row*(size_t)n]))?
(Xfull[col + row*(size_t)n] +1.) : (0.);
}
}
}
else
{
for (size_t ix = 0; ix < nnz; ix++) {
cnt_by_col[ixB[ix]]++;
sum_by_col[ixB[ix]] += X[ix] + 1.;
}
}
if (adjust_weight) {
nnz = 0;
for (int_t ix = 0; ix < n; ix++)
nnz += (size_t)cnt_by_col[ix];
*w_main_multiplier
=
(long double)nnz / ((long double)m * (long double)n);
lam_item /= *w_main_multiplier;
}
for (int_t ix = 0; ix < n; ix++)
biasB[ix] = alpha * sum_by_col[ix]
/ (alpha * sum_by_col[ix]
+ (double)(m - cnt_by_col[ix])
+ lam_item);
goto cleanup;
}
if (biasA != NULL) {
if (weight == NULL || scale_lam) {
cnt_by_col = (int_t*)calloc((size_t)n, sizeof(int_t));
cnt_by_row = (int_t*)calloc((size_t)m, sizeof(int_t));
if (cnt_by_col == NULL || cnt_by_row == NULL)
goto throw_oom;
}
if (weight != NULL) {
sum_by_col = (real_t*)calloc((size_t)n, sizeof(real_t));
sum_by_row = (real_t*)calloc((size_t)m, sizeof(real_t));
if (sum_by_col == NULL || sum_by_row == NULL)
goto throw_oom;
}
}
retval = initialize_biases(
glob_mean, biasA, biasB,
false, biasA == NULL, center,
lam_user, lam_item,
scale_lam, scale_bias_const,
biasA != NULL, biasB != NULL,
(real_t*)NULL, (real_t*)NULL,
m, n,
m, n,
ixA, ixB, &X, nnz,
&Xfull, (real_t*)NULL,
(size_t*)NULL, (int_t*)NULL, (real_t*)NULL,
(size_t*)NULL, (int_t*)NULL, (real_t*)NULL,
weight, (real_t*)NULL,
(real_t*)NULL, (real_t*)NULL,
nonneg,
nthreads,
free_X, free_Xfull,
allow_overwrite_X
);
if (!allow_overwrite_X) {
if (*free_X && X != NULL) *X_ = X;
if (*free_Xfull && Xfull != NULL) *Xfull_ = Xfull;
}
if (retval == 1) goto throw_oom;
if (biasA == NULL && !implicit)
{
goto cleanup;
}
if (Xfull != NULL)
{
if (weight == NULL || scale_lam)
for (size_t row = 0; row < (size_t)m; row++) {
for (size_t col = 0; col < (size_t)n; col++) {
cnt_by_row[row] += !isnan(Xfull[col + row*(size_t)n]);
cnt_by_col[col] += !isnan(Xfull[col + row*(size_t)n]);
}
}
if (weight != NULL)
for (size_t row = 0; row < (size_t)m; row++) {
for (size_t col = 0; col < (size_t)n; col++) {
sum_by_row[row] += (!isnan(Xfull[col + row*(size_t)n]))?
(weight[col + row*(size_t)n]) : (0.);
sum_by_col[col] += (!isnan(Xfull[col + row*(size_t)n]))?
(weight[col + row*(size_t)n]) : (0.);
}
}
}
else
{
if (weight == NULL || scale_lam)
for (size_t ix = 0; ix < nnz; ix++) {
cnt_by_row[ixA[ix]]++;
cnt_by_col[ixB[ix]]++;
}
if (weight != NULL)
for (size_t ix = 0; ix < nnz; ix++) {
sum_by_row[ixA[ix]] += weight[ix];
sum_by_col[ixB[ix]] += weight[ix];
}
}
if (scale_lam && scale_bias_const)
{
if (weight != NULL)
{
double wmean = 0;
for (int_t row = 0; row < m; row++)
wmean += (sum_by_row[row] - wmean) / (double)(row+1);
lam_user *= wmean;
wmean = 0;
for (int_t col = 0; col < n; col++)
wmean += (sum_by_col[col] - wmean) / (double)(col+1);
lam_item *= wmean;
}
else
{
double cmean = 0;
for (int_t row = 0; row < m; row++)
cmean += ((double)cnt_by_row[row] - cmean) / (double)(row+1);
lam_user *= cmean;
cmean = 0;
for (int_t col = 0; col < n; col++)
cmean += ((double)cnt_by_col[col] - cmean) / (double)(col+1);
lam_item *= cmean;
}
scale_bias_const = false;
scale_lam = false;
}
set_to_zero(biasA, m);
set_to_zero(biasB, n);
maxiter = nonneg? 15 : 5;
for (int_t iter = 0; iter <= maxiter; iter++)
{
if (Xfull != NULL)
{
if (iter > 0)
set_to_zero(biasB, n);
if (weight == NULL)
{
for (size_t row = 0; row < (size_t)m; row++)
for (size_t col = 0; col < (size_t)n; col++)
biasB[col] += (!isnan(Xfull[col + row*(size_t)n]))?
(Xfull[col + row*(size_t)n] - biasA[row])
: (0.);
for (int_t ix = 0; ix < n; ix++)
biasB[ix] /= ((double)cnt_by_col[ix]
+ (lam_item
*
(scale_lam?
(double)cnt_by_col[ix] : 1.)));
}
else
{
for (size_t row = 0; row < (size_t)m; row++)
for (size_t col = 0; col < (size_t)n; col++)
biasB[col] += (!isnan(Xfull[col + row*(size_t)n]))?
weight[col + row*(size_t)n] * (Xfull[col + row*(size_t)n] - biasA[row])
: (0.);
for (int_t ix = 0; ix < n; ix++)
biasB[ix] /= (sum_by_col[ix]
+ (lam_item
*
(scale_lam?
(double)sum_by_col[ix] : 1.)));
}
for (int_t ix = 0; ix < n; ix++)
biasB[ix] = (!isnan(biasB[ix]))? biasB[ix] : 0.;
if (nonneg)
for (int_t ix = 0; ix < n; ix++)
biasB[ix] = (biasB[ix] >= 0.)? biasB[ix] : 0.;
if (iter > 0)
set_to_zero(biasA, m);
if (weight == NULL)
{
for (size_t row = 0; row < (size_t)m; row++)
for (size_t col = 0; col < (size_t)n; col++)
biasA[row] += (!isnan(Xfull[col + row*(size_t)n]))?
(Xfull[col + row*(size_t)n] - biasB[col])
: (0.);
for (int_t ix = 0; ix < m; ix++)
biasA[ix] /= ((double)cnt_by_row[ix]
+ (lam_user
*
(scale_lam?
(double)cnt_by_row[ix] : 1.)));
}
else
{
for (size_t row = 0; row < (size_t)m; row++)
for (size_t col = 0; col < (size_t)n; col++)
biasA[row] += (!isnan(Xfull[col + row*(size_t)n]))?
weight[col + row*(size_t)n] * (Xfull[col + row*(size_t)n] - biasB[col])
: (0.);
for (int_t ix = 0; ix < m; ix++)
biasA[ix] /= (sum_by_row[ix]
+ (lam_user
*
(scale_lam?
(double)sum_by_row[ix] : 1.)));
}
for (int_t ix = 0; ix < m; ix++)
biasA[ix] = (!isnan(biasA[ix]))? biasA[ix] : 0.;
if (nonneg)
for (int_t ix = 0; ix < m; ix++)
biasA[ix] = (biasA[ix] >= 0.)? biasA[ix] : 0.;
}
else
{
if (iter > 0)
set_to_zero(biasB, n);
if (weight == NULL)
{
for (size_t ix = 0; ix < nnz; ix++)
biasB[ixB[ix]] += (X[ix] - biasA[ixA[ix]]);
for (int_t ix = 0; ix < n; ix++)
biasB[ix] /= ((double)cnt_by_col[ix]
+ (lam_item
*
(scale_lam?
(double)cnt_by_col[ix] : 1.)));
}
else
{
for (size_t ix = 0; ix < nnz; ix++)
biasB[ixB[ix]] += weight[ix] * (X[ix] - biasA[ixA[ix]]);
for (int_t ix = 0; ix < n; ix++)
biasB[ix] /= (sum_by_col[ix]
+ (lam_item
*
(scale_lam?
(double)sum_by_col[ix] : 1.)));
}
for (int_t ix = 0; ix < n; ix++)
biasB[ix] = (!isnan(biasB[ix]))? biasB[ix] : 0.;
if (nonneg)
for (int_t ix = 0; ix < n; ix++)
biasB[ix] = (biasB[ix] >= 0.)? biasB[ix] : 0.;
if (iter > 0)
set_to_zero(biasA, m);
if (weight == NULL)
{
for (size_t ix = 0; ix < nnz; ix++)
biasA[ixA[ix]] += (X[ix] - biasB[ixB[ix]]);
for (int_t ix = 0; ix < m; ix++)
biasA[ix] /= ((double)cnt_by_row[ix]
+ (lam_user
*
(scale_lam?
(double)cnt_by_row[ix] : 1.)));
}
else
{
for (size_t ix = 0; ix < nnz; ix++)
biasA[ixA[ix]] += weight[ix] * (X[ix] - biasB[ixB[ix]]);
for (int_t ix = 0; ix < m; ix++)
biasA[ix] /= (sum_by_row[ix]
+ (lam_user
*
(scale_lam?
(double)sum_by_row[ix] : 1.)));
}
for (int_t ix = 0; ix < m; ix++)
biasA[ix] = (!isnan(biasA[ix]))? biasA[ix] : 0.;
if (nonneg)
for (int_t ix = 0; ix < m; ix++)
biasA[ix] = (biasA[ix] >= 0.)? biasA[ix] : 0.;
}
}
cleanup:
free(cnt_by_col);
free(cnt_by_row);
free(sum_by_col);
free(sum_by_row);
return retval;
throw_oom:
{
retval = 1;
print_oom_message();
goto cleanup;
}
}
int_t topN_old_most_popular
(
bool user_bias,
real_t a_bias,
real_t *restrict biasA, int_t row_index,
real_t *restrict biasB,
real_t glob_mean,
int_t *restrict include_ix, int_t n_include,
int_t *restrict exclude_ix, int_t n_exclude,
int_t *restrict outp_ix, real_t *restrict outp_score,
int_t n_top, int_t n
)
{
int_t retval = 0;
real_t one = 1.;
real_t *restrict ones = (real_t*)malloc((size_t)n*sizeof(real_t));
if (ones == NULL) goto throw_oom;
for (int_t ix = 0; ix < n; ix++)
ones[ix] = 1.;
if (biasA != NULL && user_bias)
a_bias = biasA[row_index];
if (!user_bias)
a_bias = 0.;
retval = topN(
&one, 0,
biasB, 0,
(real_t*)NULL,
glob_mean, a_bias,
1, 0,
include_ix, n_include,
exclude_ix, n_exclude,
outp_ix, outp_score,
n_top, n, 1
);
cleanup:
free(ones);
return retval;
throw_oom:
{
retval = 1;
goto cleanup;
}
}
int_t predict_X_old_most_popular
(
int_t row[], int_t col[], real_t *restrict predicted, size_t n_predict,
real_t *restrict biasA, real_t *restrict biasB,
real_t glob_mean,
int_t m, int_t n
)
{
if (m == 0) m = INT_MAX;
if (n == 0) n = INT_MAX;
bool user_bias = biasA != NULL;
for (size_t ix = 0; ix < n_predict; ix++)
predicted[ix] = (row[ix] >= m || row[ix] < 0 ||
col[ix] >= n || col[ix] < 0)?
(NAN_)
:
(biasB[col[ix]]
+ (user_bias? biasA[row[ix]] : 0.)
+ glob_mean);
return 0;
}
|
ast-dump-openmp-target-teams-distribute-parallel-for.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) {
#pragma omp target teams distribute parallel for
for (int i = 0; i < x; i++)
;
}
void test_two(int x, int y) {
#pragma omp target teams distribute parallel for
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_three(int x, int y) {
#pragma omp target teams distribute parallel for collapse(1)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_four(int x, int y) {
#pragma omp target teams distribute parallel for collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_five(int x, int y, int z) {
#pragma omp target teams distribute parallel for collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
for (int i = 0; i < z; i++)
;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeParallelForDirective {{.*}} <line:4:9, col:49>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:9) *const restrict'
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:9) *const restrict'
// CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:5:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:9) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:9) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:9) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:9) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:9) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:9) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:9) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeParallelForDirective {{.*}} <line:10:9, col:49>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:9) *const restrict'
// CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:9) *const restrict'
// CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:11:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:9) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeParallelForDirective {{.*}} <line:17:9, col:61>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:50, col:60>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:59> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:59> 'int' 1
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:9) *const restrict'
// CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:9) *const restrict'
// CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:18:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:9) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeParallelForDirective {{.*}} <line:24:9, col:61>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:50, col:60>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:59> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:59> 'int' 2
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:9) *const restrict'
// CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:9) *const restrict'
// CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:25:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:26:5> col:5 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:5> col:5 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:5> col:5 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:5> col:5 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:9) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPTargetTeamsDistributeParallelForDirective {{.*}} <line:31:9, col:61>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:50, col:60>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:59> 'int'
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:59> 'int' 2
// CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:9) *const restrict'
// CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:9) *const restrict'
// CHECK-NEXT: | | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:32:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:33:5> col:5 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:9) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:9) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:5> col:5 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:9) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:9) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:5> col:5 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:9) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:9) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:9) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:9) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:5> col:5 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:9) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:9) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:9) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:9) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:9) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
valid.yolo1.src.h | #pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_32_544_544_3_3_3.h"
#include "gen_ukr_A4B2gemm_1_32_544_544_3_3_3.h"
void testrun(float* A ,float*B, float*C, float*oriB ){
int tid = omp_get_thread_num();
int Nx = 544;
int Ny = 544;
int Nh = 3;
long long Astrides[6] = {0,1,2,3,4,5};
int b1 = 0;
for (int fpck = (tid%1)*16; fpck < uNf; fpck+=1*16){
for(int cwh = (tid/1)*8; cwh < uNc*uNw*uNh/8*8; cwh+=8*1){
transpose8x8_avx(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
transpose8x8_avx(oriB+ (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 8, uNc*uNw*uNh, 16);
}
if((tid/1)*8==0){
int cwh = uNc*uNw*uNh/8*8;
transpose3x8_avx(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
transpose3x8_avx(oriB+ (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 8, uNc*uNw*uNh, 16);
}
}
#pragma omp barrier// begin push button generated block
for(int c5=0;c5<3+0;c5+=3)
{
for(int f5=0;f5<32+0;f5+=32)
{
for(int xy5=0;xy5<295936+0;xy5+=295936)
{
for(int c4=c5;c4<min(3, 3+c5);c4+=3)
{
for(int xy4=xy5;xy4<min(295936, 295936+xy5);xy4+=295936)
{
for(int c3=c4;c3<min(3, 3+c4);c3+=Tc1)
{
for(int xy3=xy4;xy3<min(295936, 295936+xy4);xy3+=Txy3)
{
for(int f4=f5;f4<min(32, 32+f5);f4+=32)
{
for(int f3=f4;f3<min(32, 32+f4);f3+=Tf2)
{
for(int f2=f3;f2<min(32, Tf2+f3);f2+=16)
{
for(int xy2=xy3;xy2<min(295936, Txy3+xy3);xy2+=6)
{
for(int c2=c3;c2<min(3, Tc1+c3);c2+=Tc1)
{
for(int c1=c2;c1<min(3, Tc1+c2);c1+=Tc1)
{
for(int xy1=xy2;xy1<min(295936, 6+xy2);xy1+=6)
{
for(int f1=f2;f1<min(32, 16+f2);f1+=16)
{
int ctile=min(Tc1, 3-c1);
int x1=xy1/544;
int y1=xy1%544/1;
int c1_1=c1/1;
int c1_2=c1%1/1;
int kf1_1=f1/16;
int kf1_2=f1%16/1;
int of1_1=f1/1;
int of1_2=f1%1/1;
int offsetA=0+b1*894348+c1_1*298116+1*x1*546+1*y1*1+c1_2*1;
int offsetB=0+kf1_1*432+c1*144+0*48+0*16+kf1_2*1;
int offsetC=0+b1*9469952+of1_1*295936+x1*544+y1*1+of1_2*1;
if(544-y1>=6){
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
else if(544*544-xy1>=6){
for(int sti=544-y1;sti<6;sti+=1)
{
Astrides[sti]+=2;
}
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
for(int sti=544-y1;sti<6;sti+=1)
{
Astrides[sti]-=2;
}
}
else{
cnn_ukr_float_scatter_4x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
// end push button generated block
} |
load_data_omp45.c | #include "mytypes.h"
#include <stdio.h>
#include <cuda_runtime.h>
#include "load_data.h"
/* Put extern declarations in an OpenMP "declare target" directive */
#define OMP45_DECLARE
#define __parm_init__
#include "runparm.h"
#include "pf3dbench.h"
#include "pf3dbenchvars.h"
/* tN_new has no guard zones but tvar has one in x and y. */
#define tN_new(a,b,c) tN_new[CELTNDX3(a,b,c)]
#define tvar(a,b,c) tvar[CELTNDX(a,b,c)]
void load_scalars_omp45(int nx_in, int ny_in, int nxl_in,
int nyl_in, int nzl_in, int nxa_in, int nya_in,
int nza_in, int ngrd_in, long nplng_in, long ngtot_in,
int num_teams_in, long ntheta_in,
real clight_in, real csound_in,
real dt_in, real dx_in, real dy_in, real dz_in,
real timunit_in,
int mp_rank_in, int mp_size_in, int mp_p_in, int mp_q_in,
int mp_r_in, int mp_myp_in, int mp_myq_in, int mp_myr_in)
{
/* Launch one team and have the lead thread of that team do everything */
#pragma omp target teams num_teams(1)
{
nx= nx_in; ny= ny_in; nxl= nxl_in; nyl= nyl_in; nzl= nzl_in;
nxa= nxa_in; nya= nya_in; nza= nza_in; ngrd= ngrd_in; nplng= nplng_in;
ngtot= ngtot_in; ntheta= ntheta_in;
num_teams= num_teams_in;
clight= clight_in; csound= csound_in;
dt= dt_in; dx= dx_in; dy= dy_in; dz= dz_in;
timunit= timunit_in;
mp_rank= mp_rank_in;
mp_size= mp_size_in;
mp_p= mp_p_in;
mp_q= mp_q_in;
mp_r= mp_r_in;
mp_myp= mp_myp_in;
mp_myq= mp_myq_in;
mp_myr= mp_myr_in;
if(mp_rank == 0) printf("nxl=%d, nyl=%d, nzl=%d\n", nxl, nyl, nzl);
}
if(mp_rank == 0) puts("scalars have been initialized");
}
void load_arrays_omp45(void)
{
#pragma omp target enter data map(to:theta[0:ntheta])
#pragma omp target enter data map(to:cbuf[0:NCBUF])
if(mp_rank == 0) puts("arrays have been initialized");
}
void copy_tN_pre(rcomplex * restrict tN, rcomplex * restrict tN_new)
{
int ii;
#pragma omp distribute parallel for
for(ii= 0; ii < ngtot; ii++) {
tN_new[ii]= tN[ii];
}
}
void copy_to_tN_omp45(rcomplex * restrict tvar, rcomplex * restrict tN_new)
{
int ix, iy, iz;
#ifdef _OPENMP
#pragma omp distribute parallel for private(ix, iy, iz)
#endif
for(iz= 0; iz < nzl; iz++) {
for (iy=0; iy<nyl; iy++) {
for (ix=0; ix<nxl; ix++) {
tN_new(ix, iy, iz)= tvar(ix, iy, iz);
}
}
}
}
void copy_from_tN_omp45(rcomplex * restrict tN_new, rcomplex * restrict tvar)
{
int ix, iy, iz;
#ifdef _OPENMP
#pragma omp distribute parallel for private(ix, iy, iz)
#endif
for(iz= 0; iz < nzl; iz++) {
for (iy=0; iy<nyl; iy++) {
for (ix=0; ix<nxl; ix++) {
tvar(ix, iy, iz)= tN_new(ix, iy, iz);
}
}
}
}
|
plot.h | #ifndef OPENMC_PLOT_H
#define OPENMC_PLOT_H
#include <unordered_map>
#include <sstream>
#include "pugixml.hpp"
#include "xtensor/xarray.hpp"
#include "hdf5.h"
#include "openmc/position.h"
#include "openmc/constants.h"
#include "openmc/cell.h"
#include "openmc/error.h"
#include "openmc/geometry.h"
#include "openmc/particle.h"
#include "openmc/xml_interface.h"
#include "openmc/random_lcg.h"
namespace openmc {
//===============================================================================
// Global variables
//===============================================================================
class Plot;
namespace model {
extern std::unordered_map<int, int> plot_map; //!< map of plot ids to index
extern vector<Plot> plots; //!< Plot instance container
extern uint64_t plotter_prn_seeds[N_STREAMS]; // Random number seeds used for plotter
extern int plotter_stream; // Stream index used by the plotter
} // namespace model
//===============================================================================
// RGBColor holds color information for plotted objects
//===============================================================================
struct RGBColor {
//Constructors
RGBColor() : red(0), green(0), blue(0) { };
RGBColor(const int v[3]) : red(v[0]), green(v[1]), blue(v[2]) { };
RGBColor(int r, int g, int b) : red(r), green(g), blue(b) { };
RGBColor(const vector<int>& v)
{
if (v.size() != 3) {
throw std::out_of_range("Incorrect vector size for RGBColor.");
}
red = v[0];
green = v[1];
blue = v[2];
}
bool operator ==(const RGBColor& other) {
return red == other.red && green == other.green && blue == other.blue;
}
// Members
uint8_t red, green, blue;
};
// some default colors
const RGBColor WHITE {255, 255, 255};
const RGBColor RED {255, 0, 0};
typedef xt::xtensor<RGBColor, 2> ImageData;
struct IdData {
// Constructor
IdData(size_t h_res, size_t v_res);
// Methods
void set_value(size_t y, size_t x, const Particle& p, int level);
void set_overlap(size_t y, size_t x);
// Members
xt::xtensor<int32_t, 3> data_; //!< 2D array of cell & material ids
};
struct PropertyData {
// Constructor
PropertyData(size_t h_res, size_t v_res);
// Methods
void set_value(size_t y, size_t x, const Particle& p, int level);
void set_overlap(size_t y, size_t x);
// Members
xt::xtensor<double, 3> data_; //!< 2D array of temperature & density data
};
enum class PlotType {
slice = 1,
voxel = 2
};
enum class PlotBasis {
xy = 1,
xz = 2,
yz = 3
};
enum class PlotColorBy {
cells = 0,
mats = 1
};
//===============================================================================
// Plot class
//===============================================================================
class PlotBase {
public:
template<class T> T get_map() const;
// Members
public:
Position origin_; //!< Plot origin in geometry
Position width_; //!< Plot width in geometry
PlotBasis basis_; //!< Plot basis (XY/XZ/YZ)
array<size_t, 3> pixels_; //!< Plot size in pixels
bool color_overlaps_; //!< Show overlapping cells?
int level_; //!< Plot universe level
};
template<class T>
T PlotBase::get_map() const {
size_t width = pixels_[0];
size_t height = pixels_[1];
// get pixel size
double in_pixel = (width_[0])/static_cast<double>(width);
double out_pixel = (width_[1])/static_cast<double>(height);
// size data array
T data(width, height);
// setup basis indices and initial position centered on pixel
int in_i, out_i;
Position xyz = origin_;
switch(basis_) {
case PlotBasis::xy :
in_i = 0;
out_i = 1;
break;
case PlotBasis::xz :
in_i = 0;
out_i = 2;
break;
case PlotBasis::yz :
in_i = 1;
out_i = 2;
break;
default:
UNREACHABLE();
}
// set initial position
xyz[in_i] = origin_[in_i] - width_[0] / 2. + in_pixel / 2.;
xyz[out_i] = origin_[out_i] + width_[1] / 2. - out_pixel / 2.;
// arbitrary direction
Direction dir = {0.7071, 0.7071, 0.0};
#pragma omp parallel
{
Particle p;
p.r() = xyz;
p.u() = dir;
p.coord(0).universe = model::root_universe;
int level = level_;
int j{};
#pragma omp for
for (int y = 0; y < height; y++) {
p.r()[out_i] = xyz[out_i] - out_pixel * y;
for (int x = 0; x < width; x++) {
p.r()[in_i] = xyz[in_i] + in_pixel * x;
p.n_coord() = 1;
// local variables
bool found_cell = exhaustive_find_cell(p);
j = p.n_coord() - 1;
if (level >= 0) { j = level; }
if (found_cell) {
data.set_value(y, x, p, j);
}
if (color_overlaps_ && check_cell_overlap(p, false)) {
data.set_overlap(y, x);
}
} // inner for
} // outer for
} // omp parallel
return data;
}
class Plot : public PlotBase {
public:
// Constructor
Plot(pugi::xml_node plot);
// Methods
private:
void set_id(pugi::xml_node plot_node);
void set_type(pugi::xml_node plot_node);
void set_output_path(pugi::xml_node plot_node);
void set_bg_color(pugi::xml_node plot_node);
void set_basis(pugi::xml_node plot_node);
void set_origin(pugi::xml_node plot_node);
void set_width(pugi::xml_node plot_node);
void set_universe(pugi::xml_node plot_node);
void set_default_colors(pugi::xml_node plot_node);
void set_user_colors(pugi::xml_node plot_node);
void set_meshlines(pugi::xml_node plot_node);
void set_mask(pugi::xml_node plot_node);
void set_overlap_color(pugi::xml_node plot_node);
// Members
public:
int id_; //!< Plot ID
PlotType type_; //!< Plot type (Slice/Voxel)
PlotColorBy color_by_; //!< Plot coloring (cell/material)
int meshlines_width_; //!< Width of lines added to the plot
int index_meshlines_mesh_ {-1}; //!< Index of the mesh to draw on the plot
RGBColor meshlines_color_; //!< Color of meshlines on the plot
RGBColor not_found_ {WHITE}; //!< Plot background color
RGBColor overlap_color_ {RED}; //!< Plot overlap color
vector<RGBColor> colors_; //!< Plot colors
std::string path_plot_; //!< Plot output filename
};
//===============================================================================
// Non-member functions
//===============================================================================
//! Add mesh lines to image data of a plot object
//! \param[in] plot object
//! \param[out] image data associated with the plot object
void draw_mesh_lines(Plot const& pl, ImageData& data);
//! Write a ppm image to file using a plot object's image data
//! \param[in] plot object
//! \param[out] image data associated with the plot object
void output_ppm(Plot const& pl, const ImageData& data);
//! Initialize a voxel file
//! \param[in] id of an open hdf5 file
//! \param[in] dimensions of the voxel file (dx, dy, dz)
//! \param[out] dataspace pointer to voxel data
//! \param[out] dataset pointer to voxesl data
//! \param[out] pointer to memory space of voxel data
void voxel_init(hid_t file_id, const hsize_t* dims, hid_t* dspace,
hid_t* dset, hid_t* memspace);
//! Write a section of the voxel data to hdf5
//! \param[in] voxel slice
//! \param[out] dataspace pointer to voxel data
//! \param[out] dataset pointer to voxesl data
//! \param[out] pointer to data to write
void voxel_write_slice(int x, hid_t dspace, hid_t dset,
hid_t memspace, void* buf);
//! Close voxel file entities
//! \param[in] data space to close
//! \param[in] dataset to close
//! \param[in] memory space to close
void voxel_finalize(hid_t dspace, hid_t dset, hid_t memspace);
//===============================================================================
// External functions
//===============================================================================
//! Read plot specifications from a plots.xml file
void read_plots_xml();
//! Create a ppm image for a plot object
//! \param[in] plot object
void create_ppm(Plot const& pl);
//! Create an hdf5 voxel file for a plot object
//! \param[in] plot object
void create_voxel(Plot const& pl);
//! Create a randomly generated RGB color
//! \return RGBColor with random value
RGBColor random_color();
} // namespace openmc
#endif // OPENMC_PLOT_H
|
mandelbrot.c | /*
To compile:
gcc -O3 -o mandelbrot mandelbrot.c png_util.c -I. -lpng -lm -fopenmp
Or just type:
module load gcc
make
To create an image with 4096 x 4096 pixels (last argument will be used to set number of threads):
./mandelbrot 4096 4096 1
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "png_util.h"
#include <omp.h>
#define MXITER 1000
typedef struct {
double r;
double i;
}complex_t;
// return iterations before z leaves mandelbrot set for given c
int testpoint(complex_t c){
int iter;
complex_t z;
double temp;
z = c;
for(iter=0; iter<MXITER; iter++){
temp = (z.r*z.r) - (z.i*z.i) + c.r;
z.i = z.r*z.i*2. + c.i;
z.r = temp;
if((z.r*z.r+z.i*z.i)>4.0){
return iter;
}
}
return iter;
}
// perform Mandelbrot iteration on a grid of numbers in the complex plane
// record the iteration counts in the count array
void mandelbrot(int Nre, int Nim, complex_t cmin, complex_t cmax, float *count){
int n,m;
complex_t c;
double dr = (cmax.r-cmin.r)/(Nre-1);
double di = (cmax.i-cmin.i)/(Nim-1);;
// Q2c: add a compiler directive to split the outer for loop amongst threads here
#pragma omp parallel for
for(n=0;n<Nim;++n){
for(m=0;m<Nre;++m){
c.r = cmin.r + dr*m;
c.i = cmin.i + di*n;
count[m+n*Nre] = testpoint(c);
}
}
}
int main(int argc, char **argv){
// to create a 4096x4096 pixel image [ last argument is placeholder for number of threads ]
// usage: ./mandelbrot 4096 4096 1
int Nre = atoi(argv[1]);
int Nim = atoi(argv[2]);
int Nthreads = atoi(argv[3]);
// Q2b: set the number of OpenMP threads to be Nthreads here:
Nthreads = atoi(argv[argc-1]);
omp_set_num_threads(Nthreads);
// storage for the iteration counts
float *count = (float*) malloc(Nre*Nim*sizeof(float));
// Parameters for a bounding box for "c" that generates an interesting image
const float centRe = -.759856, centIm= .125547;
const float diam = 0.151579;
complex_t cmin;
complex_t cmax;
cmin.r = centRe - 0.5*diam;
cmax.r = centRe + 0.5*diam;
cmin.i = centIm - 0.5*diam;
cmax.i = centIm + 0.5*diam;
// Q2d: complete this to read time before calling mandelbrot with OpenMP API wall clock time
double start = omp_get_wtime();
// compute mandelbrot set
mandelbrot(Nre, Nim, cmin, cmax, count);
// Q2d: complete this to read time after calling mandelbrot using OpenMP wall clock time
double end = omp_get_wtime();
// print elapsed time
printf("elapsed = %g\n", end-start);
// output mandelbrot to png format image
FILE *fp = fopen("mandelbrot.png", "w");
write_hot_png(fp, Nre, Nim, count, 0, 80);
exit(0);
return 0;
}
|
teams_notarget_get_num_teams.c | #include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#define N 10000
#define MAX_TEAMS 64
int main() {
int n = N;
int team_id, cur_teams;
int teams_sizes[10] = {1, 2, 4, 8, 16, 32, 64, 128, 256, 512};
int *a = (int *)malloc(n*sizeof(int));
int err = 0;
for (int j = 0; j < 10; j++) {
cur_teams = 0;
#pragma omp teams distribute num_teams(teams_sizes[j])
for (int i = 0; i < n; i++) {
cur_teams = omp_get_num_teams();
a[i] = i;
}
err = 0;
for (int i = 0; i < n; i++) {
if (a[i] != i) {
printf("Error at %d: a = %d, should be %d\n", i, a[i], i);
err++;
if (err > 10) break;
}
}
// If we have bigger number than MAX_TEAMS in num_teams() clause we will
// get omp_get_num_teams() as MAX_TEAMS.
if ( ((cur_teams > MAX_TEAMS) && (cur_teams != MAX_TEAMS)) && (cur_teams != teams_sizes[j]) ) {
printf("omp_get_num_teams() : %d but we tried to set num_teams(%d)\n", cur_teams, teams_sizes[j]);
err++;
}
}
return err;
}
|
GB_unaryop__identity_int8_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int8_int8
// op(A') function: GB_tran__identity_int8_int8
// C type: int8_t
// A type: int8_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
int8_t z = (int8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_int8_int8
(
int8_t *restrict Cx,
const int8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_int8_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Example_tasking.15.c | /*
* @@name: tasking.15c
* @@type: C
* @@compilable: yes
* @@linkable: yes
* @@expect: success
* @@version: omp_4.0
*/
#include <stdio.h>
int main()
{
int x = 1;
#pragma omp parallel
#pragma omp single
{
#pragma omp task shared(x) depend(out: x)
x = 2;
#pragma omp task shared(x) depend(in: x)
printf("x = %d\n", x);
}
return 0;
}
|
core_csyr2k.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zsyr2k.c, normal z -> c, Fri Sep 28 17:38:23 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
/***************************************************************************//**
*
* @ingroup core_syr2k
*
* Performs one of the symmetric rank 2k operations
*
* \f[ C = \alpha A \times B^T + \alpha B \times A^T + \beta C, \f]
* or
* \f[ C = \alpha A^T \times B + \alpha B^T \times A + \beta C, \f]
*
* where alpha and beta are scalars,
* C is an n-by-n symmetric matrix, and A and B are n-by-k matrices
* in the first case and k-by-n matrices in the second case.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of C is stored;
* - PlasmaLower: Lower triangle of C is stored.
*
* @param[in] trans
* - PlasmaNoTrans:
* \f[ C = \alpha A \times B^T + \alpha B \times A^T + \beta C; \f]
* - PlasmaTrans:
* \f[ C = \alpha A^T \times B + \alpha B^T \times A + \beta C. \f]
*
* @param[in] n
* The order of the matrix C. n >= zero.
*
* @param[in] k
* If trans = PlasmaNoTrans, number of columns of the A and B matrices;
* if trans = PlasmaTrans, number of rows of the A and B matrices.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* An lda-by-ka matrix.
* If trans = PlasmaNoTrans, ka = k;
* if trans = PlasmaTrans, ka = n.
*
* @param[in] lda
* The leading dimension of the array A.
* If trans = PlasmaNoTrans, lda >= max(1, n);
* if trans = PlasmaTrans, lda >= max(1, k).
*
* @param[in] B
* An ldb-by-kb matrix.
* If trans = PlasmaNoTrans, kb = k;
* if trans = PlasmaTrans, kb = n.
*
* @param[in] ldb
* The leading dimension of the array B.
* If trans = PlasmaNoTrans, ldb >= max(1, n);
* if trans = PlasmaTrans, ldb >= max(1, k).
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] C
* An ldc-by-n matrix.
* On exit, the uplo part of the matrix is overwritten
* by the uplo part of the updated matrix.
*
* @param[in] ldc
* The leading dimension of the array C. ldc >= max(1, n).
*
******************************************************************************/
__attribute__((weak))
void plasma_core_csyr2k(plasma_enum_t uplo, plasma_enum_t trans,
int n, int k,
plasma_complex32_t alpha, const plasma_complex32_t *A, int lda,
const plasma_complex32_t *B, int ldb,
plasma_complex32_t beta, plasma_complex32_t *C, int ldc)
{
cblas_csyr2k(CblasColMajor,
(CBLAS_UPLO)uplo, (CBLAS_TRANSPOSE)trans,
n, k,
CBLAS_SADDR(alpha), A, lda,
B, ldb,
CBLAS_SADDR(beta), C, ldc);
}
/******************************************************************************/
void plasma_core_omp_csyr2k(
plasma_enum_t uplo, plasma_enum_t trans,
int n, int k,
plasma_complex32_t alpha, const plasma_complex32_t *A, int lda,
const plasma_complex32_t *B, int ldb,
plasma_complex32_t beta, plasma_complex32_t *C, int ldc,
plasma_sequence_t *sequence, plasma_request_t *request)
{
int ak;
int bk;
if (trans == PlasmaNoTrans) {
ak = k;
bk = k;
}
else {
ak = n;
bk = n;
}
#pragma omp task depend(in:A[0:lda*ak]) \
depend(in:B[0:ldb*bk]) \
depend(inout:C[0:ldc*n])
{
if (sequence->status == PlasmaSuccess)
plasma_core_csyr2k(uplo, trans,
n, k,
alpha, A, lda,
B, ldb,
beta, C, ldc);
}
}
|
openmp-ex01.c | #include <stdio.h>
/* OpenMP includes some library calls, for which we need the header file */
#include <omp.h>
int main(void)
{
int max_threads = 5;
printf ("You're all individuals!\n");
/* one library call sets the number of threads in the next parallel region */
omp_set_num_threads(max_threads);
#pragma omp parallel
{
printf("Yes, we're all individuals!\n");
}
return 0;
}
|
OverlayGraph.h | /*
* OverlayGraph.h
*
* Created on: Dec 14, 2015
* Author: Matthias Wolf & Michael Wegner
*
* Copyright (c) 2016 Michael Wegner and Matthias Wolf
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef OVERLAYGRAPH_H_
#define OVERLAYGRAPH_H_
#include <vector>
#include <unordered_map>
#include "../constants.h"
#include "LevelInfo.h"
#include <iostream>
namespace CRP {
class Graph;
class MultiLevelPartition;
} /* namespace CRP */
namespace CRP {
/**
* Stores an overlay vertex with all necessary information.
* The neighborOverlayVertex is the neighboring vertex incident to originalEdge.
* The originalEdge is either an index to a @ref ForwardEdge (in case the overlay vertex is an exit vertex) or an index to a @ref BackwardEdge
* (in case the overlay vertex is an entry vertex).
* The vector entryExitPoint stores for each level l on which this vertex is an overlay vertex the entry/exit point index in its cell on level l.
*/
struct OverlayVertex {
index originalVertex;
index neighborOverlayVertex;
pv cellNumber;
index originalEdge;
std::vector<index> entryExitPoint;
};
struct Cell {
index numEntryPoints; // p_C
index numExitPoints; // q_C
index cellOffset; // f_C
index overlayIdOffset; // f_C~, maps entry/exit point of cell to overlay vertex.
};
class OverlayGraph {
public:
OverlayGraph(const std::vector<OverlayVertex>& overlayVertices, const std::vector<index>& vertexCountInLevel,
const std::vector<std::unordered_map<pv, Cell>>& cellMapping, const std::vector<index>& overlayIdMapping,
const LevelInfo& levelInfo, count weightVectorSize) : overlayVertices(overlayVertices),
vertexCountInLevel(vertexCountInLevel), cellMapping(cellMapping), overlayIdMapping(overlayIdMapping),
levelInfo(levelInfo), weightVectorSize(weightVectorSize) {}
OverlayGraph(Graph &graph, const MultiLevelPartition &mlp);
OverlayGraph() = default;
inline const OverlayVertex& getVertex(index u) const {
assert(u < overlayVertices.size());
return overlayVertices[u];
}
const Cell& getCell(pv cellNumber, level l) const;
template <typename L> void forVertices(L handle) const;
/**
* Iterates over all outgoing neighbors of @a u.
* @param u An entry vertex
* @param l Level of the cell
* @param handle must handle an index to exit OverlayVertex and an index into the weight array
*/
template <typename L> void forOutNeighborsOf(index u, level l, L handle) const;
/**
* Iterates over all incoming neighbors of @a u.
* @param v An exit vertex
* @param l Level of the cell
* @param handle must handle an index to entry OverlayVertex and an index into the weight array
*/
template <typename L> void forInNeighborsOf(index v, level l, L handle) const;
/**
* Iterates over all cells in level @a l.
* @param l the level
* @param handle must handle a const Cell& and its (truncated) pv.
*/
template <typename L> void forCells(level l, L handle) const;
/**
* Iterates over all cells in level @a l in parallel.
* @param l the level
* @param handle must handle a const Cell& and its (truncated) pv.
*/
template <typename L> void parallelForCells(level l, L handle) const;
inline count numberOfVertices() const {
return overlayVertices.size();
}
inline count numberOfVerticesInLevel(level l) const {
assert(0 < l && l <= vertexCountInLevel.size());
return vertexCountInLevel[l - 1];
}
inline count numberOfCellsInLevel(level l) const {
assert(0 < l && l <= cellMapping.size());
return cellMapping[l - 1].size();
}
/**
* Returns the index of the OverlayVertex that is the entry point of the @a cell
* with index @a entryPointIndex
* @param cell
* @param entryPointIndex
* @return
*/
inline index getEntryPoint(const Cell& cell, index entryPointIndex) const {
assert(entryPointIndex < cell.numEntryPoints);
return overlayIdMapping[cell.overlayIdOffset + entryPointIndex];
}
/**
* Returns the index of the OverlayVertex that is the exit point of the @a cell
* with index @a exitPointIndex
* @param cell
* @param exitPointIndex
* @return
*/
inline index getExitPoint(const Cell& cell, index exitPointIndex) const {
assert(exitPointIndex < cell.numExitPoints);
return overlayIdMapping[cell.overlayIdOffset + cell.numEntryPoints + exitPointIndex];
}
inline const LevelInfo& getLevelInfo() const {
return levelInfo;
}
inline level getQueryLevel(const pv sCellNumber, const pv tCellNumber, const pv vCellNumber) const {
return levelInfo.getQueryLevel(sCellNumber, tCellNumber, vCellNumber);
}
inline const count getWeightVectorSize() const {
return weightVectorSize;
}
inline const std::vector<index> getOverlayIdMapping() const {
return overlayIdMapping;
}
private:
std::vector<OverlayVertex> overlayVertices;
std::vector<count> vertexCountInLevel;
std::vector<std::unordered_map<pv, Cell>> cellMapping;
std::vector<index> overlayIdMapping;
LevelInfo levelInfo;
count weightVectorSize;
void build(Graph &graph, level numberOfLevels);
/**
* Builds the overlay vertices but does not set the OverlayVertex::entryExitPoint (as this is
* still unknown). It does however reserve the memory needed to store this information, i.e.
* in the following it may be assumed that the vector has already the correct size.
* @param graph the graph
* @param numberOfLevels the number of levels
*/
std::vector<bool> buildOverlayVertices(Graph &graph, level numberOfLevels);
/**
* Builds the cells and sets OverlayVertex::entryExitPoint for all overlayVertices.
* @param graph the original graph
* @param numberOfLevels the number of levels
* above)
*/
void buildCells(Graph &graph, level numberOfLevels, std::vector<bool> &exitFlagsArray);
};
template<typename L>
void OverlayGraph::forVertices(L handle) const {
for (const OverlayVertex& vertex : overlayVertices) {
handle(vertex);
}
}
template<typename L>
void OverlayGraph::forOutNeighborsOf(index u, level l, L handle) const {
const OverlayVertex& vertex = getVertex(u);
assert(0 < l && l <= vertex.entryExitPoint.size());
index entryPoint = vertex.entryExitPoint[l - 1];
const Cell& cell = getCell(vertex.cellNumber, l);
index weightOffset = cell.cellOffset + entryPoint * cell.numExitPoints;
index overlayIdOffset = cell.overlayIdOffset + cell.numEntryPoints;
for (index i = 0; i < cell.numExitPoints; ++i) {
assert(overlayIdOffset+i < overlayIdMapping.size());
handle(overlayIdMapping[overlayIdOffset + i], weightOffset + i);
}
}
template<typename L>
void OverlayGraph::forInNeighborsOf(index v, level l, L handle) const {
const OverlayVertex& vertex = getVertex(v);
assert(0 < l && l <= vertex.entryExitPoint.size());
index exitPoint = vertex.entryExitPoint[l - 1];
const Cell& cell = getCell(vertex.cellNumber, l);
index weightOffset = cell.cellOffset + exitPoint;
index overlayIdOffset = cell.overlayIdOffset;
for (index i = 0; i < cell.numEntryPoints; ++i) {
assert(overlayIdOffset+i < overlayIdMapping.size());
handle(overlayIdMapping[overlayIdOffset + i], weightOffset + cell.numExitPoints * i);
}
}
template<typename L>
void OverlayGraph::forCells(level l, L handle) const {
assert(0 < l && l <= levelInfo.getLevelCount());
for (auto it = cellMapping[l-1].begin(); it != cellMapping[l-1].end(); ++it) {
handle(it->second, it->first);
}
}
template<typename L>
void OverlayGraph::parallelForCells(level l, L handle) const {
assert(0 < l && l <= levelInfo.getLevelCount());
std::vector<pv> keys(cellMapping[l-1].size());
std::vector<Cell> cells(cellMapping[l-1].size());
index i = 0;
for (auto it = cellMapping[l-1].begin(); it != cellMapping[l-1].end(); ++it, ++i) {
keys[i] = it->first;
cells[i] = it->second;
}
#pragma omp parallel for schedule(dynamic)
for (index i = 0; i < keys.size(); ++i) {
handle(cells[i], keys[i]);
}
}
} /* namespace CRP */
#endif /* OVERLAYGRAPH_H_ */
|
DRB028-privatemissing-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
tmp should be annotated as private to avoid race condition.
Data race pairs: tmp@65:5 vs. tmp@66:12
tmp@65:5 vs. tmp@65:5
*/
#include <stdlib.h>
#include <stdio.h>
int main(int argc, char* argv[])
{
int i;
int tmp;
int len=100;
int a[100];
#pragma omp parallel for simd
for (i=0;i<len;i++)
a[i]=i;
#pragma omp parallel for simd private(tmp)
for (i=0;i<len;i++)
{
tmp =a[i]+i;
a[i] = tmp;
}
printf("a[50]=%d\n", a[50]);
return 0;
}
|
alignment.c | /**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */
/**********************************************************************************************/
/* Original code from the Application Kernel Matrix by Cray */
/* that was based on the ClustalW application */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <libgen.h>
#include "param.h"
#include "sequence.h"
#include "alignment.h"
#include "bots.h"
int ktup, window, signif;
int prot_ktup, prot_window, prot_signif;
int gap_pos1, gap_pos2, mat_avscore;
int nseqs, max_aa;
#define MAX_ALN_LENGTH 5000
int *seqlen_array, def_aa_xref[NUMRES+1];
int *bench_output, *seq_output;
double gap_open, gap_extend;
double prot_gap_open, prot_gap_extend;
double pw_go_penalty, pw_ge_penalty;
double prot_pw_go_penalty, prot_pw_ge_penalty;
char **args, **names, **seq_array;
int matrix[NUMRES][NUMRES];
double gap_open_scale;
double gap_extend_scale;
// dnaFlag default value is false
int dnaFlag = FALSE;
// clustalw default value is false
int clustalw = FALSE;
#define INT_SCALE 100
#define MIN(a,b) ((a)<(b)?(a):(b))
#define tbgap(k) ((k) <= 0 ? 0 : tb + gh * (k))
#define tegap(k) ((k) <= 0 ? 0 : te + gh * (k))
/***********************************************************************
* :
**********************************************************************/
void del(int k, int *print_ptr, int *last_print, int *displ)
{
if (*last_print<0) *last_print = displ[(*print_ptr)-1] -= k;
else *last_print = displ[(*print_ptr)++] = -k;
}
/***********************************************************************
* :
**********************************************************************/
void add(int v, int *print_ptr, int *last_print, int *displ)
{
if (*last_print < 0) {
displ[(*print_ptr)-1] = v;
displ[(*print_ptr)++] = *last_print;
} else {
*last_print = displ[(*print_ptr)++] = v;
}
}
/***********************************************************************
* :
**********************************************************************/
int calc_score(int iat, int jat, int v1, int v2, int seq1, int seq2)
{
int i, j, ipos, jpos;
ipos = v1 + iat;
jpos = v2 + jat;
i = seq_array[seq1][ipos];
j = seq_array[seq2][jpos];
return (matrix[i][j]);
}
/***********************************************************************
* :
**********************************************************************/
int get_matrix(int *matptr, int *xref, int scale)
{
int gg_score = 0;
int gr_score = 0;
int i, j, k, ti, tj, ix;
int av1, av2, av3, min, max, maxres;
for (i = 0; i <= max_aa; i++)
for (j = 0; j <= max_aa; j++) matrix[i][j] = 0;
ix = 0;
maxres = 0;
for (i = 0; i <= max_aa; i++) {
ti = xref[i];
for (j = 0; j <= i; j++) {
tj = xref[j];
if ((ti != -1) && (tj != -1)) {
k = matptr[ix];
if (ti == tj) {
matrix[ti][ti] = k * scale;
maxres++;
} else {
matrix[ti][tj] = k * scale;
matrix[tj][ti] = k * scale;
}
ix++;
}
}
}
maxres--;
av1 = av2 = av3 = 0;
for (i = 0; i <= max_aa; i++) {
for (j = 0; j <= i; j++) {
av1 += matrix[i][j];
if (i == j) av2 += matrix[i][j];
else av3 += matrix[i][j];
}
}
av1 /= (maxres*maxres)/2;
av2 /= maxres;
av3 /= (int) (((double)(maxres*maxres-maxres))/2);
mat_avscore = -av3;
min = max = matrix[0][0];
for (i = 0; i <= max_aa; i++)
for (j = 1; j <= i; j++) {
if (matrix[i][j] < min) min = matrix[i][j];
if (matrix[i][j] > max) max = matrix[i][j];
}
for (i = 0; i < gap_pos1; i++) {
matrix[i][gap_pos1] = gr_score;
matrix[gap_pos1][i] = gr_score;
matrix[i][gap_pos2] = gr_score;
matrix[gap_pos2][i] = gr_score;
}
matrix[gap_pos1][gap_pos1] = gg_score;
matrix[gap_pos2][gap_pos2] = gg_score;
matrix[gap_pos2][gap_pos1] = gg_score;
matrix[gap_pos1][gap_pos2] = gg_score;
maxres += 2;
return(maxres);
}
/***********************************************************************
* :
**********************************************************************/
void forward_pass(char *ia, char *ib, int n, int m, int *se1, int *se2, int *maxscore, int g, int gh)
{
int i, j, f, p, t, hh;
int HH[MAX_ALN_LENGTH];
int DD[MAX_ALN_LENGTH];
*maxscore = 0;
*se1 = *se2 = 0;
for (i = 0; i <= m; i++) {HH[i] = 0; DD[i] = -g;}
for (i = 1; i <= n; i++) {
hh = p = 0;
f = -g;
for (j = 1; j <= m; j++) {
f -= gh;
t = hh - g - gh;
if (f < t) f = t;
DD[j] -= gh;
t = HH[j] - g - gh;
if (DD[j] < t) DD[j] = t;
hh = p + matrix[(int)ia[i]][(int)ib[j]];
if (hh < f) hh = f;
if (hh < DD[j]) hh = DD[j];
if (hh < 0) hh = 0;
p = HH[j];
HH[j] = hh;
if (hh > *maxscore) {*maxscore = hh; *se1 = i; *se2 = j;}
}
}
}
/***********************************************************************
* :
**********************************************************************/
void reverse_pass(char *ia, char *ib, int se1, int se2, int *sb1, int *sb2, int maxscore, int g, int gh)
{
int i, j, f, p, t, hh, cost;
int HH[MAX_ALN_LENGTH];
int DD[MAX_ALN_LENGTH];
cost = 0;
*sb1 = *sb2 = 1;
for (i = se2; i > 0; i--){ HH[i] = -1; DD[i] = -1;}
for (i = se1; i > 0; i--) {
hh = f = -1;
if (i == se1) p = 0; else p = -1;
for (j = se2; j > 0; j--) {
f -= gh;
t = hh - g - gh;
if (f < t) f = t;
DD[j] -= gh;
t = HH[j] - g - gh;
if (DD[j] < t) DD[j] = t;
hh = p + matrix[(int)ia[i]][(int)ib[j]];
if (hh < f) hh = f;
if (hh < DD[j]) hh = DD[j];
p = HH[j];
HH[j] = hh;
if (hh > cost) {
cost = hh; *sb1 = i; *sb2 = j;
if (cost >= maxscore) break;
}
}
if (cost >= maxscore) break;
}
}
/***********************************************************************
* :
**********************************************************************/
int diff (int A, int B, int M, int N, int tb, int te, int *print_ptr, int *last_print, int *displ, int seq1, int seq2, int g, int gh)
{
int i, j, f, e, s, t, hh;
int midi, midj, midh, type;
int HH[MAX_ALN_LENGTH];
int DD[MAX_ALN_LENGTH];
int RR[MAX_ALN_LENGTH];
int SS[MAX_ALN_LENGTH];
if (N <= 0) {if (M > 0) del(M, print_ptr, last_print, displ); return( - (int) tbgap(M)); }
if (M <= 1) {
if (M <= 0) {add(N, print_ptr, last_print, displ); return( - (int)tbgap(N));}
midh = -(tb+gh) - tegap(N);
hh = -(te+gh) - tbgap(N);
if (hh > midh) midh = hh;
midj = 0;
for (j = 1; j <= N; j++) {
hh = calc_score(1,j,A,B,seq1,seq2) - tegap(N-j) - tbgap(j-1);
if (hh > midh) {midh = hh; midj = j;}
}
if (midj == 0) {
del(1, print_ptr, last_print, displ);
add(N, print_ptr, last_print, displ);
} else {
if (midj > 1) add(midj-1, print_ptr, last_print, displ);
displ[(*print_ptr)++] = *last_print = 0;
if (midj < N) add(N-midj, print_ptr, last_print, displ);
}
return midh;
}
midi = M / 2;
HH[0] = 0.0;
t = -tb;
for (j = 1; j <= N; j++) {
HH[j] = t = t - gh;
DD[j] = t - g;
}
t = -tb;
for (i = 1; i <= midi; i++) {
s = HH[0];
HH[0] = hh = t = t - gh;
f = t - g;
for (j = 1; j <= N; j++) {
if ((hh = hh - g - gh) > (f = f - gh)) f = hh;
if ((hh = HH[j] - g - gh) > (e = DD[j]- gh)) e = hh;
hh = s + calc_score(i,j,A,B,seq1,seq2);
if (f > hh) hh = f;
if (e > hh) hh = e;
s = HH[j];
HH[j] = hh;
DD[j] = e;
}
}
DD[0] = HH[0];
RR[N] = 0;
t = -te;
for (j = N-1; j >= 0; j--) {RR[j] = t = t - gh; SS[j] = t - g;}
t = -te;
for (i = M - 1; i >= midi; i--) {
s = RR[N];
RR[N] = hh = t = t-gh;
f = t - g;
for (j = N - 1; j >= 0; j--) {
if ((hh = hh - g - gh) > (f = f - gh)) f = hh;
if ((hh = RR[j] - g - gh) > (e = SS[j] - gh)) e = hh;
hh = s + calc_score(i+1,j+1,A,B,seq1,seq2);
if (f > hh) hh = f;
if (e > hh) hh = e;
s = RR[j];
RR[j] = hh;
SS[j] = e;
}
}
SS[N] = RR[N];
midh = HH[0] + RR[0];
midj = 0;
type = 1;
for (j = 0; j <= N; j++) {
hh = HH[j] + RR[j];
if (hh >= midh)
if (hh > midh || (HH[j] != DD[j] && RR[j] == SS[j]))
{midh = hh; midj = j;}
}
for (j = N; j >= 0; j--) {
hh = DD[j] + SS[j] + g;
if (hh > midh) {midh = hh;midj = j;type = 2;}
}
if (type == 1) {
diff(A, B, midi, midj, tb, g, print_ptr, last_print, displ, seq1, seq2, g, gh);
diff(A+midi, B+midj, M-midi, N-midj, g, te, print_ptr, last_print, displ, seq1, seq2, g, gh);
} else {
diff(A, B, midi-1, midj, tb, 0.0, print_ptr, last_print, displ, seq1, seq2, g, gh);
del(2, print_ptr, last_print, displ);
diff(A+midi+1, B+midj, M-midi-1, N-midj, 0.0, te, print_ptr, last_print, displ, seq1, seq2, g, gh);
}
return midh;
}
/***********************************************************************
* :
**********************************************************************/
double tracepath(int tsb1, int tsb2, int *print_ptr, int *displ, int seq1, int seq2)
{
int i, k;
int i1 = tsb1;
int i2 = tsb2;
int pos = 0;
int count = 0;
for (i = 1; i <= *print_ptr - 1; ++i) {
if (displ[i]==0) {
char c1 = seq_array[seq1][i1];
char c2 = seq_array[seq2][i2];
if ((c1!=gap_pos1) && (c1 != gap_pos2) && (c1 == c2)) count++;
++i1; ++i2; ++pos;
} else if ((k = displ[i]) > 0) {
i2 += k;
pos += k;
} else {
i1 -= k;
pos -= k;
}
}
return (100.0 * (double) count);
}
int pairalign()
{
int i, n, m, si, sj;
int len1, len2, maxres;
double gg, mm_score;
int *mat_xref, *matptr;
matptr = gon250mt;
mat_xref = def_aa_xref;
maxres = get_matrix(matptr, mat_xref, 10);
if (maxres == 0) return(-1);
bots_message("Start aligning ");
#pragma omp parallel
{
#pragma omp single private(i,n,si,sj,len1,m)
for (si = 0; si < nseqs; si++) {
n = seqlen_array[si+1];
for (i = 1, len1 = 0; i <= n; i++) {
char c = seq_array[si+1][i];
if ((c != gap_pos1) && (c != gap_pos2)) len1++;
}
for (sj = si + 1; sj < nseqs; sj++)
{
m = seqlen_array[sj+1];
if ( n == 0 || m == 0 ) {
bench_output[si*nseqs+sj] = (int) 1.0;
} else {
#pragma omp task \
private(i,gg,len2,mm_score) firstprivate(m,n,si,sj,len1) \
shared(nseqs, bench_output,seqlen_array,seq_array,gap_pos1,gap_pos2,pw_ge_penalty,pw_go_penalty,mat_avscore)
{
int se1, se2, sb1, sb2, maxscore, seq1, seq2, g, gh;
int displ[2*MAX_ALN_LENGTH+1];
int print_ptr, last_print;
for (i = 1, len2 = 0; i <= m; i++) {
char c = seq_array[sj+1][i];
if ((c != gap_pos1) && (c != gap_pos2)) len2++;
}
if ( dnaFlag == TRUE ) {
g = (int) ( 2 * INT_SCALE * pw_go_penalty * gap_open_scale ); // gapOpen
gh = (int) (INT_SCALE * pw_ge_penalty * gap_extend_scale); //gapExtend
} else {
gg = pw_go_penalty + log((double) MIN(n, m)); // temporary value
g = (int) ((mat_avscore <= 0) ? (2 * INT_SCALE * gg) : (2 * mat_avscore * gg * gap_open_scale) ); // gapOpen
gh = (int) (INT_SCALE * pw_ge_penalty); //gapExtend
}
seq1 = si + 1;
seq2 = sj + 1;
forward_pass(&seq_array[seq1][0], &seq_array[seq2][0], n, m, &se1, &se2, &maxscore, g, gh);
reverse_pass(&seq_array[seq1][0], &seq_array[seq2][0], se1, se2, &sb1, &sb2, maxscore, g, gh);
print_ptr = 1;
last_print = 0;
diff(sb1-1, sb2-1, se1-sb1+1, se2-sb2+1, 0, 0, &print_ptr, &last_print, displ, seq1, seq2, g, gh);
mm_score = tracepath(sb1, sb2, &print_ptr, displ, seq1, seq2);
if (len1 == 0 || len2 == 0) mm_score = 0.0;
else mm_score /= (double) MIN(len1,len2);
bench_output[si*nseqs+sj] = (int) mm_score;
} // end task
} // end if (n == 0 || m == 0)
} // for (j)
} // end parallel for (i)
} // end parallel
bots_message(" completed!\n");
return 0;
}
int pairalign_seq()
{
int i, n, m, si, sj;
int len1, len2, maxres;
double gg, mm_score;
int *mat_xref, *matptr;
matptr = gon250mt;
mat_xref = def_aa_xref;
maxres = get_matrix(matptr, mat_xref, 10);
if (maxres == 0) return(-1);
for (si = 0; si < nseqs; si++) {
n = seqlen_array[si+1];
for (i = 1, len1 = 0; i <= n; i++) {
char c = seq_array[si+1][i];
if ((c != gap_pos1) && (c != gap_pos2)) len1++;
}
for (sj = si + 1; sj < nseqs; sj++) {
m = seqlen_array[sj+1];
if ( n == 0 || m == 0 ) {
seq_output[si*nseqs+sj] = (int) 1.0;
} else {
int se1, se2, sb1, sb2, maxscore, seq1, seq2, g, gh;
int displ[2*MAX_ALN_LENGTH+1];
int print_ptr, last_print;
for (i = 1, len2 = 0; i <= m; i++) {
char c = seq_array[sj+1][i];
if ((c != gap_pos1) && (c != gap_pos2)) len2++;
}
if ( dnaFlag == TRUE ) {
g = (int) ( 2 * INT_SCALE * pw_go_penalty * gap_open_scale ); // gapOpen
gh = (int) (INT_SCALE * pw_ge_penalty * gap_extend_scale); //gapExtend
} else {
gg = pw_go_penalty + log((double) MIN(n, m)); // temporary value
g = (int) ((mat_avscore <= 0) ? (2 * INT_SCALE * gg) : (2 * mat_avscore * gg * gap_open_scale) ); // gapOpen
gh = (int) (INT_SCALE * pw_ge_penalty); //gapExtend
}
seq1 = si + 1;
seq2 = sj + 1;
forward_pass(&seq_array[seq1][0], &seq_array[seq2][0], n, m, &se1, &se2, &maxscore, g, gh);
reverse_pass(&seq_array[seq1][0], &seq_array[seq2][0], se1, se2, &sb1, &sb2, maxscore, g, gh);
print_ptr = 1;
last_print = 0;
diff(sb1-1, sb2-1, se1-sb1+1, se2-sb2+1, 0, 0, &print_ptr, &last_print, displ, seq1, seq2, g, gh);
mm_score = tracepath(sb1, sb2, &print_ptr, displ, seq1, seq2);
if (len1 == 0 || len2 == 0) mm_score = 0.0;
else mm_score /= (double) MIN(len1,len2);
seq_output[si*nseqs+sj] = (int) mm_score;
}
}
}
return 0;
}
/***********************************************************************
* :
**********************************************************************/
void init_matrix(void)
{
int i, j;
char c1, c2;
gap_pos1 = NUMRES - 2;
gap_pos2 = NUMRES - 1;
max_aa = strlen(amino_acid_codes) - 2;
for (i = 0; i < NUMRES; i++) def_aa_xref[i] = -1;
for (i = 0; (c1 = amino_acid_order[i]); i++)
for (j = 0; (c2 = amino_acid_codes[j]); j++)
if (c1 == c2) {def_aa_xref[i] = j; break;}
}
void pairalign_init (char *filename)
{
int i;
if (!filename || !filename[0]) {
bots_error(0, "Please specify an input file with the -f option\n");
}
init_matrix();
nseqs = readseqs(filename);
bots_message("Multiple Pairwise Alignment (%d sequences)\n",nseqs);
for (i = 1; i <= nseqs; i++)
bots_debug("Sequence %d: %s %6.d aa\n", i, names[i], seqlen_array[i]);
if ( clustalw == TRUE ) {
gap_open_scale = 0.6667;
gap_extend_scale = 0.751;
} else {
gap_open_scale = 1.0;
gap_extend_scale = 1.0;
}
if ( dnaFlag == TRUE ) {
// Using DNA parameters
ktup = 2;
window = 4;
signif = 4;
gap_open = 15.00;
gap_extend = 6.66;
pw_go_penalty = 15.00;
pw_ge_penalty = 6.66;
} else {
// Using protein parameters
ktup = 1;
window = 5;
signif = 5;
gap_open = 10.0;
gap_extend = 0.2;
pw_go_penalty = 10.0;
pw_ge_penalty = 0.1;
}
}
void align_init ()
{
int i,j;
bench_output = (int *) malloc(sizeof(int)*nseqs*nseqs);
for(i = 0; i<nseqs; i++)
for(j = 0; j<nseqs; j++)
bench_output[i*nseqs+j] = 0;
}
void align()
{
pairalign();
}
void align_seq_init ()
{
int i,j;
seq_output = (int *) malloc(sizeof(int)*nseqs*nseqs);
bench_output = (int *) malloc(sizeof(int)*nseqs*nseqs);
for(i = 0; i<nseqs; i++)
for(j = 0; j<nseqs; j++)
seq_output[i*nseqs+j] = 0;
}
void align_seq()
{
pairalign_seq();
}
void align_end ()
{
int i,j;
for(i = 0; i<nseqs; i++)
for(j = 0; j<nseqs; j++)
if (bench_output[i*nseqs+j] != 0)
bots_debug("Benchmark sequences (%d:%d) Aligned. Score: %d\n", i+1 , j+1 , (int) bench_output[i*nseqs+j]);
}
int align_verify ()
{
int i,j;
int result = BOTS_RESULT_SUCCESSFUL;
for(i = 0; i<nseqs; i++)
{
for(j = 0; j<nseqs; j++)
{
if (bench_output[i*nseqs+j] != seq_output[i*nseqs+j])
{
bots_message("Error: Optimized prot. (%3d:%3d)=%5d Sequential prot. (%3d:%3d)=%5d\n",
i+1, j+1, (int) bench_output[i*nseqs+j],
i+1, j+1, (int) seq_output[i*nseqs+j]);
result = BOTS_RESULT_UNSUCCESSFUL;
}
}
}
return result;
}
|
trsm_x_csc_u_hi_col.c | #include "alphasparse/opt.h"
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_CSC *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
const ALPHA_INT m = A->rows;
const ALPHA_INT n = A->cols;
ALPHA_INT num_thread = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
for(ALPHA_INT out_y_col = 0; out_y_col < columns;out_y_col++){
for(int i = 0 ; i < n ; i++){
//initialize y[] as x[]*aplha
alpha_mul(y[index2(out_y_col,i,ldy)], alpha, x[index2(out_y_col,i,ldx)]);
}
for(ALPHA_INT c = n - 1; c >= 0;--c){//csc format, traverse by column
for(ALPHA_INT ai = A->cols_end[c]-1; ai >= A->cols_start[c];ai--){
ALPHA_INT ar = A->row_indx[ai];
if(ar < c){
alpha_msube(y[index2(out_y_col,ar,ldy)], A->values[ai], y[index2(out_y_col,c,ldy)]);
}
}
}
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
GB_unop__lnot_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__lnot_fp64_fp64)
// op(A') function: GB (_unop_tran__lnot_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = !(z != 0) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__lnot_fp64_fp64)
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = !(z != 0) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
double z = aij ;
Cx [p] = !(z != 0) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__lnot_fp64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_kroner.c | //------------------------------------------------------------------------------
// GB_kroner: Kronecker product, C = kron (A,B)
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// C = kron(A,B) where op determines the binary multiplier to use. The type of
// A and B are compatible with the x and y inputs of z=op(x,y), but can be
// different. The type of C is the type of z. C is hypersparse if either A
// or B are hypersparse.
// FUTURE: this would be faster with built-in types and operators.
// FUTURE: at most one thread is used for each vector of C=kron(A,B). The
// matrix C is normally very large, but if both A and B are n-by-1, then C is
// n^2-by-1 and only a single thread is used. A better method for this case
// would construct vectors of C in parallel.
// FUTURE: each vector C(:,k) takes O(nnz(C(:,k))) work, but this is not
// accounted for in the parallel load-balancing.
#define GB_FREE_WORKSPACE \
{ \
GB_Matrix_free (&A2) ; \
GB_Matrix_free (&B2) ; \
}
#define GB_FREE_ALL \
{ \
GB_FREE_WORKSPACE ; \
GB_phbix_free (C) ; \
}
#include "GB_kron.h"
#include "GB_emult.h"
GrB_Info GB_kroner // C = kron (A,B)
(
GrB_Matrix C, // output matrix
const bool C_is_csc, // desired format of C
const GrB_BinaryOp op, // multiply operator
const GrB_Matrix A_in, // input matrix
bool A_is_pattern, // true if values of A are not used
const GrB_Matrix B_in, // input matrix
bool B_is_pattern, // true if values of B are not used
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GrB_Info info ;
ASSERT (C != NULL && (C->static_header || GBNSTATIC)) ;
struct GB_Matrix_opaque A2_header, B2_header ;
GrB_Matrix A2 = NULL, B2 = NULL ;
ASSERT_MATRIX_OK (A_in, "A_in for kron (A,B)", GB0) ;
ASSERT_MATRIX_OK (B_in, "B_in for kron (A,B)", GB0) ;
ASSERT_BINARYOP_OK (op, "op for kron (A,B)", GB0) ;
//--------------------------------------------------------------------------
// finish any pending work
//--------------------------------------------------------------------------
GB_MATRIX_WAIT (A_in) ;
GB_MATRIX_WAIT (B_in) ;
//--------------------------------------------------------------------------
// bitmap case: create sparse copies of A and B if they are bitmap
//--------------------------------------------------------------------------
GrB_Matrix A = A_in ;
if (GB_IS_BITMAP (A))
{
GBURBLE ("A:") ;
// set A2->iso = A->iso OK: no need for burble
GB_CLEAR_STATIC_HEADER (A2, &A2_header) ;
GB_OK (GB_dup_worker (&A2, A->iso, A, true, NULL, Context)) ;
ASSERT_MATRIX_OK (A2, "dup A2 for kron (A,B)", GB0) ;
GB_OK (GB_convert_bitmap_to_sparse (A2, Context)) ;
ASSERT_MATRIX_OK (A2, "to sparse, A2 for kron (A,B)", GB0) ;
A = A2 ;
}
GrB_Matrix B = B_in ;
if (GB_IS_BITMAP (B))
{
GBURBLE ("B:") ;
// set B2->iso = B->iso OK: no need for burble
GB_CLEAR_STATIC_HEADER (B2, &B2_header) ;
GB_OK (GB_dup_worker (&B2, B->iso, B, true, NULL, Context)) ;
ASSERT_MATRIX_OK (B2, "dup B2 for kron (A,B)", GB0) ;
GB_OK (GB_convert_bitmap_to_sparse (B2, Context)) ;
ASSERT_MATRIX_OK (B2, "to sparse, B2 for kron (A,B)", GB0) ;
B = B2 ;
}
//--------------------------------------------------------------------------
// get inputs
//--------------------------------------------------------------------------
const int64_t *restrict Ap = A->p ;
const int64_t *restrict Ah = A->h ;
const int64_t *restrict Ai = A->i ;
const GB_void *restrict Ax = A_is_pattern ? NULL : ((GB_void *) A->x) ;
const int64_t asize = A->type->size ;
const int64_t avlen = A->vlen ;
const int64_t avdim = A->vdim ;
int64_t anvec = A->nvec ;
int64_t anz = GB_nnz (A) ;
const int64_t *restrict Bp = B->p ;
const int64_t *restrict Bh = B->h ;
const int64_t *restrict Bi = B->i ;
const GB_void *restrict Bx = B_is_pattern ? NULL : ((GB_void *) B->x) ;
const int64_t bsize = B->type->size ;
const int64_t bvlen = B->vlen ;
const int64_t bvdim = B->vdim ;
int64_t bnvec = B->nvec ;
int64_t bnz = GB_nnz (B) ;
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
double work = ((double) anz) * ((double) bnz)
+ (((double) anvec) * ((double) bnvec)) ;
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (work, chunk, nthreads_max) ;
//--------------------------------------------------------------------------
// check if C is iso and compute its iso value if it is
//--------------------------------------------------------------------------
GrB_Type ctype = op->ztype ;
const size_t csize = ctype->size ;
GB_void cscalar [GB_VLA(csize)] ;
bool C_iso = GB_iso_emult (cscalar, ctype, A, B, op) ;
//--------------------------------------------------------------------------
// allocate the output matrix C
//--------------------------------------------------------------------------
// C has the same type as z for the multiply operator, z=op(x,y)
GrB_Index cvlen, cvdim, cnzmax, cnvec ;
bool ok = GB_int64_multiply (&cvlen, avlen, bvlen) ;
ok = ok & GB_int64_multiply (&cvdim, avdim, bvdim) ;
ok = ok & GB_int64_multiply (&cnzmax, anz, bnz) ;
ok = ok & GB_int64_multiply (&cnvec, anvec, bnvec) ;
ASSERT (ok) ;
if (C_iso)
{
// the values of A and B are no longer needed if C is iso
GBURBLE ("(iso kron) ") ;
A_is_pattern = true ;
B_is_pattern = true ;
}
// C is hypersparse if either A or B are hypersparse. It is never bitmap.
bool C_is_hyper = (cvdim > 1) && (Ah != NULL || Bh != NULL) ;
bool C_is_full = GB_as_if_full (A) && GB_as_if_full (B) ;
int sparsity = C_is_full ? GxB_FULL :
((C_is_hyper) ? GxB_HYPERSPARSE : GxB_SPARSE) ;
// set C->iso = C_iso OK
GB_OK (GB_new_bix (&C, // full, sparse, or hyper; existing header
ctype, (int64_t) cvlen, (int64_t) cvdim, GB_Ap_malloc, C_is_csc,
sparsity, true, B->hyper_switch, cnvec, cnzmax, true, C_iso, Context)) ;
//--------------------------------------------------------------------------
// get C and the operator
//--------------------------------------------------------------------------
int64_t *restrict Cp = C->p ;
int64_t *restrict Ch = C->h ;
int64_t *restrict Ci = C->i ;
GB_void *restrict Cx = (GB_void *) C->x ;
int64_t *restrict Cx_int64 = NULL ;
int32_t *restrict Cx_int32 = NULL ;
GxB_binary_function fmult = op->binop_function ;
GB_Opcode opcode = op->opcode ;
bool op_is_positional = GB_OPCODE_IS_POSITIONAL (opcode) ;
GB_cast_function cast_A = NULL, cast_B = NULL ;
if (!A_is_pattern)
{
cast_A = GB_cast_factory (op->xtype->code, A->type->code) ;
}
if (!B_is_pattern)
{
cast_B = GB_cast_factory (op->ytype->code, B->type->code) ;
}
int64_t offset = 0 ;
if (op_is_positional)
{
offset = GB_positional_offset (opcode, NULL) ;
Cx_int64 = (int64_t *) Cx ;
Cx_int32 = (int32_t *) Cx ;
}
bool is64 = (ctype == GrB_INT64) ;
//--------------------------------------------------------------------------
// compute the column counts of C, and C->h if C is hypersparse
//--------------------------------------------------------------------------
int64_t kC ;
if (!C_is_full)
{
#pragma omp parallel for num_threads(nthreads) schedule(guided)
for (kC = 0 ; kC < cnvec ; kC++)
{
const int64_t kA = kC / bnvec ;
const int64_t kB = kC % bnvec ;
// get A(:,jA), the (kA)th vector of A
const int64_t jA = GBH (Ah, kA) ;
const int64_t aknz = (Ap == NULL) ? avlen : (Ap [kA+1] - Ap [kA]) ;
// get B(:,jB), the (kB)th vector of B
const int64_t jB = GBH (Bh, kB) ;
const int64_t bknz = (Bp == NULL) ? bvlen : (Bp [kB+1] - Bp [kB]) ;
// determine # entries in C(:,jC), the (kC)th vector of C
// int64_t kC = kA * bnvec + kB ;
if (!C_is_full)
{
Cp [kC] = aknz * bknz ;
}
if (C_is_hyper)
{
Ch [kC] = jA * bvdim + jB ;
}
}
GB_cumsum (Cp, cnvec, &(C->nvec_nonempty), nthreads, Context) ;
if (C_is_hyper) C->nvec = cnvec ;
}
C->magic = GB_MAGIC ;
//--------------------------------------------------------------------------
// C = kron (A,B) where C is iso and full
//--------------------------------------------------------------------------
if (C_iso)
{
// Cx [0] = cscalar = op (A,B)
memcpy (C->x, cscalar, csize) ;
if (C_is_full)
{
// no more work to do if C is iso and full
ASSERT_MATRIX_OK (C, "C=kron(A,B), iso full", GB0) ;
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
}
}
//--------------------------------------------------------------------------
// C = kron (A,B)
//--------------------------------------------------------------------------
const bool A_iso = A->iso ;
const bool B_iso = B->iso ;
#pragma omp parallel for num_threads(nthreads) schedule(guided)
for (kC = 0 ; kC < cnvec ; kC++)
{
int64_t kA = kC / bnvec ;
int64_t kB = kC % bnvec ;
// get B(:,jB), the (kB)th vector of B
int64_t jB = GBH (Bh, kB) ;
int64_t pB_start = GBP (Bp, kB, bvlen) ;
int64_t pB_end = GBP (Bp, kB+1, bvlen) ;
int64_t bknz = pB_start - pB_end ;
if (bknz == 0) continue ;
GB_void bwork [GB_VLA(bsize)] ;
if (!B_is_pattern && B_iso)
{
cast_B (bwork, Bx, bsize) ;
}
// get C(:,jC), the (kC)th vector of C
// int64_t kC = kA * bnvec + kB ;
int64_t pC = GBP (Cp, kC, cvlen) ;
// get A(:,jA), the (kA)th vector of A
int64_t jA = GBH (Ah, kA) ;
int64_t pA_start = GBP (Ap, kA, avlen) ;
int64_t pA_end = GBP (Ap, kA+1, avlen) ;
GB_void awork [GB_VLA(asize)] ;
if (!A_is_pattern && A_iso)
{
cast_A (awork, Ax, asize) ;
}
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
// awork = A(iA,jA), typecasted to op->xtype
int64_t iA = GBI (Ai, pA, avlen) ;
int64_t iAblock = iA * bvlen ;
if (!A_is_pattern && !A_iso)
{
cast_A (awork, Ax + (pA*asize), asize) ;
}
for (int64_t pB = pB_start ; pB < pB_end ; pB++)
{
// bwork = B(iB,jB), typecasted to op->ytype
int64_t iB = GBI (Bi, pB, bvlen) ;
if (!B_is_pattern && !B_iso)
{
cast_B (bwork, Bx +(pB*bsize), bsize) ;
}
// C(iC,jC) = A(iA,jA) * B(iB,jB)
if (!C_is_full)
{
int64_t iC = iAblock + iB ;
Ci [pC] = iC ;
}
if (op_is_positional)
{
// positional binary operator
switch (opcode)
{
case GB_FIRSTI_binop_code :
// z = first_i(A(iA,jA),y) == iA
case GB_FIRSTI1_binop_code :
// z = first_i1(A(iA,jA),y) == iA+1
if (is64)
{
Cx_int64 [pC] = iA + offset ;
}
else
{
Cx_int32 [pC] = (int32_t) (iA + offset) ;
}
break ;
case GB_FIRSTJ_binop_code :
// z = first_j(A(iA,jA),y) == jA
case GB_FIRSTJ1_binop_code :
// z = first_j1(A(iA,jA),y) == jA+1
if (is64)
{
Cx_int64 [pC] = jA + offset ;
}
else
{
Cx_int32 [pC] = (int32_t) (jA + offset) ;
}
break ;
case GB_SECONDI_binop_code :
// z = second_i(x,B(iB,jB)) == iB
case GB_SECONDI1_binop_code :
// z = second_i1(x,B(iB,jB)) == iB+1
if (is64)
{
Cx_int64 [pC] = iB + offset ;
}
else
{
Cx_int32 [pC] = (int32_t) (iB + offset) ;
}
break ;
case GB_SECONDJ_binop_code :
// z = second_j(x,B(iB,jB)) == jB
case GB_SECONDJ1_binop_code :
// z = second_j1(x,B(iB,jB)) == jB+1
if (is64)
{
Cx_int64 [pC] = jB + offset ;
}
else
{
Cx_int32 [pC] = (int32_t) (jB + offset) ;
}
break ;
default: ;
}
}
else if (!C_iso)
{
// standard binary operator
fmult (Cx +(pC*csize), awork, bwork) ;
}
pC++ ;
}
}
}
//--------------------------------------------------------------------------
// remove empty vectors from C, if hypersparse
//--------------------------------------------------------------------------
GB_OK (GB_hypermatrix_prune (C, Context)) ;
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
ASSERT_MATRIX_OK (C, "C=kron(A,B)", GB0) ;
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
}
|
uni_dir_ctx.h | /*
* Copyright (c) 2018 Intel Corporation. All rights reserved.
* This software is available to you under the BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
static inline
void uni_bw_ctx(int len, perf_metrics_t *metric_info, int streaming_node)
{
double start = 0.0, end = 0.0;
unsigned long int i, j;
int dest = partner_node(metric_info);
static int check_once = 0;
if (!check_once) {
/* check to see whether sender and receiver are the same process */
if (dest == metric_info->my_node) {
fprintf(stderr, "Warning: Sender and receiver are the same "
"process (%d)\n", dest);
}
/* hostname validation for all sender and receiver processes */
int status = check_hostname_validation(metric_info);
if (status != 0) return;
check_once++;
}
shmem_barrier_all();
if (streaming_node) {
#pragma omp parallel default(none) firstprivate(len, dest) private(i, j) \
shared(metric_info, start, end) num_threads(metric_info->nthreads)
{
const int thread_id = omp_get_thread_num();
shmem_ctx_t ctx;
int err = shmem_ctx_create(SHMEM_CTX_PRIVATE, &ctx);
if (err) {
printf("PE %d, Thr. %d: Error, context creation failed\n",
metric_info->my_node, thread_id);
shmem_global_exit(1);
}
for (i = 0; i < metric_info->warmup; i++) {
for (j = 0; j < metric_info->window_size; j++) {
#ifdef USE_NONBLOCKING_API
shmem_ctx_putmem_nbi(ctx, metric_info->dest + thread_id * len,
metric_info->src + thread_id * len, len, dest);
#else
shmem_ctx_putmem(ctx, metric_info->dest + thread_id * len,
metric_info->src + thread_id * len, len, dest);
#endif
}
shmem_ctx_quiet(ctx);
}
shmem_ctx_destroy(ctx);
}
}
shmem_barrier_all();
if (streaming_node) {
#pragma omp parallel default(none) firstprivate(len, dest) private(i, j) \
shared(metric_info, start, end) num_threads(metric_info->nthreads)
{
const int thread_id = omp_get_thread_num();
shmem_ctx_t ctx;
int err = shmem_ctx_create(SHMEM_CTX_PRIVATE, &ctx);
if (err) {
printf("PE %d, Thr. %d: Error, context creation failed\n",
metric_info->my_node, thread_id);
shmem_global_exit(1);
}
#pragma omp barrier
#pragma omp master
{
start = perf_shmemx_wtime();
}
for (i = 0; i < metric_info->trials; i++) {
for (j = 0; j < metric_info->window_size; j++) {
#ifdef USE_NONBLOCKING_API
shmem_ctx_putmem_nbi(ctx, metric_info->dest + thread_id * len,
metric_info->src + thread_id * len, len, dest);
#else
shmem_ctx_putmem(ctx, metric_info->dest + thread_id * len,
metric_info->src + thread_id * len, len, dest);
#endif
}
shmem_ctx_quiet(ctx);
}
shmem_ctx_destroy(ctx);
}
}
shmem_barrier_all();
if (streaming_node) {
end = perf_shmemx_wtime();
calc_and_print_results(end, start, len, metric_info);
}
shmem_barrier_all();
}
|
move_shallow_water_particle_utility.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Miguel Maso Sotomayor
// Pablo Becker
//
#if !defined(KRATOS_MOVE_SHALLOW_WATER_PARTICLE_UTILITY_H_INCLUDED)
#define KRATOS_MOVE_SHALLOW_WATER_PARTICLE_UTILITY_H_INCLUDED
///@defgroup MoveShallowWaterParticleUtility
///@brief Utility to move particles on the eulerian mesh with an
/// explicit scheme. This is the basic tool of the pfem2 framework
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/node.h"
#include "includes/checks.h"
#include "includes/dof.h"
#include "includes/variables.h"
#include "containers/array_1d.h"
#include "containers/data_value_container.h"
#include "includes/mesh.h"
#include "utilities/math_utils.h"
#include "processes/node_erase_process.h"
#include "utilities/geometry_utilities.h"
#include "includes/model_part.h"
#include "includes/kratos_parameters.h"
#include "spatial_containers/spatial_containers.h"
#include "spatial_containers/bounding_box.h"
#include "spatial_containers/cell.h"
#include "spatial_containers/bins_dynamic_objects.h"
#include "utilities/spatial_containers_configure.h"
#include "geometries/line_2d_2.h"
#include "geometries/triangle_2d_3.h"
#include "geometries/triangle_3d_3.h"
#include "geometries/point.h"
#include "shallow_water_application.h"
#include "shallow_water_particle.h"
#include "utilities/openmp_utils.h"
#include "time.h"
//#include "processes/process.h"
namespace Kratos
{
//this class is to be modified by the user to customize the interpolation process
template< unsigned int TDim>
class MoveShallowWaterParticleUtility
{
public:
typedef SpatialContainersConfigure<TDim> Configure;
typedef typename Configure::PointType PointType;
typedef typename Configure::ContainerType ContainerType;
typedef typename Configure::IteratorType IteratorType;
typedef typename Configure::ResultContainerType ResultContainerType;
typedef typename Configure::ResultIteratorType ResultIteratorType;
typedef PointerVector< ShallowParticle, ShallowParticle*, std::vector<ShallowParticle*> > ParticlePointerVector;
KRATOS_CLASS_POINTER_DEFINITION(MoveShallowWaterParticleUtility);
//template<unsigned int TDim>
MoveShallowWaterParticleUtility(ModelPart& rModelPart, Parameters rParameters) :
mrModelPart(rModelPart),
mScalarVar1(KratosComponents< Variable<double> >::Get( rParameters["convection_scalar_variable"].GetString() ) ),
mVectorVar1(KratosComponents< Variable<array_1d<double,3> > >::Get( rParameters["convection_vector_variable"].GetString() ) )
{
KRATOS_TRY
std::cout << "Initializing moveparticle utility for scalar transport" << std::endl;
Parameters default_parameters( R"(
{
"convection_scalar_variable" : "HEIGHT",
"convection_vector_variable" : "VELOCITY",
"maximum_number_of_particles" : 16
} )" );
// Now validate agains defaults -- this also ensures no type mismatch
rParameters.ValidateAndAssignDefaults(default_parameters);
m_scalar_var1_name = rParameters["convection_scalar_variable"].GetString();
m_vector_var1_name = rParameters["convection_vector_variable"].GetString();
mMaxNumberOfParticles = rParameters["maximum_number_of_particles"].GetDouble();
Check();
//storing water and air density and their inverses, just in case it is needed for the streamline integration
//loop in elements to change their ID to their position in the array. Easier to get information later.
//DO NOT PARALELIZE THIS! IT MUST BE SERIAL!!!!!!!!!!!!!!!!!!!!!!
ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
for(unsigned int ii=0; ii<mrModelPart.Elements().size(); ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
ielem->SetId(ii+1);
}
mLastElemId= (mrModelPart.ElementsEnd()-1)->Id();
int node_id=0;
// we look for the smallest edge. could be used as a weighting function when going lagrangian->eulerian instead of traditional shape functions(method currently used)
ModelPart::NodesContainerType::iterator inodebegin = mrModelPart.NodesBegin();
std::vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator pnode = inodebegin+ii;
array_1d<double,3> position_node;
double distance=0.0;
position_node = pnode->Coordinates();
WeakPointerVector< Node<3> >& rneigh = pnode->GetValue(NEIGHBOUR_NODES);
//we loop all the nodes to check all the edges
const double number_of_neighbours = static_cast<double>(rneigh.size());
for( WeakPointerVector<Node<3> >::iterator inode = rneigh.begin(); inode!=rneigh.end(); inode++)
{
array_1d<double,3> position_difference;
position_difference = inode->Coordinates() - position_node;
const double current_distance = norm_2( position_difference );
distance += current_distance / number_of_neighbours;
}
//and we save the largest edge.
pnode->SetValue(MEAN_SIZE, distance);
node_id=pnode->GetId();
}
}
mLastNodeId=node_id;
//we also calculate the element mean size in the same way, for the courant number
//also we set the right size to the LHS column for the pressure enrichments, in order to recover correctly the enrichment pressure
std::vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);
//before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element.
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
double elem_size;
array_1d<double,3> Edge(3,0.0);
Edge = ielem->GetGeometry()[1].Coordinates() - ielem->GetGeometry()[0].Coordinates();
elem_size = Edge[0]*Edge[0];
for (unsigned int d = 1; d < TDim; d++)
elem_size += Edge[d]*Edge[d];
for (unsigned int i = 2; i < (TDim+1); i++)
for(unsigned int j = 0; j < i; j++)
{
Edge = ielem->GetGeometry()[i].Coordinates() - ielem->GetGeometry()[j].Coordinates();
double Length = Edge[0]*Edge[0];
for (unsigned int d = 1; d < TDim; d++)
Length += Edge[d]*Edge[d];
if (Length < elem_size) elem_size = Length;
}
elem_size = sqrt(elem_size);
ielem->SetValue(MEAN_SIZE, elem_size);
}
}
//matrix containing the position of the 4/15/45 particles that we will seed at the beggining
BoundedMatrix<double, 5*(1+TDim), 3 > pos;
BoundedMatrix<double, 5*(1+TDim), (1+TDim) > N;
int particle_id=0;
mNElems = mrModelPart.Elements().size();
std::cout << " about to resize vectors" << std::endl;
//setting the right size to the vector containing the particles assigned to each element
//particles vector. this vector contains ALL the particles in the simulation.
mParticlesVector.resize(mNElems*mMaxNumberOfParticles);
//and this vector contains the current number of particles that are in each element (currently zero)
mNumOfParticlesInElems.resize(mNElems);
mNumOfParticlesInElems=ZeroVector(mNElems);
//when moving the particles, an auxiliary vector is necessary (to store the previous number)
mNumOfParticlesInElemsAux.resize(mNElems);
//each element will have a list of pointers to all the particles that are inside.
//this vector contains the pointers to the vector of (particle) pointers of each element.
mVectorOfParticlePointersVectors.resize(mNElems);
//int artz;
//std::cin >> artz;
int i_int=0; //careful! it's not the id, but the position inside the array!
std::cout << " about to create particles" << std::endl;
//now we seed: LOOP IN ELEMENTS
//using loop index, DO NOT paralelize this! change lines : mparticles_in_elems_pointers((ii*mMaxNumberOfParticles)+mparticles_in_elems_integers(ii)) = pparticle; and the next one
mOffset=0;
//ShallowParticle& firstparticle = mParticlesVector[0];
for(unsigned int ii=0; ii<mrModelPart.Elements().size(); ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
//(ielem->GetValue(BED_PARTICLE_POINTERS)) = ParticlePointerVector( mMaxNumberOfParticles*2, &firstparticle );
//ParticlePointerVector& particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
//now we link the mpointers_to_particle_pointers_vectors to the corresponding element
//mpointers_to_particle_pointers_vectors(ii) = &particle_pointers;
//now we resize the vector of particle pointers. it is double sized because we move the particles from an initial position (first half) to a final position (second half).
//for(int j=0; j<(mMaxNumberOfParticles*2); j++)
// particle_pointers.push_back(&firstparticle);
mVectorOfParticlePointersVectors[ii] = ParticlePointerVector( mMaxNumberOfParticles*2 );
ParticlePointerVector& particle_pointers = mVectorOfParticlePointersVectors[ii];
//int & number_of_particles = ielem->GetValue(NUMBER_OF_BED_PARTICLES);
int & number_of_particles = mNumOfParticlesInElems[ii];
number_of_particles=0;
Geometry< Node<3> >& geom = ielem->GetGeometry();
//unsigned int elem_id = ielem->Id();
ComputeGaussPointPositions_initial(geom, pos, N); //we also have the standard (4), and 45
//now we seed the particles in the current element
for (unsigned int j = 0; j < pos.size1(); j++)
{
++particle_id;
ShallowParticle& pparticle = mParticlesVector[particle_id-1];
//~ pparticle.X()=pos(j,0);
//~ pparticle.Y()=pos(j,1);
//~ pparticle.Z()=pos(j,2);
pparticle.Coordinates() = row(pos,j);
pparticle.GetEraseFlag()=false;
array_1d<float, 3 > & vector1 = pparticle.GetVector1();
float & scalar1 = pparticle.GetScalar1();
noalias(vector1) = ZeroVector(3);
scalar1=0.0;
for (unsigned int k = 0; k < (TDim+1); k++)
{
scalar1 += N(j, k) * geom[k].FastGetSolutionStepValue(mScalarVar1);
noalias(vector1) += N(j, k) * geom[k].FastGetSolutionStepValue(mVectorVar1);
}
particle_pointers(j) = &pparticle;
number_of_particles++ ;
}
++i_int;
}
mNParticles=particle_id; //we save the last particle created as the total number of particles we have. For the moment this is true.
std::cout << " [Creating particles : " << mNParticles << " particles created]" << std::endl;
mParticlePrintingToolInitialized=false;
KRATOS_CATCH("")
}
~MoveShallowWaterParticleUtility()
{}
void MountBin()
{
KRATOS_TRY
//copy the elements to a new container, as the list will
//be shuffled duringthe construction of the tree
ContainerType& rElements = mrModelPart.ElementsArray();
IteratorType it_begin = rElements.begin();
IteratorType it_end = rElements.end();
//const int number_of_elem = rElements.size();
typename BinsObjectDynamic<Configure>::Pointer paux = typename BinsObjectDynamic<Configure>::Pointer(new BinsObjectDynamic<Configure>(it_begin, it_end ) );
paux.swap(mpBinsObjectDynamic);
//BinsObjectDynamic<Configure> mpBinsObjectDynamic(it_begin, it_end );
std::cout << " finished mounting Bins" << std::endl;
KRATOS_CATCH("")
}
/// Calculates the mean velocity
/** This function computes the mean velocity within an element and
* stores it in MEAN_VEL_OVER_ELEM_SIZE variable.
* This variable keeps the courant number aprox 0.1 in each substep
*
* @see MoveParticle
* @see MoveParticleInverseWay
*/
void CalculateVelOverElemSize()
{
KRATOS_TRY
//ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
const double nodal_weight = 1.0/ (1.0 + double (TDim) );
ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
std::vector<unsigned int> element_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
Geometry<Node<3> >& geom = ielem->GetGeometry();
array_1d<double, 3 >vector_mean_velocity=ZeroVector(3);
for (unsigned int i=0; i != (TDim+1) ; i++)
vector_mean_velocity += geom[i].FastGetSolutionStepValue(VELOCITY);
vector_mean_velocity *= nodal_weight;
//~ const double mean_velocity = sqrt ( pow(vector_mean_velocity[0],2) + pow(vector_mean_velocity[1],2) + pow(vector_mean_velocity[2],2) );
const double mean_velocity = norm_2( vector_mean_velocity );
ielem->SetValue(MEAN_VEL_OVER_ELEM_SIZE, mean_velocity / ( ielem->GetValue(MEAN_SIZE) ) );
}
}
KRATOS_CATCH("")
}
/// Reset the boundary conditions
/** When a variable is fixed this function resets the nodal values
* with the previous time step
*/
void ResetBoundaryConditions()
{
KRATOS_TRY
typedef VariableComponent<VectorComponentAdaptor<array_1d<double, 3> > > component_type;
component_type vector_var_x = KratosComponents< component_type >::Get(m_vector_var1_name+std::string("_X"));
component_type vector_var_y = KratosComponents< component_type >::Get(m_vector_var1_name+std::string("_Y"));
component_type vector_var_z = KratosComponents< component_type >::Get(m_vector_var1_name+std::string("_Z"));
ModelPart::NodesContainerType::iterator inodebegin = mrModelPart.NodesBegin();
std::vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
if (inode->IsFixed(mScalarVar1))
{
inode->FastGetSolutionStepValue(mScalarVar1)=inode->GetSolutionStepValue(mScalarVar1,1);
}
if (inode->IsFixed(vector_var_x))
{
inode->FastGetSolutionStepValue(vector_var_x)=inode->GetSolutionStepValue(vector_var_x,1);
}
if (inode->IsFixed(vector_var_y))
{
inode->FastGetSolutionStepValue(vector_var_y)=inode->GetSolutionStepValue(vector_var_y,1);
}
if (inode->IsFixed(vector_var_z))
{
inode->FastGetSolutionStepValue(vector_var_z)=inode->GetSolutionStepValue(vector_var_z,1);
}
}
}
KRATOS_CATCH("")
}
/// Auxiliar function to compute the "delta variables"
/** Delta variables are the difference between two time steps.
* It's value is used to update particles info
*
* @see CorrectParticlesWithoutMovingUsingDeltaVariables
*/
void CalculateDeltaVariables()
{
KRATOS_TRY
ModelPart::NodesContainerType::iterator inodebegin = mrModelPart.NodesBegin();
std::vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->FastGetSolutionStepValue(DELTA_SCALAR1) = inode->FastGetSolutionStepValue(mScalarVar1) - inode->FastGetSolutionStepValue(PROJECTED_SCALAR1); //PROJECTED_SCALAR1
inode->FastGetSolutionStepValue(DELTA_VECTOR1) = inode->FastGetSolutionStepValue(mVectorVar1) - inode->FastGetSolutionStepValue(PROJECTED_VECTOR1); //PROJECTED_VECTOR1
}
}
KRATOS_CATCH("")
}
/// Auxiliar function
/** This function copy a scalar variable value to the previous time step
*/
void CopyScalarVarToPreviousTimeStep(const Variable<double>& OriginVariable,
ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY
ModelPart::NodesContainerType::iterator inodebegin = rNodes.begin();
std::vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, rNodes.size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->GetSolutionStepValue(OriginVariable,1) = inode->FastGetSolutionStepValue(OriginVariable);
}
}
KRATOS_CATCH("")
}
/// Auxiliar function
/** This function copy a vector variable value to the previous time step
*/
void CopyVectorVarToPreviousTimeStep(const Variable<array_1d<double,3>>& OriginVariable,
ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY
ModelPart::NodesContainerType::iterator inodebegin = rNodes.begin();
std::vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, rNodes.size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
noalias(inode->GetSolutionStepValue(OriginVariable,1)) = inode->FastGetSolutionStepValue(OriginVariable);
}
}
KRATOS_CATCH("")
}
/// Move all the particles
/** This function moves the particles across the streamlines
* according to the velocity given by VELOCITY variable. The
* movement is performed in nsubsteps, during a total time
* of DELTA_TIME
*
* @see Moveparticle
*/
void MoveParticles()
{
KRATOS_TRY
ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
const int offset = mOffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
//moveparticlesdiff reads from the pointers of one part (ie odd) and saves into the other part (ie even part)
//since it is the only function in the whole procedure that does this, it must use alternatively one part and the other.
bool even_timestep;
if (offset!=0) even_timestep=false;
else even_timestep=true;
const int post_offset = mMaxNumberOfParticles * static_cast<int>(even_timestep); //and we also save the offset to know the location in which we will save the pointers after we've moved the particles
double delta_t = CurrentProcessInfo[DELTA_TIME];
array_1d<double,TDim+1> N;
const unsigned int max_results = 10000;
//double integration_distance= 2.0;
mMaxSubSteps = 10;
mMaxSubStepDt = delta_t / static_cast<double>(mMaxSubSteps);
std::vector<unsigned int> element_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);
ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
//before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element.
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
//ModelPart::ElementsContainerType::iterator old_element = ielembegin+ii;
int & number_of_particles = mNumOfParticlesInElems[ii]; //old_element->GetValue(NUMBER_OF_BED_PARTICLES);
mNumOfParticlesInElemsAux[ii] = number_of_particles;
mNumOfParticlesInElems[ii] = 0;
//we reset the local vectors for a faster access;
}
}
std::cout << "convecting particles" << std::endl;
//We move the particles across the fixed mesh and saving change data into them (using the function MoveParticle)
#pragma omp barrier
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
ResultContainerType results(max_results);
WeakPointerVector< Element > elements_in_trajectory;
elements_in_trajectory.resize(20);
for(unsigned int ielem = element_partition[kkk]; ielem<element_partition[kkk+1]; ielem++)
{
ModelPart::ElementsContainerType::iterator old_element = ielembegin+ielem;
const int old_element_id = old_element->Id();
ParticlePointerVector& old_element_particle_pointers = mVectorOfParticlePointersVectors[old_element_id-1];
if ( (results.size()) != max_results )
results.resize(max_results);
unsigned int number_of_elements_in_trajectory = 0; //excluding the origin one (current one, ielem)
for (int ii = 0; ii < mNumOfParticlesInElemsAux[ielem]; ii++)
{
ShallowParticle& pparticle = old_element_particle_pointers[offset+ii];
Element::Pointer pcurrent_element( *old_element.base() );
ResultIteratorType result_begin = results.begin();
bool & erase_flag=pparticle.GetEraseFlag();
if (erase_flag == false){
MoveParticle(pparticle,pcurrent_element,elements_in_trajectory,number_of_elements_in_trajectory,result_begin,max_results); //saqué N de los argumentos, no lo necesito ya q empieza SIEMPRE en un nodo y no me importa donde termina
const int current_element_id = pcurrent_element->Id();
int & number_of_particles_in_current_elem = mNumOfParticlesInElems[current_element_id-1];
if (number_of_particles_in_current_elem < mMaxNumberOfParticles && erase_flag == false)
{
ParticlePointerVector& current_element_particle_pointers = mVectorOfParticlePointersVectors[current_element_id-1];
#pragma omp critical
{
if (number_of_particles_in_current_elem < mMaxNumberOfParticles) // we cant go over this node, there's no room. otherwise we would be in the position of the first particle of the next element!!
{
current_element_particle_pointers(post_offset+number_of_particles_in_current_elem) = &pparticle;
number_of_particles_in_current_elem++ ;
KRATOS_ERROR_IF( number_of_particles_in_current_elem > mMaxNumberOfParticles ) <<
"In move shallow water particle utility: exceeded maximum number of particles" << std::endl;
//~ if (number_of_particles_in_current_elem > mMaxNumberOfParticles)
//~ KRATOS_WATCH("MAL");
}
else
{
pparticle.GetEraseFlag()=true; //so we just delete it!
}
}
}
else
{
pparticle.GetEraseFlag()=true; //so we just delete it!
}
}
}
}
}
// After having changed everything we change the status of the mOddTimeStep flag:
mOffset = post_offset;; //
KRATOS_CATCH("")
}
/// Transfer particles information to the mesh nodes
/** This function explicitly projects data from particles (lagrangian)
* onto the eulerian mesh. Shape functions of the elements determine
* the particle location within the element and its contribution to
* each node as a weighting function.
*/
void TransferLagrangianToEulerian() //explicit
{
KRATOS_TRY
//ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
//const double delta_t =CurrentProcessInfo[DELTA_TIME];
const double threshold = 1e-10 / (static_cast<double>(TDim)+1.0);
std::cout << "projecting info to mesh" << std::endl;
const int offset = mOffset;
// the array of pointers for each element has twice the required size so that
// we use a part in odd timesteps and the other in even ones.
//(flag managed only by MoveParticles)
// We must project data from the particles (lagrangian) onto the eulerian mesh
//int nnodes = mrModelPart.Nodes().size();
//array_1d<double,(n_nodes)> eulerian_nodes_sumweights;
// We save data from previous time step of the eulerian mesh in case we must reuse it later
// cos no particle was found around the nodes though we could've use a bigger buffer, to be changed later!
// after having saved data, we reset them to zero, this way it's easier to add the contribution
// of the surrounding particles.
ModelPart::NodesContainerType::iterator inodebegin = mrModelPart.NodesBegin();
std::vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->FastGetSolutionStepValue(PROJECTED_SCALAR1)=0.0;
inode->FastGetSolutionStepValue(PROJECTED_VECTOR1)=ZeroVector(3);
inode->FastGetSolutionStepValue(YP)=0.0;
}
}
// Adding contribution, loop on elements, since each element has stored the particles found inside of it
std::vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);
ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
array_1d<double,3*(TDim+1)> nodes_positions;
array_1d<double,3*(TDim+1)> nodes_added_vector1 = ZeroVector(3*(TDim+1));
array_1d<double,(TDim+1)> nodes_added_scalar1 = ZeroVector((TDim+1));
array_1d<double,(TDim+1)> nodes_added_weights = ZeroVector((TDim+1));
//array_1d<double,(TDim+1)> weighting_inverse_divisor;
Geometry<Node<3> >& geom = ielem->GetGeometry();
for (int i=0 ; i!=(TDim+1) ; ++i)
{
nodes_positions[i*3+0]=geom[i].X();
nodes_positions[i*3+1]=geom[i].Y();
nodes_positions[i*3+2]=geom[i].Z();
//weighting_inverse_divisor[i]=1.0/((geom[i].FastGetSolutionStepValue(MEAN_SIZE))*1.01);
}
int & number_of_particles_in_elem= mNumOfParticlesInElems[ii];
ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii];
for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
{
if (iii==mMaxNumberOfParticles) // It means we are out of our portion of the array, abort loop!
break;
ShallowParticle& pparticle = element_particle_pointers[offset+iii];
if (pparticle.GetEraseFlag()==false)
{
array_1d<double,3> & position = pparticle.Coordinates();
const float& particle_scalar1 = pparticle.GetScalar1();
const array_1d<float,3>& particle_vector1 = pparticle.GetVector1();
array_1d<double,TDim+1> N;
bool is_found = CalculatePosition(nodes_positions,position[0],position[1],position[2],N);
if (is_found==false) // Something went wrong. if it was close enough to the edge we simply send it inside the element.
{
KRATOS_INFO("MoveShallowWaterParticleUtility") << N << std::endl;
for (int j=0 ; j!=(TDim+1); j++)
if (N[j]<0.0 && N[j]> -1e-5)
N[j]=1e-10;
}
for (int j=0 ; j!=(TDim+1); j++) //going through the 3/4 nodes of the element
{
// These lines for a weighting function based on the distance (or square distance) from the node insteadof the shape functions
//double sq_dist = 0;
//for (int k=0 ; k!=(TDim); k++) sq_dist += ((position[k] - nodes_positions[j*3+k])*(position[k] - nodes_positions[j*3+k]));
//double weight = (1.0 - (sqrt(sq_dist)*weighting_inverse_divisor[j] ) );
double weight=N(j)*N(j);
//weight=N(j)*N(j)*N(j);
if (weight<threshold) weight=1e-10;
nodes_added_weights[j] += weight;
nodes_added_scalar1[j] += weight*static_cast<double>(particle_scalar1);
for (int k=0 ; k!=(TDim); k++) //x,y,(z)
{
nodes_added_vector1[j*3+k] += weight * static_cast<double>(particle_vector1[k]);
}
}
}
}
for (int i=0 ; i!=(TDim+1) ; ++i) {
geom[i].SetLock();
geom[i].FastGetSolutionStepValue(PROJECTED_SCALAR1) += nodes_added_scalar1[i];
geom[i].FastGetSolutionStepValue(PROJECTED_VECTOR1_X) += nodes_added_vector1[3*i+0];
geom[i].FastGetSolutionStepValue(PROJECTED_VECTOR1_Y) += nodes_added_vector1[3*i+1];
geom[i].FastGetSolutionStepValue(PROJECTED_VECTOR1_Z) += nodes_added_vector1[3*i+2];
geom[i].FastGetSolutionStepValue(YP) += nodes_added_weights[i];
geom[i].UnSetLock();
}
}
}
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
double sum_weights = inode->FastGetSolutionStepValue(YP);
if (sum_weights>0.00001)
{
double & scalar = inode->FastGetSolutionStepValue(PROJECTED_SCALAR1);
array_1d<double,3> & vector = inode->FastGetSolutionStepValue(PROJECTED_VECTOR1);
scalar /=sum_weights; // resetting the scalar1
vector /=sum_weights; // resetting the vector1
}
else // This should never happen because other ways to recover the information have been executed before, but leaving it just in case..
{
inode->FastGetSolutionStepValue(PROJECTED_SCALAR1)=inode->FastGetSolutionStepValue(mScalarVar1,1); // Resetting the convected scalar
inode->FastGetSolutionStepValue(PROJECTED_VECTOR1)=inode->FastGetSolutionStepValue(mVectorVar1,1); // Resetting the convected vector
}
}
}
KRATOS_CATCH("")
}
/// Update all the particles without moving them
/** This function updates all the particles variables using the
* "delta variables" from the nodal database.
*
* @see CorrectParticleUsingDeltaVariables
*/
void CorrectParticlesWithoutMovingUsingDeltaVariables()
{
KRATOS_TRY
//std::cout << "updating particles" << std::endl;
//ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
const int offset = mOffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
//(flag managed only by MoveParticles)
ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
std::vector<unsigned int> element_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
//const int & elem_id = ielem->Id();
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
Element::Pointer pelement(*ielem.base());
Geometry<Node<3> >& geom = ielem->GetGeometry();
//ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
//int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_BED_PARTICLES);
int & number_of_particles_in_elem= mNumOfParticlesInElems[ii];
ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii];
for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
{
if (iii>mMaxNumberOfParticles) //it means we are out of our portion of the array, abort loop!
break;
ShallowParticle & pparticle = element_particle_pointers[offset+iii];
bool erase_flag= pparticle.GetEraseFlag();
if (erase_flag==false)
{
CorrectParticleUsingDeltaVariables(pparticle,pelement,geom); //'lite' version, we pass by reference the geometry, so much cheaper
}
}
}
}
KRATOS_CATCH("")
}
/// AddUniqueWeakPointer
template< class TDataType >
void AddUniqueWeakPointer
(WeakPointerVector< TDataType >& v, const typename TDataType::WeakPointer candidate)
{
typename WeakPointerVector< TDataType >::iterator i = v.begin();
typename WeakPointerVector< TDataType >::iterator endit = v.end();
while ( i != endit && (i)->Id() != (candidate.lock())->Id())
{
i++;
}
if( i == endit )
{
v.push_back(candidate);
}
}
/// Fill an element with particles
/** This function is to be executed after moving particles and
* before tranferring data from lagrangian particles to eulerian mesh
* If an element finishes with less particles than "minimum number
* of particles", then PreReseed adds particles inside it.
* A minimal reseed is performed in order to not disturb the projection
* from lagrangian to euelrian.
*
* @see MinimumNumberOfParticles
*
* @see MoveParticles
* @see MoveParticleInverseWay: is called to get the particle values
*/
void PreReseed(int MinimumNumberOfParticles)
{
KRATOS_TRY
//ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
const int offset =mOffset;
const int max_results = 1000;
//tools for the paralelization
unsigned int number_of_threads = OpenMPUtils::GetNumThreads();
std::vector<unsigned int> elem_partition;
int number_of_rows = mrModelPart.Elements().size();
elem_partition.resize(number_of_threads + 1);
int elem_partition_size = number_of_rows / number_of_threads;
elem_partition[0] = 0;
elem_partition[number_of_threads] = number_of_rows;
//KRATOS_WATCH(elem_partition_size);
for (unsigned int i = 1; i < number_of_threads; i++)
elem_partition[i] = elem_partition[i - 1] + elem_partition_size;
ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
#pragma omp parallel firstprivate(elem_partition)
{
ResultContainerType results(max_results);
int k = OpenMPUtils::ThisThread();
//ModelPart::ElementsContainerType::iterator it_begin = mrModelPart.ElementsBegin() + elem_partition[k];
//ModelPart::ElementsContainerType::iterator it_end = mrModelPart.ElementsBegin() + elem_partition[k+1] ;
//ModelPart::NodesContainerType local_list=aux[k];
//PointerVectorSet<ShallowParticle, IndexedObject> & list=aux[k];
BoundedMatrix<double, (TDim+1), 3 > pos;
BoundedMatrix<double, (TDim+1) , (TDim+1) > N;
unsigned int freeparticle=0; //we start with the first position in the particles array
//int local_id=1;
for(unsigned int ii=elem_partition[k]; ii<elem_partition[k+1]; ii++)
{
//const int & elem_id = ielem->Id();
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
results.resize(max_results);
//const int & elem_id = ielem->Id();
//ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
//int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_BED_PARTICLES);
int & number_of_particles_in_elem = mNumOfParticlesInElems[ii];
ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii];
if (number_of_particles_in_elem < (MinimumNumberOfParticles)) // && (ielem->GetGeometry())[0].Y()<0.10 )
{
Geometry< Node<3> >& geom = ielem->GetGeometry();
ComputeGaussPointPositionsForPreReseed(geom, pos, N);
for (unsigned int j = 0; j < (pos.size1()); j++) // I am dropping the last one, the one in the middle of the element
{
bool keep_looking = true;
while(keep_looking)
{
if (mParticlesVector[freeparticle].GetEraseFlag()==true)
{
#pragma omp critical
{
if (mParticlesVector[freeparticle].GetEraseFlag()==true)
{
mParticlesVector[freeparticle].GetEraseFlag()=false;
keep_looking=false;
}
}
if (keep_looking==false)
break;
else
freeparticle++;
}
else
freeparticle++;
}
ShallowParticle pparticle(pos(j,0),pos(j,1),pos(j,2));
array_1d<double,TDim+1>aux2_N;
bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux2_N);
KRATOS_ERROR_IF_NOT( is_found ) <<
"In move shallow water particle utility: particle not found in domain" << std::endl;
pparticle.GetEraseFlag()=false;
ResultIteratorType result_begin = results.begin();
Element::Pointer pelement( *ielem.base() );
MoveParticleInverseWay(pparticle, pelement, result_begin, max_results);
//and we copy it to the array:
mParticlesVector[freeparticle] = pparticle;
element_particle_pointers(offset+number_of_particles_in_elem) = &mParticlesVector[freeparticle];
pparticle.GetEraseFlag()=false;
number_of_particles_in_elem++;
}
}
}
}
KRATOS_CATCH("")
}
/// Fill an element with particles
/** This function is to be executed after the mesh stage solver is
* called and the particles are updated.
* If an element contains less particles than "minimum number of
* particles", then PostReseed adds particles inside it.
* A full reseed is performed and the particle gets it's convected
* variables directly from the eulerian mesh
*
* @param MinimumNumberOfParticles
*
* @see PreReseed
*/
void PostReseed(int MinimumNumberOfParticles) //pooyan's way
{
KRATOS_TRY
//ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
const int offset = mOffset;
//TOOLS FOR THE PARALELIZATION
unsigned int number_of_threads = OpenMPUtils::GetNumThreads();
std::vector<unsigned int> elem_partition;
int number_of_rows=mrModelPart.Elements().size();
//KRATOS_THROW_ERROR(std::logic_error, "Add ----NODAL_H---- variable!!!!!! ERROR", "");
elem_partition.resize(number_of_threads + 1);
int elem_partition_size = number_of_rows / number_of_threads;
elem_partition[0] = 0;
elem_partition[number_of_threads] = number_of_rows;
for (unsigned int i = 1; i < number_of_threads; i++)
elem_partition[i] = elem_partition[i - 1] + elem_partition_size;
ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
#pragma omp parallel firstprivate(elem_partition) // firstprivate(results)//we will add the nodes in different parts of aux and later assemple everything toghether, remaming particles ids to get consecutive ids
{
unsigned int reused_particles=0;
unsigned int freeparticle = 0; //we start by the first position;
int k = OpenMPUtils::ThisThread();
BoundedMatrix<double, (3+2*TDim), 3 > pos; //7 particles (2D) or 9 particles (3D)
BoundedMatrix<double, (3+2*TDim), (TDim+1) > N;
double mesh_scalar1;
array_1d<double,3> mesh_vector1;
array_1d<int, (3+2*TDim) > positions;
unsigned int number_of_reseeded_particles;
for(unsigned int ii=elem_partition[k]; ii<elem_partition[k+1]; ii++)
{
//const int & elem_id = ielem->Id();
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
int & number_of_particles_in_elem = mNumOfParticlesInElems[ii];
ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii];
Geometry< Node<3> >& geom = ielem->GetGeometry();
if ( number_of_particles_in_elem < (MinimumNumberOfParticles) ) // && (geom[0].Y()<0.10) ) || (number_of_water_particles_in_elem>2 && number_of_particles_in_elem<(MinimumNumberOfParticles) ) )
{
//bool reseed_more=false;
number_of_reseeded_particles = 0;
//reseed_more=true;
number_of_reseeded_particles = 3 + 2*TDim;
ComputeGaussPointPositionsForPostReseed(geom, pos, N);
for (unsigned int j = 0; j < number_of_reseeded_particles; j++)
{
// Now we have to find an empty space (a particle that was about to be deleted) in the
// particles model part. once found. there will be our renewed particle:
bool keep_looking = true;
while(keep_looking)
{
if (mParticlesVector[freeparticle].GetEraseFlag()==true)
{
#pragma omp critical
{
if (mParticlesVector[freeparticle].GetEraseFlag()==true)
{
mParticlesVector[freeparticle].GetEraseFlag()=false;
keep_looking=false;
}
}
if (keep_looking==false)
break;
else
freeparticle++;
}
else
freeparticle++;
}
ShallowParticle pparticle(pos(j,0),pos(j,1),pos(j,2));
array_1d<double,TDim+1>aux_N;
bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux_N);
KRATOS_ERROR_IF_NOT( is_found ) <<
"In move shallow water particle utility: particle not found in domain" << std::endl;
mesh_scalar1 = 0.0;
mesh_vector1 = ZeroVector(3);
for (unsigned int l = 0; l < (TDim+1); l++)
{
mesh_scalar1 += N(j,l) * geom[l].FastGetSolutionStepValue(mScalarVar1);
noalias(mesh_vector1) += N(j, l) * geom[l].FastGetSolutionStepValue(mVectorVar1);
}
pparticle.GetScalar1()=mesh_scalar1;
pparticle.GetVector1()=mesh_vector1;
pparticle.GetEraseFlag()=false;
mParticlesVector[freeparticle]=pparticle;
element_particle_pointers(offset+number_of_particles_in_elem) = &mParticlesVector[freeparticle];
number_of_particles_in_elem++;
KRATOS_ERROR_IF( keep_looking ) <<
"In move shallow water particle utility: Finished the list and couldnt find a free cell for the new particle!" << std::endl;
reused_particles++;
}
}
}
}
KRATOS_CATCH("")
}
/// Fill a model part with particles
/** This function prints the particles to a model part
*
* @param rLagrangianModelPart: empty model part to print particles
* @param FilterFactor: the function will print one particle of every "filter factor"
*/
void ExecuteParticlesPrintingTool( ModelPart& rLagrangianModelPart, unsigned int FilterFactor )
{
KRATOS_TRY
// We will only print one out of every "filter factor" particles of the total particle list
if (mParticlePrintingToolInitialized == false)
{
KRATOS_ERROR_IF( rLagrangianModelPart.NodesBegin() - rLagrangianModelPart.NodesEnd() > 0 ) <<
"In move shallow water particle utility: an empty model part is required for the particles printing tool" << std::endl;
rLagrangianModelPart.AddNodalSolutionStepVariable(mScalarVar1);
rLagrangianModelPart.AddNodalSolutionStepVariable(DISPLACEMENT);
for (unsigned int i = 0; i != ((mMaxNumberOfParticles*mNElems)/FilterFactor) + FilterFactor; i++)
{
Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode( i+mLastNodeId+1 , 0.0, 0.0, 0.0); //recordar que es el nueevo model part!!
//pnode->SetBufferSize(mrModelPart.NodesBegin()->GetBufferSize());
pnode->SetBufferSize(1);
}
mParticlePrintingToolInitialized=true;
}
// Resetting data of the unused particles
const double inactive_particle_position = -10.0;
array_1d<double,3>inactive_particle_position_vector;
inactive_particle_position_vector(0)=inactive_particle_position;
inactive_particle_position_vector(1)=inactive_particle_position;
inactive_particle_position_vector(2)=inactive_particle_position;
ModelPart::NodesContainerType::iterator inodebegin = rLagrangianModelPart.NodesBegin();
for(unsigned int ii = 0; ii < rLagrangianModelPart.Nodes().size(); ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->FastGetSolutionStepValue(mScalarVar1) = 0.0;
inode->FastGetSolutionStepValue(DISPLACEMENT) = inactive_particle_position_vector;
}
int counter = 0;
//ModelPart::NodesContainerType::iterator it_begin = rLagrangianModelPart.NodesBegin();
for (int i = 0; i != mMaxNumberOfParticles*mNElems; i++)
{
ShallowParticle& pparticle = mParticlesVector[i];
if(pparticle.GetEraseFlag() == false && i%FilterFactor == 0)
{
ModelPart::NodesContainerType::iterator inode = inodebegin + counter; //copying info from the particle to the (printing) node.
inode->FastGetSolutionStepValue(mScalarVar1) = pparticle.GetScalar1();
inode->FastGetSolutionStepValue(DISPLACEMENT) = pparticle.Coordinates();
counter++;
}
}
KRATOS_CATCH("")
}
protected:
private:
/// Move a particle
/** this function moves a particle according to the velocity given
* by VELOCITY variable. The movement is performed in nsubsteps,
* during a total time of DELTA_TIME
*
* @param pParticle
* @param pElement
* @param rElementsInTrajectory
* @param rNumberOfElementsInTrajectory
* @param ResultBegin
* @param MaxNumberOfResults
*
* @see MoveParticles
*/
void MoveParticle(ShallowParticle & pParticle,
Element::Pointer & pElement,
WeakPointerVector< Element >& rElementsInTrajectory,
unsigned int & rNumberOfElementsInTrajectory,
ResultIteratorType ResultBegin,
const unsigned int MaxNumberOfResults)
{
ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
unsigned int nsubsteps;
double substep_dt;
bool keep_integrating = false;
bool is_found;
array_1d<double,3> vel;
array_1d<double,3> vel_without_other_phase_nodes=ZeroVector(3);
array_1d<double,3> position;
array_1d<double,3> mid_position;
array_1d<double,TDim+1> N;
//we start with the first position, then it will enter the loop.
position = pParticle.Coordinates(); //initial coordinates
double only_integral = 0.0 ;
is_found = FindNodeOnMesh(position, N, pElement, ResultBegin, MaxNumberOfResults); //good, now we know where this point is:
if(is_found == true)
{
keep_integrating=true;
Geometry< Node<3> >& geom = pElement->GetGeometry();//the element we're in
vel=ZeroVector(3);
for(unsigned int j=0; j<(TDim+1); j++)
{
noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
}
//calculating substep to get +- courant(substep) = 0.1
nsubsteps = 10.0 * (delta_t * pElement->GetValue(MEAN_VEL_OVER_ELEM_SIZE));
if (nsubsteps<1)
nsubsteps=1;
substep_dt = delta_t / double(nsubsteps);
only_integral = 1.0;// weight;//*double(nsubsteps);
position += vel*substep_dt;//weight;
// DONE THE FIRST LOCATION OF THE PARTICLE, NOW WE PROCEED TO STREAMLINE INTEGRATION USING THE MESH VELOCITY
unsigned int check_from_element_number = 0;
for(unsigned int i=0; i<(nsubsteps-1); i++)// this is for the substeps n+1. in the first one we already knew the position of the particle.
{
if (keep_integrating == true)
{
is_found = FindNodeOnMesh(position, N, pElement, rElementsInTrajectory, rNumberOfElementsInTrajectory, check_from_element_number, ResultBegin, MaxNumberOfResults); //good, now we know where this point is:
if(is_found == true)
{
Geometry< Node<3> >& geom = pElement->GetGeometry();//the element we're in
vel = ZeroVector(3);
for(unsigned int j=0; j<(TDim+1); j++)
{
noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
}
only_integral += 1.0; //values saved for the current time step
position+=vel*substep_dt;//weight;
}
else
{
keep_integrating=false;
break;
}
}
else
break;
}
}
if (keep_integrating == false) (pParticle.GetEraseFlag()=true);
else is_found = FindNodeOnMesh(position, N ,pElement,ResultBegin,MaxNumberOfResults); //we must save the pointer of the last element that we're in (inside the pointervector pElement)
if (is_found == false) ( pParticle.GetEraseFlag()=true);
pParticle.Coordinates() = position;
}
/// This function updates a particle
/** This function updates a particle variables using the "delta
* variables" from the nodal database.
*
* @param pParticle
* @param pElement
* @param rGeom
*
* @see CorrectParticlesWithoutMovingUsingDeltaVariables
*/
void CorrectParticleUsingDeltaVariables(ShallowParticle & pParticle,
Element::Pointer & pElement,
Geometry< Node<3> >& rGeom)
{
array_1d<double,TDim+1> N;
//we start with the first position, then it will enter the loop.
array_1d<double,3> coords = pParticle.Coordinates();
float & particle_scalar1 = pParticle.GetScalar1();
array_1d<float,3> & particle_vector1 = pParticle.GetVector1();
//double distance=0.0;
double delta_scalar1 = 0.0;
array_1d<double,3> delta_vector1 = ZeroVector(3);
bool is_found = CalculatePosition(rGeom,coords[0],coords[1],coords[2],N);
if(is_found == false)
{
KRATOS_INFO("MoveShallowWaterParticleUtility") << N << std::endl;
for (int j=0 ; j!=(TDim+1); j++)
if (N[j]<0.0 )
N[j]=1e-10;
}
for(unsigned int j=0; j<(TDim+1); j++)
{
delta_scalar1 += rGeom[j].FastGetSolutionStepValue(DELTA_SCALAR1)*N[j];
noalias(delta_vector1) += rGeom[j].FastGetSolutionStepValue(DELTA_VECTOR1)*N[j];
}
particle_scalar1 = particle_scalar1 + delta_scalar1;
particle_vector1 = particle_vector1 + delta_vector1;
}
/// Move a particle in the inverse way
/** this function moves a particle according to the -velocity given
* by VELOCITY variable. The movement is performed by a backward
* integration in nsubsteps, during a total time of DELTA_TIME
* Before the particle goes out of the element, gets the value
* of the eulerian mesh and stores it
*
* @param pParticle
* @param pElement
* @param ResultBegin
* @param MaxNumberOfResults
*
* @see PreReseed
*/
void MoveParticleInverseWay(ShallowParticle & pParticle,
Element::Pointer & pElement, //NOT A REFERENCE!! WE SHALL NOT OVERWRITE THE ELEMENT IT BELONGS TO!
ResultIteratorType ResultBegin,
const unsigned int MaxNumberOfResults)
{
ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
unsigned int nsubsteps;
double substep_dt;
bool keep_integrating = false;
bool is_found;
double scalar1 = 0.0;
array_1d<double,3> vector1;
array_1d<double,3> vel;
array_1d<double,3> position;
array_1d<double,3> mid_position;
array_1d<double,TDim+1> N;
//we start with the first position, then it will enter the loop.
position = pParticle.Coordinates(); // + (pParticle)->FastGetSolutionStepValue(DISPLACEMENT); //initial coordinates
double only_integral = 0.0 ;
is_found = FindNodeOnMesh(position, N, pElement, ResultBegin, MaxNumberOfResults); //good, now we know where this point is:
if(is_found == true)
{
keep_integrating = true;
Geometry< Node<3> >& geom = pElement->GetGeometry(); //the element we're in
scalar1 = 0.0;
vector1 = ZeroVector(3);
vel = ZeroVector(3);
for(unsigned int j=0; j<(TDim+1); j++)
{
scalar1 += geom[j].FastGetSolutionStepValue(mScalarVar1)*N[j];
noalias(vector1) += geom[j].FastGetSolutionStepValue(mVectorVar1)*N[j];
noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
}
//calculating the number of substeps so that the substep Courant number is roughly 0.1
nsubsteps = 10.0 * (delta_t * pElement->GetValue(MEAN_VEL_OVER_ELEM_SIZE));
if (nsubsteps<1)
nsubsteps=1;
substep_dt = delta_t / double(nsubsteps);
only_integral = 1.0; // weight;//*double(nsubsteps);
position -= vel*substep_dt; //weight;
for(unsigned int i=0; i<(nsubsteps-1); i++) // substeps 2..nsubsteps: the first substep was already taken above, where the particle position was known
{
if (keep_integrating == true)
{
is_found = FindNodeOnMesh(position, N, pElement, ResultBegin, MaxNumberOfResults); //good, now we know where this point is:
if (is_found == true)
{
Geometry< Node<3> >& geom = pElement->GetGeometry();//the element we're in
scalar1 = 0.0;
vector1 = ZeroVector(3);
vel = ZeroVector(3);
for(unsigned int j=0; j<(TDim+1); j++)
{
scalar1 += geom[j].FastGetSolutionStepValue(mScalarVar1)*N[j];
noalias(vector1) += geom[j].FastGetSolutionStepValue(mVectorVar1)*N[j];
noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
}
only_integral += 1.0; //weight ; //values saved for the current time step
position -= vel*substep_dt; //weight;
}
else keep_integrating = false;
}
}
pParticle.GetScalar1() = scalar1;
pParticle.GetVector1() = vector1;
}
}
/// Find the element into which a given node is located
/** This function should find the element into which a given node
* is located and return a pointer to the element and the vector
* containing the shape functions that define the positions within
* the element.
* If false is returned the element is not found
*
* @param rPosition Position of the node
* @param N Output shape functions that define the position within the element
* @param pElement Output pointer to the element
* @param ResultBegin
* @param MaxNumberOfResults
* @return true if the element is found, false otherwise
*
* @see CalculatePosition
*/
bool FindNodeOnMesh( const array_1d<double,3>& rPosition,
array_1d<double,TDim+1>& N,
Element::Pointer & pElement,
ResultIteratorType ResultBegin,
const unsigned int MaxNumberOfResults)
{
typedef std::size_t SizeType;
array_1d<double,TDim+1> aux_N;
//before using the bin to search for possible elements we check first the last element in which the particle was.
Geometry<Node<3> >& geom_default = pElement->GetGeometry(); //(*(i))->GetGeometry();
bool is_found_1 = CalculatePosition(geom_default,rPosition[0],rPosition[1],rPosition[2],N);
if (is_found_1) //that was easy!
{
return true;
}
// To begin with we check the neighbour elements; it is a bit more expensive
WeakPointerVector< Element >& neighb_elems = pElement->GetValue(NEIGHBOUR_ELEMENTS);
for (unsigned int i=0;i!=(neighb_elems.size());i++)
{
Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
bool is_found_2 = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N);
if (is_found_2)
{
pElement=Element::Pointer(((neighb_elems(i))));
return true;
}
}
// If checking all the neighbour elements did not work, we have to use the bins
// ask to the container for the list of candidate elements
SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{rPosition}, ResultBegin, MaxNumberOfResults );
if (results_found>0)
{
//loop over the candidate elements and check if the particle falls within
for(SizeType i = 0; i< results_found; i++)
{
Geometry<Node<3> >& geom = (*(ResultBegin+i))->GetGeometry();
//find local position
bool is_found_3 = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N);
if (is_found_3)
{
pElement=Element::Pointer((*(ResultBegin+i)));
return true;
}
}
}
//if nothing worked, then:
//not found case
return false;
}
/// Find the element into which a given node is located
/** This function should find the element into which a given node
* is located and return a pointer to the element and the vector
* containing the shape functions that define the positions within
* the element.
* If false is returned the element is not found
* This version includes predefined elements following a trajectory
*
* @param rPosition of the node
* @param N Output shape functions that define the positions within the elem
* @param pElement Output a pointer to the element
* @param rElementsInTrajectory
* @param rNumberOfElementsInTrajectory Output
* @param CheckFromElementNumber
* @param ResultBegin
* @param MaxNumberOfResults
* @return true if the element is found, false otherwise
*
* @see CalculatePosition
*/
bool FindNodeOnMesh( const array_1d<double,3>& rPosition,
array_1d<double,TDim+1>& N,
Element::Pointer & pElement,
WeakPointerVector< Element >& rElementsInTrajectory,
unsigned int & rNumberOfElementsInTrajectory,
unsigned int & rCheckFromElementNumber,
ResultIteratorType ResultBegin,
const unsigned int MaxNumberOfResults)
{
typedef std::size_t SizeType;
//~ const array_1d<double,3>& coords = rPosition;
array_1d<double,TDim+1> aux_N;
//before using the bin to search for possible elements we check first the last element in which the particle was.
Geometry<Node<3> >& geom_default = pElement->GetGeometry(); //(*(i))->GetGeometry();
bool is_found_1 = CalculatePosition(geom_default,rPosition[0],rPosition[1],rPosition[2],N);
if(is_found_1 == true)
{
return true; //that was easy!
}
// If it was not found in the first element, we can proceed to check the following elements (in the trajectory defined by previous particles that started from the same element).
for (unsigned int i=(rCheckFromElementNumber);i!=rNumberOfElementsInTrajectory;i++)
{
Geometry<Node<3> >& geom = rElementsInTrajectory[i].GetGeometry();
bool is_found_2 = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],aux_N);
if (is_found_2)
{
pElement = Element::Pointer(((rElementsInTrajectory(i))));
N = aux_N;
rCheckFromElementNumber = i+1; //element i now matches pElement, so to avoid checking the same element twice we advance the counter to the following element.
return true;
}
}
// Now we check the neighbour elements:
WeakPointerVector< Element >& neighb_elems = pElement->GetValue(NEIGHBOUR_ELEMENTS);
for (unsigned int i=0;i!=(neighb_elems.size());i++)
{
Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
bool is_found_2 = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N);
if (is_found_2)
{
pElement=Element::Pointer(((neighb_elems(i))));
if (rNumberOfElementsInTrajectory<20)
{
rElementsInTrajectory(rNumberOfElementsInTrajectory) = pElement;
rNumberOfElementsInTrajectory++;
rCheckFromElementNumber = rNumberOfElementsInTrajectory; //we do it after incrementing the counter so we wouldn't enter the loop that searches the rElementsInTrajectory list: we are the particle that is adding elements to that list.
}
return true;
}
}
// If checking all the neighbour elements did not work, we have to use the bins
// ask to the container for the list of candidate elements
SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{rPosition}, ResultBegin, MaxNumberOfResults );
if(results_found>0)
{
//loop over the candidate elements and check if the particle falls within
for(SizeType i = 0; i< results_found; i++)
{
Geometry<Node<3> >& geom = (*(ResultBegin+i))->GetGeometry();
//find local position
bool is_found = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N);
if (is_found)
{
pElement=Element::Pointer((*(ResultBegin+i)));
if (rNumberOfElementsInTrajectory<20)
{
rElementsInTrajectory(rNumberOfElementsInTrajectory) = pElement;
rNumberOfElementsInTrajectory++;
rCheckFromElementNumber = rNumberOfElementsInTrajectory; //we do it after incrementing the counter so we wouldn't enter the loop that searches the rElementsInTrajectory list: we are the particle that is adding elements to that list.
}
return true;
}
}
}
//not found case
return false;
}
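// Search-order note (illustrative summary, not part of the original code):
// this overload tries, in increasing order of cost, (1) the element the
// particle was last found in, (2) the cached trajectory of up to 20 elements
// recorded by earlier particles seeded from the same element, (3) the
// neighbour elements (NEIGHBOUR_ELEMENTS), and (4) the spatial bins; hits
// from stages (3) and (4) are cached back into the trajectory list so that
// later particles can reuse them.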
/// Calculate the position of a given particle inside an element
/** This function calculates the position of a given particle inside
* an element and returns the shape functions that define its position
* within the element; it returns false if the particle is outside
* the element
*
* @param rGeom: the element (a triangle)
* @param xc: the x position of the particle
* @param yc: the y position of the particle
* @param zc: the z position of the particle
* @param N: the shape functions to define the particle position
*
* @return CalculatePosition
*/
inline bool CalculatePosition( const Geometry<Node < 3 > >&rGeom,
const double xc,
const double yc,
const double zc,
array_1d<double,3> & N )
{
double x0 = rGeom[0].X();
double y0 = rGeom[0].Y();
double x1 = rGeom[1].X();
double y1 = rGeom[1].Y();
double x2 = rGeom[2].X();
double y2 = rGeom[2].Y();
double area = CalculateVol(x0, y0, x1, y1, x2, y2);
KRATOS_ERROR_IF( area == 0.0 ) << "In move shallow water particle utility: element with zero area found" << std::endl;
double inv_area = 1.0 / area;
N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area;
N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area;
N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area;
if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0)
//if the xc yc is inside the triangle return true
return true;
return false;
}
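// Worked check (illustrative, not part of the original utility): for the
// reference triangle with nodes (0,0), (1,0), (0,1), CalculateVol returns an
// area of 0.5, and the query point (0.25, 0.25) yields the shape functions
// N = {0.5, 0.25, 0.25}. All entries lie in [0,1] and sum to 1, so the point
// is correctly reported as inside the element.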
/// Calculate the position of a given particle inside an element
/** This function calculates the position of a given particle inside
* an element and returns the shape functions that define its position
* within the element; it returns false if the particle is outside
* the element
*
* @param rNodesPositions of the element (a triangle)
* @param xc: the x position of the particle
* @param yc: the y position of the particle
* @param zc: the z position of the particle
* @param N: the shape functions to define the particle position
*
* @return CalculatePosition
*/
inline bool CalculatePosition( const array_1d<double,3*(TDim+1)>& rNodesPositions,
const double xc,
const double yc,
const double zc,
array_1d<double,3> & N )
{
const double& x0 = rNodesPositions[0];
const double& y0 = rNodesPositions[1];
const double& x1 = rNodesPositions[3];
const double& y1 = rNodesPositions[4];
const double& x2 = rNodesPositions[6];
const double& y2 = rNodesPositions[7];
double area = CalculateVol(x0, y0, x1, y1, x2, y2);
KRATOS_ERROR_IF( area == 0.0 ) << "In move shallow water particle utility: element with zero area found" << std::endl;
double inv_area = 1.0 / area;
N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area;
N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area;
N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area;
if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0)
//if the xc yc is inside the triangle return true
return true;
return false;
}
/// Calculate the position of a given particle inside an element
/** This function calculates the position of a given particle inside
* an element and returns the shape functions that define its position
* within the element; it returns false if the particle is outside
* the element
*
* @param rGeom: the element (a tetrahedron)
* @param xc: the x position of the particle
* @param yc: the y position of the particle
* @param zc: the z position of the particle
* @param N: the shape functions to define the particle position
*
* @return CalculatePosition
*/
inline bool CalculatePosition( const Geometry<Node < 3 > >&rGeom,
const double xc,
const double yc,
const double zc,
array_1d<double, 4 > & N )
{
double x0 = rGeom[0].X();
double y0 = rGeom[0].Y();
double z0 = rGeom[0].Z();
double x1 = rGeom[1].X();
double y1 = rGeom[1].Y();
double z1 = rGeom[1].Z();
double x2 = rGeom[2].X();
double y2 = rGeom[2].Y();
double z2 = rGeom[2].Z();
double x3 = rGeom[3].X();
double y3 = rGeom[3].Y();
double z3 = rGeom[3].Z();
double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
KRATOS_ERROR_IF( vol == 0.0 ) << "In move shallow water particle utility: element with zero vol found" << std::endl;
double inv_vol = 1.0 / vol;
N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol;
N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol;
N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol;
N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol;
if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 &&
N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0)
//if the xc yc zc is inside the tetrahedron return true
return true;
return false;
}
/// Calculate the position of a given particle inside an element
/** This function calculates the position of a given particle inside
* an element and returns the shape functions that define its position
* within the element; it returns false if the particle is outside
* the element
*
* @param rNodesPositions of the element (a tetrahedron)
* @param xc: the x position of the particle
* @param yc: the y position of the particle
* @param zc: the z position of the particle
* @param N: the shape functions to define the particle position
*
* @return CalculatePosition
*/
inline bool CalculatePosition( const array_1d<double,3*(TDim+1)>& rNodesPositions,
const double xc,
const double yc,
const double zc,
array_1d<double, 4 > & N )
{
const double& x0 = rNodesPositions[0];
const double& y0 = rNodesPositions[1];
const double& z0 = rNodesPositions[2];
const double& x1 = rNodesPositions[3];
const double& y1 = rNodesPositions[4];
const double& z1 = rNodesPositions[5];
const double& x2 = rNodesPositions[6];
const double& y2 = rNodesPositions[7];
const double& z2 = rNodesPositions[8];
const double& x3 = rNodesPositions[9];
const double& y3 = rNodesPositions[10];
const double& z3 = rNodesPositions[11];
double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
KRATOS_ERROR_IF( vol == 0.0 ) << "In move shallow water particle utility: element with zero vol found" << std::endl;
double inv_vol = 1.0 / vol;
N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol;
N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol;
N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol;
N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol;
if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 &&
N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0)
//if the xc yc zc is inside the tetrahedron return true
return true;
return false;
}
/// Calculate the area
/** This function computes the signed area of a triangle
*/
inline double CalculateVol( const double x0, const double y0,
const double x1, const double y1,
const double x2, const double y2 )
{
return 0.5 * ((x1 - x0)*(y2 - y0)- (y1 - y0)*(x2 - x0));
}
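// Example (illustrative): CalculateVol(0,0, 1,0, 0,1) = 0.5*((1-0)*(1-0) -
// (0-0)*(0-0)) = 0.5, the area of the unit right triangle. Note the result
// is signed: swapping two nodes flips its sign, which the CalculatePosition
// callers above cancel out by dividing by the same signed total area.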
/// Calculate the volume
/** This function computes the signed volume of a tetrahedron
*/
inline double CalculateVol( const double x0, const double y0, const double z0,
const double x1, const double y1, const double z1,
const double x2, const double y2, const double z2,
const double x3, const double y3, const double z3 )
{
double x10 = x1 - x0;
double y10 = y1 - y0;
double z10 = z1 - z0;
double x20 = x2 - x0;
double y20 = y2 - y0;
double z20 = z2 - z0;
double x30 = x3 - x0;
double y30 = y3 - y0;
double z30 = z3 - z0;
double detJ = x10 * y20 * z30 - x10 * y30 * z20 + y10 * z20 * x30 - y10 * x20 * z30 + z10 * x20 * y30 - z10 * y20 * x30;
return detJ * 0.1666666666666666666667;
}
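// Example (illustrative): for the unit tetrahedron with nodes (0,0,0),
// (1,0,0), (0,1,0) and (0,0,1) the Jacobian determinant detJ is 1, so the
// returned volume is 1/6 ~= 0.1667, matching the 0.1666... factor above.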
/// Compute the Gauss points
/**
*/
void ComputeGaussPointPositions_4( Geometry< Node < 3 > >& geom,
BoundedMatrix<double, 7, 3 > & pos,
BoundedMatrix<double, 7, 3 > & N )
{
double one_third = 1.0 / 3.0;
double one_sixt = 0.15; // note: 0.15, not exactly 1.0 / 6.0
double two_third = 0.7; // note: 0.7, not exactly 2.0 / 3.0; each filled row of N below still sums to 1.0
N(0, 0) = one_sixt;
N(0, 1) = one_sixt;
N(0, 2) = two_third;
N(1, 0) = two_third;
N(1, 1) = one_sixt;
N(1, 2) = one_sixt;
N(2, 0) = one_sixt;
N(2, 1) = two_third;
N(2, 2) = one_sixt;
N(3, 0) = one_third;
N(3, 1) = one_third;
N(3, 2) = one_third;
//first
pos(0, 0) = one_sixt * geom[0].X() + one_sixt * geom[1].X() + two_third * geom[2].X();
pos(0, 1) = one_sixt * geom[0].Y() + one_sixt * geom[1].Y() + two_third * geom[2].Y();
pos(0, 2) = one_sixt * geom[0].Z() + one_sixt * geom[1].Z() + two_third * geom[2].Z();
//second
pos(1, 0) = two_third * geom[0].X() + one_sixt * geom[1].X() + one_sixt * geom[2].X();
pos(1, 1) = two_third * geom[0].Y() + one_sixt * geom[1].Y() + one_sixt * geom[2].Y();
pos(1, 2) = two_third * geom[0].Z() + one_sixt * geom[1].Z() + one_sixt * geom[2].Z();
//third
pos(2, 0) = one_sixt * geom[0].X() + two_third * geom[1].X() + one_sixt * geom[2].X();
pos(2, 1) = one_sixt * geom[0].Y() + two_third * geom[1].Y() + one_sixt * geom[2].Y();
pos(2, 2) = one_sixt * geom[0].Z() + two_third * geom[1].Z() + one_sixt * geom[2].Z();
//fourth
pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X();
pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y();
pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z();
}
/// Compute the Gauss points
/** For a triangle
*
* @see PostReseed
*/
void ComputeGaussPointPositionsForPostReseed( Geometry< Node < 3 > >& geom,
BoundedMatrix<double, 7, 3 > & pos,
BoundedMatrix<double, 7, 3 > & N ) //2d
{
double one_third = 1.0 / 3.0;
double one_eight = 0.12; // note: 0.12, despite the name
double three_quarters = 0.76; // note: 0.76, despite the name; each row of N sums to 1.0 (0.12+0.12+0.76 and 0.12+0.44+0.44)
N(0, 0) = one_eight;
N(0, 1) = one_eight;
N(0, 2) = three_quarters;
N(1, 0) = three_quarters;
N(1, 1) = one_eight;
N(1, 2) = one_eight;
N(2, 0) = one_eight;
N(2, 1) = three_quarters;
N(2, 2) = one_eight;
N(3, 0) = one_third;
N(3, 1) = one_third;
N(3, 2) = one_third;
N(4, 0) = one_eight;
N(4, 1) = 0.44;
N(4, 2) = 0.44;
N(5, 0) = 0.44;
N(5, 1) = one_eight;
N(5, 2) = 0.44;
N(6, 0) = 0.44;
N(6, 1) = 0.44;
N(6, 2) = one_eight;
//first
pos(0, 0) = one_eight * geom[0].X() + one_eight * geom[1].X() + three_quarters * geom[2].X();
pos(0, 1) = one_eight * geom[0].Y() + one_eight * geom[1].Y() + three_quarters * geom[2].Y();
pos(0, 2) = one_eight * geom[0].Z() + one_eight * geom[1].Z() + three_quarters * geom[2].Z();
//second
pos(1, 0) = three_quarters * geom[0].X() + one_eight * geom[1].X() + one_eight * geom[2].X();
pos(1, 1) = three_quarters * geom[0].Y() + one_eight * geom[1].Y() + one_eight * geom[2].Y();
pos(1, 2) = three_quarters * geom[0].Z() + one_eight * geom[1].Z() + one_eight * geom[2].Z();
//third
pos(2, 0) = one_eight * geom[0].X() + three_quarters * geom[1].X() + one_eight * geom[2].X();
pos(2, 1) = one_eight * geom[0].Y() + three_quarters * geom[1].Y() + one_eight * geom[2].Y();
pos(2, 2) = one_eight * geom[0].Z() + three_quarters * geom[1].Z() + one_eight * geom[2].Z();
//fourth
pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X();
pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y();
pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z();
//fifth
pos(4, 0) = one_eight * geom[0].X() + 0.44 * geom[1].X() + 0.44 * geom[2].X();
pos(4, 1) = one_eight * geom[0].Y() + 0.44 * geom[1].Y() + 0.44 * geom[2].Y();
pos(4, 2) = one_eight * geom[0].Z() + 0.44 * geom[1].Z() + 0.44 * geom[2].Z();
//sixth
pos(5, 0) = 0.44 * geom[0].X() + one_eight * geom[1].X() + 0.44 * geom[2].X();
pos(5, 1) = 0.44 * geom[0].Y() + one_eight * geom[1].Y() + 0.44 * geom[2].Y();
pos(5, 2) = 0.44 * geom[0].Z() + one_eight * geom[1].Z() + 0.44 * geom[2].Z();
//seventh
pos(6, 0) = 0.44 * geom[0].X() + 0.44 * geom[1].X() + one_eight * geom[2].X();
pos(6, 1) = 0.44 * geom[0].Y() + 0.44 * geom[1].Y() + one_eight * geom[2].Y();
pos(6, 2) = 0.44 * geom[0].Z() + 0.44 * geom[1].Z() + one_eight * geom[2].Z();
}
/// Compute the Gauss points
/** For a tetrahedron
*
* @see PostReseed
*/
void ComputeGaussPointPositionsForPostReseed( Geometry< Node < 3 > >& geom,
BoundedMatrix<double, 9, 3 > & pos,
BoundedMatrix<double, 9, 4 > & N ) //3D
{
double one_quarter = 0.25;
double small_fraction = 0.1;
double big_fraction = 0.7; // each row of N sums to 1.0: 0.7 + 3*0.1
double mid_fraction = 0.3; // and 0.1 + 3*0.3
N(0, 0) = big_fraction;
N(0, 1) = small_fraction;
N(0, 2) = small_fraction;
N(0, 3) = small_fraction;
N(1, 0) = small_fraction;
N(1, 1) = big_fraction;
N(1, 2) = small_fraction;
N(1, 3) = small_fraction;
N(2, 0) = small_fraction;
N(2, 1) = small_fraction;
N(2, 2) = big_fraction;
N(2, 3) = small_fraction;
N(3, 0) = small_fraction;
N(3, 1) = small_fraction;
N(3, 2) = small_fraction;
N(3, 3) = big_fraction;
N(4, 0) = one_quarter;
N(4, 1) = one_quarter;
N(4, 2) = one_quarter;
N(4, 3) = one_quarter;
N(5, 0) = small_fraction;
N(5, 1) = mid_fraction;
N(5, 2) = mid_fraction;
N(5, 3) = mid_fraction;
N(6, 0) = mid_fraction;
N(6, 1) = small_fraction;
N(6, 2) = mid_fraction;
N(6, 3) = mid_fraction;
N(7, 0) = mid_fraction;
N(7, 1) = mid_fraction;
N(7, 2) = small_fraction;
N(7, 3) = mid_fraction;
N(8, 0) = mid_fraction;
N(8, 1) = mid_fraction;
N(8, 2) = mid_fraction;
N(8, 3) = small_fraction;
pos=ZeroMatrix(9,3);
for (unsigned int i=0; i!=4; i++) //going through the 4 nodes
{
array_1d<double, 3 > & coordinates = geom[i].Coordinates();
for (unsigned int j=0; j!=9; j++) //going through the 9 particles
{
for (unsigned int k=0; k!=3; k++) //x,y,z
pos(j,k) += N(j,i) * coordinates[k];
}
}
}
/// Compute the Gauss points
/** For a triangle
*
* @see PreReseed
*/
void ComputeGaussPointPositionsForPreReseed( Geometry< Node < 3 > >& geom,
BoundedMatrix<double, 3, 3 > & pos,
BoundedMatrix<double, 3, 3 > & N ) //2D
{
N(0, 0) = 0.5;
N(0, 1) = 0.25;
N(0, 2) = 0.25;
N(1, 0) = 0.25;
N(1, 1) = 0.5;
N(1, 2) = 0.25;
N(2, 0) = 0.25;
N(2, 1) = 0.25;
N(2, 2) = 0.5;
//first
pos(0, 0) = 0.5 * geom[0].X() + 0.25 * geom[1].X() + 0.25 * geom[2].X();
pos(0, 1) = 0.5 * geom[0].Y() + 0.25 * geom[1].Y() + 0.25 * geom[2].Y();
pos(0, 2) = 0.5 * geom[0].Z() + 0.25 * geom[1].Z() + 0.25 * geom[2].Z();
//second
pos(1, 0) = 0.25 * geom[0].X() + 0.5 * geom[1].X() + 0.25 * geom[2].X();
pos(1, 1) = 0.25 * geom[0].Y() + 0.5 * geom[1].Y() + 0.25 * geom[2].Y();
pos(1, 2) = 0.25 * geom[0].Z() + 0.5 * geom[1].Z() + 0.25 * geom[2].Z();
//third
pos(2, 0) = 0.25 * geom[0].X() + 0.25 * geom[1].X() + 0.5 * geom[2].X();
pos(2, 1) = 0.25 * geom[0].Y() + 0.25 * geom[1].Y() + 0.5 * geom[2].Y();
pos(2, 2) = 0.25 * geom[0].Z() + 0.25 * geom[1].Z() + 0.5 * geom[2].Z();
}
/// Compute the Gauss points
/** For a tetrahedron
*
* @see PreReseed
*/
void ComputeGaussPointPositionsForPreReseed( Geometry< Node < 3 > >& geom,
BoundedMatrix<double, 4, 3 > & pos,
BoundedMatrix<double, 4, 4 > & N ) //3D
{
//creating 4 particles, each will be closer to a node and equidistant to the other nodes
N(0, 0) = 0.4;
N(0, 1) = 0.2;
N(0, 2) = 0.2;
N(0, 3) = 0.2;
N(1, 0) = 0.2;
N(1, 1) = 0.4;
N(1, 2) = 0.2;
N(1, 3) = 0.2;
N(2, 0) = 0.2;
N(2, 1) = 0.2;
N(2, 2) = 0.4;
N(2, 3) = 0.2;
N(3, 0) = 0.2;
N(3, 1) = 0.2;
N(3, 2) = 0.2;
N(3, 3) = 0.4;
pos=ZeroMatrix(4,3);
for (unsigned int i=0; i!=4; i++) //going through the 4 nodes
{
array_1d<double, 3 > & coordinates = geom[i].Coordinates();
for (unsigned int j=0; j!=4; j++) //going through the 4 particles
{
for (unsigned int k=0; k!=3; k++) //x,y,z
pos(j,k) += N(j,i) * coordinates[k];
}
}
}
/// Compute the Gauss points
/**
*/
void ComputeGaussPointPositions_45( Geometry< Node < 3 > >& geom,
BoundedMatrix<double, 45, 3 > & pos,
BoundedMatrix<double, 45, 3 > & N )
{
unsigned int counter=0;
for (unsigned int i=0; i!=9;i++)
{
for (unsigned int j=0; j!=(9-i);j++)
{
N(counter,0)=0.05+double(i)*0.1;
N(counter,1)=0.05+double(j)*0.1;
N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ;
pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X();
pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y();
pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z();
counter++;
}
}
}
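// Sanity note (illustrative): the nested loops emit 9 + 8 + ... + 1 = 45
// points, matching the row count of the BoundedMatrix<double, 45, 3>
// arguments; each row of N sums to 1.0 by construction of N(counter, 2).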
/// Compute the Gauss points
/**
*/
void ComputeGaussPointPositions_initial( Geometry< Node < 3 > >& geom,
BoundedMatrix<double, 15, 3 > & pos,
BoundedMatrix<double, 15, 3 > & N ) //2D
{
unsigned int counter=0;
for (unsigned int i=0; i!=5;i++)
{
for (unsigned int j=0; j!=(5-i);j++)
{
N(counter,0)=0.05+double(i)*0.2;
N(counter,1)=0.05+double(j)*0.2;
N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ;
pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X();
pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y();
pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z();
counter++;
}
}
}
/// Compute the Gauss points
/**
*/
void ComputeGaussPointPositions_initial( Geometry< Node < 3 > >& geom,
BoundedMatrix<double, 20, 3 > & pos,
BoundedMatrix<double, 20, 4 > & N ) //3D
{
double fraction_increment;
unsigned int counter=0;
for (unsigned int i=0; i!=4;i++) //build a particle "pyramid" (tetrahedron) by layers; the first layer is a triangle with a base of 4 and a height of 4, so it holds 10 particles
{
for (unsigned int j=0; j!=(4-i);j++)
{
for (unsigned int k=0; k!=(4-i-j);k++)
{
N(counter,0)= 0.27 * ( 0.175 + double(i) ) ; //this is our "surface" on which we build each layer; the remaining shape function budget (the total is 1) is shared out as a triangle below
//total = 1.0 - N(counter,0);
fraction_increment = 0.27;
N(counter,1)=fraction_increment * (0.175 + double(j));
N(counter,2)=fraction_increment * (0.175 + double(k));
N(counter,3)=1.0 - ( N(counter,0)+ N(counter,1) + N(counter,2) ) ;
pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X() + N(counter,3) * geom[3].X();
pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y() + N(counter,3) * geom[3].Y();
pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z() + N(counter,3) * geom[3].Z();
counter++;
}
}
}
}
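// Sanity note (illustrative): the three nested loops generate triangular
// layers of 10, 6, 3 and 1 particles for i = 0..3, i.e. 20 points in total,
// matching the BoundedMatrix<double, 20, 3> position argument.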
/// check function
virtual int Check()
{
KRATOS_TRY
Node<3>& rnode = *mrModelPart.NodesBegin();
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(mVectorVar1, rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(mScalarVar1, rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(VELOCITY, rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(DELTA_VECTOR1, rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(DELTA_SCALAR1, rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(PROJECTED_VECTOR1, rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(PROJECTED_SCALAR1, rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(MEAN_SIZE, rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(YP, rnode)
return 0;
KRATOS_CATCH("")
}
/// Member variables
ModelPart& mrModelPart;
int mNParticles;
int mNElems;
int mOffset;
int mMaxSubSteps;
double mMaxSubStepDt;
int mMaxNumberOfParticles;
std::vector< ShallowParticle > mParticlesVector;
int mLastElemId;
bool mOddTimeStep;
bool mParticlePrintingToolInitialized;
unsigned int mLastNodeId;
DenseVector<int> mNumOfParticlesInElems;
DenseVector<int> mNumOfParticlesInElemsAux;
DenseVector<ParticlePointerVector> mVectorOfParticlePointersVectors;
typename BinsObjectDynamic<Configure>::Pointer mpBinsObjectDynamic;
Variable<double> mScalarVar1;
Variable<array_1d<double,3>> mVectorVar1;
std::string m_scalar_var1_name;
std::string m_vector_var1_name;
}; // class MoveShallowWaterParticleUtility
} // namespace Kratos.
#endif // KRATOS_MOVE_SHALLOW_WATER_PARTICLE_UTILITY_H_INCLUDED defined
|
6964.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "3mm.h"
/* Array initialization. */
static
void init_array(int ni, int nj, int nk, int nl, int nm,
DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nk; j++)
A[i][j] = ((DATA_TYPE) i*j) / ni;
for (i = 0; i < nk; i++)
for (j = 0; j < nj; j++)
B[i][j] = ((DATA_TYPE) i*(j+1)) / nj;
for (i = 0; i < nj; i++)
for (j = 0; j < nm; j++)
C[i][j] = ((DATA_TYPE) i*(j+3)) / nl;
for (i = 0; i < nm; i++)
for (j = 0; j < nl; j++)
D[i][j] = ((DATA_TYPE) i*(j+2)) / nk;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nl,
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nl; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]);
if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_3mm(int ni, int nj, int nk, int nl, int nm,
DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl),
DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl),
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
int i, j, k;
#pragma scop
#pragma omp parallel private (j, k) num_threads(2)
{
/* E := A*B */
#pragma omp for schedule(static, 1)
for (i = 0; i < _PB_NI; i++)
for (j = 0; j < _PB_NJ; j++)
{
E[i][j] = 0;
for (k = 0; k < _PB_NK; ++k)
E[i][j] += A[i][k] * B[k][j];
}
/* F := C*D */
#pragma omp for schedule(static, 1)
for (i = 0; i < _PB_NJ; i++)
for (j = 0; j < _PB_NL; j++)
{
F[i][j] = 0;
for (k = 0; k < _PB_NM; ++k)
F[i][j] += C[i][k] * D[k][j];
}
/* G := E*F */
#pragma omp for schedule(static, 1)
for (i = 0; i < _PB_NI; i++)
for (j = 0; j < _PB_NL; j++)
{
G[i][j] = 0;
for (k = 0; k < _PB_NJ; ++k)
G[i][j] += E[i][k] * F[k][j];
}
}
#pragma endscop
}
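/* Reference identity (illustrative, not part of the benchmark): the three
   loop nests above compute G = (A*B) * (C*D). A serial check of one entry
   G[i][j], assuming the same bounds, would read:

     DATA_TYPE g_ij = 0;
     for (int k = 0; k < _PB_NJ; ++k) {
       DATA_TYPE e_ik = 0, f_kj = 0;
       for (int m = 0; m < _PB_NK; ++m) e_ik += A[i][m] * B[m][k];
       for (int m = 0; m < _PB_NM; ++m) f_kj += C[k][m] * D[m][j];
       g_ij += e_ik * f_kj;
     }
*/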
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
int nk = NK;
int nl = NL;
int nm = NM;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj);
POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl);
POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm);
POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl);
POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl);
/* Initialize array(s). */
init_array (ni, nj, nk, nl, nm,
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(D));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_3mm (ni, nj, nk, nl, nm,
POLYBENCH_ARRAY(E),
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(F),
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(D),
POLYBENCH_ARRAY(G));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(E);
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
POLYBENCH_FREE_ARRAY(F);
POLYBENCH_FREE_ARRAY(C);
POLYBENCH_FREE_ARRAY(D);
POLYBENCH_FREE_ARRAY(G);
return 0;
}
|
convolution_2x2_pack8_fp16.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv2x2s1_weight_fp16_pack8_avx(const Mat& kernel, Mat& kernel_tm_pack8, int num_input, int num_output)
{
// src = kw-kh-inch-outch
// dst = 8b-8a-kw-kh-inch/8a-outch/8b
Mat weight_data_r2 = kernel.reshape(4, num_input, num_output);
kernel_tm_pack8.create(4, num_input / 8, num_output / 8, (size_t)2 * 64, 64);
for (int q = 0; q + 7 < num_output; q += 8)
{
const Mat k0 = weight_data_r2.channel(q);
const Mat k1 = weight_data_r2.channel(q + 1);
const Mat k2 = weight_data_r2.channel(q + 2);
const Mat k3 = weight_data_r2.channel(q + 3);
const Mat k4 = weight_data_r2.channel(q + 4);
const Mat k5 = weight_data_r2.channel(q + 5);
const Mat k6 = weight_data_r2.channel(q + 6);
const Mat k7 = weight_data_r2.channel(q + 7);
Mat g0 = kernel_tm_pack8.channel(q / 8);
for (int p = 0; p + 7 < num_input; p += 8)
{
const float* k00 = k0.row(p);
const float* k01 = k0.row(p + 1);
const float* k02 = k0.row(p + 2);
const float* k03 = k0.row(p + 3);
const float* k04 = k0.row(p + 4);
const float* k05 = k0.row(p + 5);
const float* k06 = k0.row(p + 6);
const float* k07 = k0.row(p + 7);
const float* k10 = k1.row(p);
const float* k11 = k1.row(p + 1);
const float* k12 = k1.row(p + 2);
const float* k13 = k1.row(p + 3);
const float* k14 = k1.row(p + 4);
const float* k15 = k1.row(p + 5);
const float* k16 = k1.row(p + 6);
const float* k17 = k1.row(p + 7);
const float* k20 = k2.row(p);
const float* k21 = k2.row(p + 1);
const float* k22 = k2.row(p + 2);
const float* k23 = k2.row(p + 3);
const float* k24 = k2.row(p + 4);
const float* k25 = k2.row(p + 5);
const float* k26 = k2.row(p + 6);
const float* k27 = k2.row(p + 7);
const float* k30 = k3.row(p);
const float* k31 = k3.row(p + 1);
const float* k32 = k3.row(p + 2);
const float* k33 = k3.row(p + 3);
const float* k34 = k3.row(p + 4);
const float* k35 = k3.row(p + 5);
const float* k36 = k3.row(p + 6);
const float* k37 = k3.row(p + 7);
const float* k40 = k4.row(p);
const float* k41 = k4.row(p + 1);
const float* k42 = k4.row(p + 2);
const float* k43 = k4.row(p + 3);
const float* k44 = k4.row(p + 4);
const float* k45 = k4.row(p + 5);
const float* k46 = k4.row(p + 6);
const float* k47 = k4.row(p + 7);
const float* k50 = k5.row(p);
const float* k51 = k5.row(p + 1);
const float* k52 = k5.row(p + 2);
const float* k53 = k5.row(p + 3);
const float* k54 = k5.row(p + 4);
const float* k55 = k5.row(p + 5);
const float* k56 = k5.row(p + 6);
const float* k57 = k5.row(p + 7);
const float* k60 = k6.row(p);
const float* k61 = k6.row(p + 1);
const float* k62 = k6.row(p + 2);
const float* k63 = k6.row(p + 3);
const float* k64 = k6.row(p + 4);
const float* k65 = k6.row(p + 5);
const float* k66 = k6.row(p + 6);
const float* k67 = k6.row(p + 7);
const float* k70 = k7.row(p);
const float* k71 = k7.row(p + 1);
const float* k72 = k7.row(p + 2);
const float* k73 = k7.row(p + 3);
const float* k74 = k7.row(p + 4);
const float* k75 = k7.row(p + 5);
const float* k76 = k7.row(p + 6);
const float* k77 = k7.row(p + 7);
unsigned short* g00 = (unsigned short*)g0.row(p / 8);
for (int k = 0; k < 4; k++)
{
g00[0] = float32_to_float16(k00[k]);
g00[1] = float32_to_float16(k10[k]);
g00[2] = float32_to_float16(k20[k]);
g00[3] = float32_to_float16(k30[k]);
g00[4] = float32_to_float16(k40[k]);
g00[5] = float32_to_float16(k50[k]);
g00[6] = float32_to_float16(k60[k]);
g00[7] = float32_to_float16(k70[k]);
g00 += 8;
g00[0] = float32_to_float16(k01[k]);
g00[1] = float32_to_float16(k11[k]);
g00[2] = float32_to_float16(k21[k]);
g00[3] = float32_to_float16(k31[k]);
g00[4] = float32_to_float16(k41[k]);
g00[5] = float32_to_float16(k51[k]);
g00[6] = float32_to_float16(k61[k]);
g00[7] = float32_to_float16(k71[k]);
g00 += 8;
g00[0] = float32_to_float16(k02[k]);
g00[1] = float32_to_float16(k12[k]);
g00[2] = float32_to_float16(k22[k]);
g00[3] = float32_to_float16(k32[k]);
g00[4] = float32_to_float16(k42[k]);
g00[5] = float32_to_float16(k52[k]);
g00[6] = float32_to_float16(k62[k]);
g00[7] = float32_to_float16(k72[k]);
g00 += 8;
g00[0] = float32_to_float16(k03[k]);
g00[1] = float32_to_float16(k13[k]);
g00[2] = float32_to_float16(k23[k]);
g00[3] = float32_to_float16(k33[k]);
g00[4] = float32_to_float16(k43[k]);
g00[5] = float32_to_float16(k53[k]);
g00[6] = float32_to_float16(k63[k]);
g00[7] = float32_to_float16(k73[k]);
g00 += 8;
g00[0] = float32_to_float16(k04[k]);
g00[1] = float32_to_float16(k14[k]);
g00[2] = float32_to_float16(k24[k]);
g00[3] = float32_to_float16(k34[k]);
g00[4] = float32_to_float16(k44[k]);
g00[5] = float32_to_float16(k54[k]);
g00[6] = float32_to_float16(k64[k]);
g00[7] = float32_to_float16(k74[k]);
g00 += 8;
g00[0] = float32_to_float16(k05[k]);
g00[1] = float32_to_float16(k15[k]);
g00[2] = float32_to_float16(k25[k]);
g00[3] = float32_to_float16(k35[k]);
g00[4] = float32_to_float16(k45[k]);
g00[5] = float32_to_float16(k55[k]);
g00[6] = float32_to_float16(k65[k]);
g00[7] = float32_to_float16(k75[k]);
g00 += 8;
g00[0] = float32_to_float16(k06[k]);
g00[1] = float32_to_float16(k16[k]);
g00[2] = float32_to_float16(k26[k]);
g00[3] = float32_to_float16(k36[k]);
g00[4] = float32_to_float16(k46[k]);
g00[5] = float32_to_float16(k56[k]);
g00[6] = float32_to_float16(k66[k]);
g00[7] = float32_to_float16(k76[k]);
g00 += 8;
g00[0] = float32_to_float16(k07[k]);
g00[1] = float32_to_float16(k17[k]);
g00[2] = float32_to_float16(k27[k]);
g00[3] = float32_to_float16(k37[k]);
g00[4] = float32_to_float16(k47[k]);
g00[5] = float32_to_float16(k57[k]);
g00[6] = float32_to_float16(k67[k]);
g00[7] = float32_to_float16(k77[k]);
g00 += 8;
}
}
}
}
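// Layout note (illustrative): for each (outch/8, inch/8) block the loop above
// writes, per kernel position k (4 positions for a 2x2 kernel), 8 input lanes
// x 8 output lanes = 64 fp16 values, i.e. 4 * 64 = 256 values per block. This
// matches kernel_tm_pack8.create(4, num_input / 8, num_output / 8, 2 * 64, 64):
// elements of 128 bytes, each holding 64 half-precision weights.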
static void conv2x2s1_fp16_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
__m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f);
out0.fill(_bias0);
for (int q = 0; q < inch; q++)
{
float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p).row(q);
// const float* kptr = (const float*)kernel + 4 * inch * p * 64;
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 1 < outw; j += 2)
{
__m256 _sum0 = _mm256_loadu_ps(outptr0);
__m256 _sum1 = _mm256_loadu_ps(outptr0 + 8);
__m256 _r00 = _mm256_broadcast_ss(r0);
__m256 _r01 = _mm256_broadcast_ss(r0 + 1);
__m256 _r02 = _mm256_broadcast_ss(r0 + 2);
__m256 _r03 = _mm256_broadcast_ss(r0 + 3);
__m256 _r04 = _mm256_broadcast_ss(r0 + 4);
__m256 _r05 = _mm256_broadcast_ss(r0 + 5);
__m256 _r06 = _mm256_broadcast_ss(r0 + 6);
__m256 _r07 = _mm256_broadcast_ss(r0 + 7);
r0 += 8;
__m256 _k00 = loadfp16(kptr);
__m256 _k01 = loadfp16(kptr + 8);
__m256 _k02 = loadfp16(kptr + 16);
__m256 _k03 = loadfp16(kptr + 24);
kptr += 32;
_sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm256_fmadd_ps(_k03, _r03, _sum0);
__m256 _k04 = loadfp16(kptr);
__m256 _k05 = loadfp16(kptr + 8);
__m256 _k06 = loadfp16(kptr + 16);
__m256 _k07 = loadfp16(kptr + 24);
kptr += 32;
_sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0);
_sum0 = _mm256_fmadd_ps(_k05, _r05, _sum0);
_sum0 = _mm256_fmadd_ps(_k06, _r06, _sum0);
_sum0 = _mm256_fmadd_ps(_k07, _r07, _sum0);
//========================================
_r00 = _mm256_broadcast_ss(r0);
_r01 = _mm256_broadcast_ss(r0 + 1);
_r02 = _mm256_broadcast_ss(r0 + 2);
_r03 = _mm256_broadcast_ss(r0 + 3);
_r04 = _mm256_broadcast_ss(r0 + 4);
_r05 = _mm256_broadcast_ss(r0 + 5);
_r06 = _mm256_broadcast_ss(r0 + 6);
_r07 = _mm256_broadcast_ss(r0 + 7);
r0 += 8;
_sum1 = _mm256_fmadd_ps(_k00, _r00, _sum1);
_sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1);
_sum1 = _mm256_fmadd_ps(_k02, _r02, _sum1);
_sum1 = _mm256_fmadd_ps(_k03, _r03, _sum1);
_sum1 = _mm256_fmadd_ps(_k04, _r04, _sum1);
_sum1 = _mm256_fmadd_ps(_k05, _r05, _sum1);
_sum1 = _mm256_fmadd_ps(_k06, _r06, _sum1);
_sum1 = _mm256_fmadd_ps(_k07, _r07, _sum1);
_k00 = loadfp16(kptr);
_k01 = loadfp16(kptr + 8);
_k02 = loadfp16(kptr + 16);
_k03 = loadfp16(kptr + 24);
kptr += 32;
_sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm256_fmadd_ps(_k03, _r03, _sum0);
_k04 = loadfp16(kptr);
_k05 = loadfp16(kptr + 8);
_k06 = loadfp16(kptr + 16);
_k07 = loadfp16(kptr + 24);
kptr += 32;
_sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0);
_sum0 = _mm256_fmadd_ps(_k05, _r05, _sum0);
_sum0 = _mm256_fmadd_ps(_k06, _r06, _sum0);
_sum0 = _mm256_fmadd_ps(_k07, _r07, _sum0);
_r00 = _mm256_broadcast_ss(r0);
_r01 = _mm256_broadcast_ss(r0 + 1);
_r02 = _mm256_broadcast_ss(r0 + 2);
_r03 = _mm256_broadcast_ss(r0 + 3);
_r04 = _mm256_broadcast_ss(r0 + 4);
_r05 = _mm256_broadcast_ss(r0 + 5);
_r06 = _mm256_broadcast_ss(r0 + 6);
_r07 = _mm256_broadcast_ss(r0 + 7);
_sum1 = _mm256_fmadd_ps(_k00, _r00, _sum1);
_sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1);
_sum1 = _mm256_fmadd_ps(_k02, _r02, _sum1);
_sum1 = _mm256_fmadd_ps(_k03, _r03, _sum1);
_sum1 = _mm256_fmadd_ps(_k04, _r04, _sum1);
_sum1 = _mm256_fmadd_ps(_k05, _r05, _sum1);
_sum1 = _mm256_fmadd_ps(_k06, _r06, _sum1);
_sum1 = _mm256_fmadd_ps(_k07, _r07, _sum1);
//===============
__m256 _r10 = _mm256_broadcast_ss(r1);
__m256 _r11 = _mm256_broadcast_ss(r1 + 1);
__m256 _r12 = _mm256_broadcast_ss(r1 + 2);
__m256 _r13 = _mm256_broadcast_ss(r1 + 3);
__m256 _r14 = _mm256_broadcast_ss(r1 + 4);
__m256 _r15 = _mm256_broadcast_ss(r1 + 5);
__m256 _r16 = _mm256_broadcast_ss(r1 + 6);
__m256 _r17 = _mm256_broadcast_ss(r1 + 7);
__m256 _k10 = loadfp16(kptr);
__m256 _k11 = loadfp16(kptr + 8);
__m256 _k12 = loadfp16(kptr + 16);
__m256 _k13 = loadfp16(kptr + 24);
kptr += 32;
_sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm256_fmadd_ps(_k13, _r13, _sum0);
__m256 _k14 = loadfp16(kptr);
__m256 _k15 = loadfp16(kptr + 8);
__m256 _k16 = loadfp16(kptr + 16);
__m256 _k17 = loadfp16(kptr + 24);
kptr += 32;
_sum0 = _mm256_fmadd_ps(_k14, _r14, _sum0);
_sum0 = _mm256_fmadd_ps(_k15, _r15, _sum0);
_sum0 = _mm256_fmadd_ps(_k16, _r16, _sum0);
_sum0 = _mm256_fmadd_ps(_k17, _r17, _sum0);
//=======================================
r1 += 8;
_r10 = _mm256_broadcast_ss(r1);
_r11 = _mm256_broadcast_ss(r1 + 1);
_r12 = _mm256_broadcast_ss(r1 + 2);
_r13 = _mm256_broadcast_ss(r1 + 3);
_r14 = _mm256_broadcast_ss(r1 + 4);
_r15 = _mm256_broadcast_ss(r1 + 5);
_r16 = _mm256_broadcast_ss(r1 + 6);
_r17 = _mm256_broadcast_ss(r1 + 7);
_sum1 = _mm256_fmadd_ps(_k10, _r10, _sum1);
_sum1 = _mm256_fmadd_ps(_k11, _r11, _sum1);
_sum1 = _mm256_fmadd_ps(_k12, _r12, _sum1);
_sum1 = _mm256_fmadd_ps(_k13, _r13, _sum1);
_sum1 = _mm256_fmadd_ps(_k14, _r14, _sum1);
_sum1 = _mm256_fmadd_ps(_k15, _r15, _sum1);
_sum1 = _mm256_fmadd_ps(_k16, _r16, _sum1);
_sum1 = _mm256_fmadd_ps(_k17, _r17, _sum1);
_k10 = loadfp16(kptr);
_k11 = loadfp16(kptr + 8);
_k12 = loadfp16(kptr + 16);
_k13 = loadfp16(kptr + 24);
kptr += 32;
_sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm256_fmadd_ps(_k13, _r13, _sum0);
_k14 = loadfp16(kptr);
_k15 = loadfp16(kptr + 8);
_k16 = loadfp16(kptr + 16);
_k17 = loadfp16(kptr + 24);
_sum0 = _mm256_fmadd_ps(_k14, _r14, _sum0);
_sum0 = _mm256_fmadd_ps(_k15, _r15, _sum0);
_sum0 = _mm256_fmadd_ps(_k16, _r16, _sum0);
_sum0 = _mm256_fmadd_ps(_k17, _r17, _sum0);
r1 += 8;
_r10 = _mm256_broadcast_ss(r1);
_r11 = _mm256_broadcast_ss(r1 + 1);
_r12 = _mm256_broadcast_ss(r1 + 2);
_r13 = _mm256_broadcast_ss(r1 + 3);
_r14 = _mm256_broadcast_ss(r1 + 4);
_r15 = _mm256_broadcast_ss(r1 + 5);
_r16 = _mm256_broadcast_ss(r1 + 6);
_r17 = _mm256_broadcast_ss(r1 + 7);
_sum1 = _mm256_fmadd_ps(_k10, _r10, _sum1);
_sum1 = _mm256_fmadd_ps(_k11, _r11, _sum1);
_sum1 = _mm256_fmadd_ps(_k12, _r12, _sum1);
_sum1 = _mm256_fmadd_ps(_k13, _r13, _sum1);
_sum1 = _mm256_fmadd_ps(_k14, _r14, _sum1);
_sum1 = _mm256_fmadd_ps(_k15, _r15, _sum1);
_sum1 = _mm256_fmadd_ps(_k16, _r16, _sum1);
_sum1 = _mm256_fmadd_ps(_k17, _r17, _sum1);
kptr -= 224; // rewind to the start of this input channel's packed kernel block: the seven 32-short advances above are undone so the next output position reuses the same weights
_mm256_storeu_ps(outptr0, _sum0);
_mm256_storeu_ps(outptr0 + 8, _sum1);
outptr0 += 16;
}
for (; j < outw; j++)
{
__m256 _sum = _mm256_loadu_ps(outptr0);
__m256 _r00 = _mm256_broadcast_ss(r0);
__m256 _r01 = _mm256_broadcast_ss(r0 + 1);
__m256 _r02 = _mm256_broadcast_ss(r0 + 2);
__m256 _r03 = _mm256_broadcast_ss(r0 + 3);
__m256 _r04 = _mm256_broadcast_ss(r0 + 4);
__m256 _r05 = _mm256_broadcast_ss(r0 + 5);
__m256 _r06 = _mm256_broadcast_ss(r0 + 6);
__m256 _r07 = _mm256_broadcast_ss(r0 + 7);
__m256 _k00 = loadfp16(kptr);
__m256 _k01 = loadfp16(kptr + 8);
__m256 _k02 = loadfp16(kptr + 16);
__m256 _k03 = loadfp16(kptr + 24);
kptr += 32;
_sum = _mm256_fmadd_ps(_k00, _r00, _sum);
_sum = _mm256_fmadd_ps(_k01, _r01, _sum);
_sum = _mm256_fmadd_ps(_k02, _r02, _sum);
_sum = _mm256_fmadd_ps(_k03, _r03, _sum);
__m256 _k04 = loadfp16(kptr);
__m256 _k05 = loadfp16(kptr + 8);
__m256 _k06 = loadfp16(kptr + 16);
__m256 _k07 = loadfp16(kptr + 24);
kptr += 32;
_sum = _mm256_fmadd_ps(_k04, _r04, _sum);
_sum = _mm256_fmadd_ps(_k05, _r05, _sum);
_sum = _mm256_fmadd_ps(_k06, _r06, _sum);
_sum = _mm256_fmadd_ps(_k07, _r07, _sum);
//========================================
r0 += 8;
_r00 = _mm256_broadcast_ss(r0);
_r01 = _mm256_broadcast_ss(r0 + 1);
_r02 = _mm256_broadcast_ss(r0 + 2);
_r03 = _mm256_broadcast_ss(r0 + 3);
_r04 = _mm256_broadcast_ss(r0 + 4);
_r05 = _mm256_broadcast_ss(r0 + 5);
_r06 = _mm256_broadcast_ss(r0 + 6);
_r07 = _mm256_broadcast_ss(r0 + 7);
_k00 = loadfp16(kptr);
_k01 = loadfp16(kptr + 8);
_k02 = loadfp16(kptr + 16);
_k03 = loadfp16(kptr + 24);
kptr += 32;
_sum = _mm256_fmadd_ps(_k00, _r00, _sum);
_sum = _mm256_fmadd_ps(_k01, _r01, _sum);
_sum = _mm256_fmadd_ps(_k02, _r02, _sum);
_sum = _mm256_fmadd_ps(_k03, _r03, _sum);
_k04 = loadfp16(kptr);
_k05 = loadfp16(kptr + 8);
_k06 = loadfp16(kptr + 16);
_k07 = loadfp16(kptr + 24);
kptr += 32;
_sum = _mm256_fmadd_ps(_k04, _r04, _sum);
_sum = _mm256_fmadd_ps(_k05, _r05, _sum);
_sum = _mm256_fmadd_ps(_k06, _r06, _sum);
_sum = _mm256_fmadd_ps(_k07, _r07, _sum);
//===============
__m256 _r10 = _mm256_broadcast_ss(r1);
__m256 _r11 = _mm256_broadcast_ss(r1 + 1);
__m256 _r12 = _mm256_broadcast_ss(r1 + 2);
__m256 _r13 = _mm256_broadcast_ss(r1 + 3);
__m256 _r14 = _mm256_broadcast_ss(r1 + 4);
__m256 _r15 = _mm256_broadcast_ss(r1 + 5);
__m256 _r16 = _mm256_broadcast_ss(r1 + 6);
__m256 _r17 = _mm256_broadcast_ss(r1 + 7);
__m256 _k10 = loadfp16(kptr);
__m256 _k11 = loadfp16(kptr + 8);
__m256 _k12 = loadfp16(kptr + 16);
__m256 _k13 = loadfp16(kptr + 24);
kptr += 32;
_sum = _mm256_fmadd_ps(_k10, _r10, _sum);
_sum = _mm256_fmadd_ps(_k11, _r11, _sum);
_sum = _mm256_fmadd_ps(_k12, _r12, _sum);
_sum = _mm256_fmadd_ps(_k13, _r13, _sum);
__m256 _k14 = loadfp16(kptr);
__m256 _k15 = loadfp16(kptr + 8);
__m256 _k16 = loadfp16(kptr + 16);
__m256 _k17 = loadfp16(kptr + 24);
kptr += 32;
_sum = _mm256_fmadd_ps(_k14, _r14, _sum);
_sum = _mm256_fmadd_ps(_k15, _r15, _sum);
_sum = _mm256_fmadd_ps(_k16, _r16, _sum);
_sum = _mm256_fmadd_ps(_k17, _r17, _sum);
//=======================================
r1 += 8;
_r10 = _mm256_broadcast_ss(r1);
_r11 = _mm256_broadcast_ss(r1 + 1);
_r12 = _mm256_broadcast_ss(r1 + 2);
_r13 = _mm256_broadcast_ss(r1 + 3);
_r14 = _mm256_broadcast_ss(r1 + 4);
_r15 = _mm256_broadcast_ss(r1 + 5);
_r16 = _mm256_broadcast_ss(r1 + 6);
_r17 = _mm256_broadcast_ss(r1 + 7);
_k10 = loadfp16(kptr);
_k11 = loadfp16(kptr + 8);
_k12 = loadfp16(kptr + 16);
_k13 = loadfp16(kptr + 24);
kptr += 32;
_sum = _mm256_fmadd_ps(_k10, _r10, _sum);
_sum = _mm256_fmadd_ps(_k11, _r11, _sum);
_sum = _mm256_fmadd_ps(_k12, _r12, _sum);
_sum = _mm256_fmadd_ps(_k13, _r13, _sum);
_k14 = loadfp16(kptr);
_k15 = loadfp16(kptr + 8);
_k16 = loadfp16(kptr + 16);
_k17 = loadfp16(kptr + 24);
_sum = _mm256_fmadd_ps(_k14, _r14, _sum);
_sum = _mm256_fmadd_ps(_k15, _r15, _sum);
_sum = _mm256_fmadd_ps(_k16, _r16, _sum);
_sum = _mm256_fmadd_ps(_k17, _r17, _sum);
kptr -= 224; // rewind to the start of this input channel's packed kernel block: the seven 32-short advances above are undone so the next output position reuses the same weights
_mm256_storeu_ps(outptr0, _sum);
outptr0 += 8;
}
r0 += 8;
r1 += 8;
}
}
}
}
|
CGOpenMPRuntime.h | //===----- CGOpenMPRuntime.h - Interface to OpenMP Runtimes -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides a class for OpenMP runtime code generation.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
#define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
#include "CGValue.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/AST/Type.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/AtomicOrdering.h"
namespace llvm {
class ArrayType;
class Constant;
class FunctionType;
class GlobalVariable;
class Type;
class Value;
class OpenMPIRBuilder;
} // namespace llvm
namespace clang {
class Expr;
class OMPDependClause;
class OMPExecutableDirective;
class OMPLoopDirective;
class VarDecl;
class OMPDeclareReductionDecl;
namespace CodeGen {
class Address;
class CodeGenFunction;
class CodeGenModule;
/// A basic class for pre|post-action for advanced codegen sequence for OpenMP
/// region.
class PrePostActionTy {
public:
explicit PrePostActionTy() {}
virtual void Enter(CodeGenFunction &CGF) {}
virtual void Exit(CodeGenFunction &CGF) {}
virtual ~PrePostActionTy() {}
};
/// Class provides a way to call a simple version of codegen for an OpenMP
/// region, or an advanced one with possible pre|post-actions in codegen.
class RegionCodeGenTy final {
intptr_t CodeGen;
typedef void (*CodeGenTy)(intptr_t, CodeGenFunction &, PrePostActionTy &);
CodeGenTy Callback;
mutable PrePostActionTy *PrePostAction;
RegionCodeGenTy() = delete;
template <typename Callable>
static void CallbackFn(intptr_t CodeGen, CodeGenFunction &CGF,
PrePostActionTy &Action) {
return (*reinterpret_cast<Callable *>(CodeGen))(CGF, Action);
}
public:
template <typename Callable>
RegionCodeGenTy(
Callable &&CodeGen,
std::enable_if_t<!std::is_same<std::remove_reference_t<Callable>,
RegionCodeGenTy>::value> * = nullptr)
: CodeGen(reinterpret_cast<intptr_t>(&CodeGen)),
Callback(CallbackFn<std::remove_reference_t<Callable>>),
PrePostAction(nullptr) {}
void setAction(PrePostActionTy &Action) const { PrePostAction = &Action; }
void operator()(CodeGenFunction &CGF) const;
};
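// Usage sketch (illustrative, not part of the upstream interface): any
// callable with the signature (CodeGenFunction &, PrePostActionTy &) can be
// wrapped, e.g.
//
//   auto &&BodyGen = [](CodeGenFunction &CGF, PrePostActionTy &Action) {
//     // emit the region body with CGF here
//   };
//   RegionCodeGenTy CodeGen(BodyGen);
//   CodeGen(CGF); // dispatches to BodyGen through the stored callback
//
// Note that only the address of the callable is stored, so the callable must
// outlive the RegionCodeGenTy object.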
struct OMPTaskDataTy final {
SmallVector<const Expr *, 4> PrivateVars;
SmallVector<const Expr *, 4> PrivateCopies;
SmallVector<const Expr *, 4> FirstprivateVars;
SmallVector<const Expr *, 4> FirstprivateCopies;
SmallVector<const Expr *, 4> FirstprivateInits;
SmallVector<const Expr *, 4> LastprivateVars;
SmallVector<const Expr *, 4> LastprivateCopies;
SmallVector<const Expr *, 4> ReductionVars;
SmallVector<const Expr *, 4> ReductionOrigs;
SmallVector<const Expr *, 4> ReductionCopies;
SmallVector<const Expr *, 4> ReductionOps;
SmallVector<CanonicalDeclPtr<const VarDecl>, 4> PrivateLocals;
struct DependData {
OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
const Expr *IteratorExpr = nullptr;
SmallVector<const Expr *, 4> DepExprs;
explicit DependData() = default;
DependData(OpenMPDependClauseKind DepKind, const Expr *IteratorExpr)
: DepKind(DepKind), IteratorExpr(IteratorExpr) {}
};
SmallVector<DependData, 4> Dependences;
llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
llvm::PointerIntPair<llvm::Value *, 1, bool> Schedule;
llvm::PointerIntPair<llvm::Value *, 1, bool> Priority;
llvm::Value *Reductions = nullptr;
unsigned NumberOfParts = 0;
bool Tied = true;
bool Nogroup = false;
bool IsReductionWithTaskMod = false;
bool IsWorksharingReduction = false;
};
/// Class intended to support codegen of all kind of the reduction clauses.
class ReductionCodeGen {
private:
/// Data required for codegen of reduction clauses.
struct ReductionData {
/// Reference to the item shared between tasks to reduce into.
const Expr *Shared = nullptr;
/// Reference to the original item.
const Expr *Ref = nullptr;
/// Helper expression for generation of private copy.
const Expr *Private = nullptr;
/// Helper expression for generation reduction operation.
const Expr *ReductionOp = nullptr;
ReductionData(const Expr *Shared, const Expr *Ref, const Expr *Private,
const Expr *ReductionOp)
: Shared(Shared), Ref(Ref), Private(Private), ReductionOp(ReductionOp) {
}
};
/// List of reduction-based clauses.
SmallVector<ReductionData, 4> ClausesData;
/// List of addresses of shared variables/expressions.
SmallVector<std::pair<LValue, LValue>, 4> SharedAddresses;
/// List of addresses of original variables/expressions.
SmallVector<std::pair<LValue, LValue>, 4> OrigAddresses;
/// Sizes of the reduction items in chars.
SmallVector<std::pair<llvm::Value *, llvm::Value *>, 4> Sizes;
/// Base declarations for the reduction items.
SmallVector<const VarDecl *, 4> BaseDecls;
/// Emits lvalue for shared expression.
LValue emitSharedLValue(CodeGenFunction &CGF, const Expr *E);
/// Emits upper bound for shared expression (if array section).
LValue emitSharedLValueUB(CodeGenFunction &CGF, const Expr *E);
/// Performs aggregate initialization.
/// \param N Number of reduction item in the common list.
/// \param PrivateAddr Address of the corresponding private item.
/// \param SharedAddr Address of the original shared variable.
/// \param DRD Declare reduction construct used for reduction item.
void emitAggregateInitialization(CodeGenFunction &CGF, unsigned N,
Address PrivateAddr, Address SharedAddr,
const OMPDeclareReductionDecl *DRD);
public:
ReductionCodeGen(ArrayRef<const Expr *> Shareds, ArrayRef<const Expr *> Origs,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> ReductionOps);
/// Emits lvalue for the shared and original reduction item.
/// \param N Number of the reduction item.
void emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N);
/// Emits the code for the variable-modified type, if required.
/// \param N Number of the reduction item.
void emitAggregateType(CodeGenFunction &CGF, unsigned N);
/// Emits the code for the variable-modified type, if required.
/// \param N Number of the reduction item.
/// \param Size Size of the type in chars.
void emitAggregateType(CodeGenFunction &CGF, unsigned N, llvm::Value *Size);
/// Performs initialization of the private copy for the reduction item.
/// \param N Number of the reduction item.
/// \param PrivateAddr Address of the corresponding private item.
/// \param DefaultInit Default initialization sequence that should be
/// performed if no reduction specific initialization is found.
/// \param SharedAddr Address of the original shared variable.
void
emitInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr,
Address SharedAddr,
llvm::function_ref<bool(CodeGenFunction &)> DefaultInit);
/// Returns true if the private copy requires cleanups.
bool needCleanups(unsigned N);
/// Emits cleanup code for the reduction item.
/// \param N Number of the reduction item.
/// \param PrivateAddr Address of the corresponding private item.
void emitCleanups(CodeGenFunction &CGF, unsigned N, Address PrivateAddr);
/// Adjusts \p PrivateAddr for using instead of the original variable
/// address in normal operations.
/// \param N Number of the reduction item.
/// \param PrivateAddr Address of the corresponding private item.
Address adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
Address PrivateAddr);
/// Returns LValue for the reduction item.
LValue getSharedLValue(unsigned N) const { return SharedAddresses[N].first; }
/// Returns LValue for the original reduction item.
LValue getOrigLValue(unsigned N) const { return OrigAddresses[N].first; }
/// Returns the size of the reduction item (in chars and total number of
/// elements in the item), or nullptr, if the size is a constant.
std::pair<llvm::Value *, llvm::Value *> getSizes(unsigned N) const {
return Sizes[N];
}
/// Returns the base declaration of the reduction item.
const VarDecl *getBaseDecl(unsigned N) const { return BaseDecls[N]; }
/// Returns the base declaration of the reduction item.
const Expr *getRefExpr(unsigned N) const { return ClausesData[N].Ref; }
/// Returns true if the initialization of the reduction item uses initializer
/// from declare reduction construct.
bool usesReductionInitializer(unsigned N) const;
/// Return the type of the private item.
QualType getPrivateType(unsigned N) const {
return cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl())
->getType();
}
};
class CGOpenMPRuntime {
public:
/// Allows to disable automatic handling of functions used in target regions
/// as those marked as `omp declare target`.
class DisableAutoDeclareTargetRAII {
CodeGenModule &CGM;
bool SavedShouldMarkAsGlobal;
public:
DisableAutoDeclareTargetRAII(CodeGenModule &CGM);
~DisableAutoDeclareTargetRAII();
};
/// Manages list of nontemporal decls for the specified directive.
class NontemporalDeclsRAII {
CodeGenModule &CGM;
const bool NeedToPush;
public:
NontemporalDeclsRAII(CodeGenModule &CGM, const OMPLoopDirective &S);
~NontemporalDeclsRAII();
};
/// Manages list of nontemporal decls for the specified directive.
class UntiedTaskLocalDeclsRAII {
CodeGenModule &CGM;
const bool NeedToPush;
public:
UntiedTaskLocalDeclsRAII(
CodeGenFunction &CGF,
const llvm::MapVector<CanonicalDeclPtr<const VarDecl>,
std::pair<Address, Address>> &LocalVars);
~UntiedTaskLocalDeclsRAII();
};
/// Maps the expression for the lastprivate variable to the global copy used
/// to store new value because original variables are not mapped in inner
/// parallel regions. Only private copies are captured but we need also to
/// store private copy in shared address.
/// Also, stores the expression for the private loop counter and it
/// threaprivate name.
struct LastprivateConditionalData {
llvm::MapVector<CanonicalDeclPtr<const Decl>, SmallString<16>>
DeclToUniqueName;
LValue IVLVal;
llvm::Function *Fn = nullptr;
bool Disabled = false;
};
/// Manages list of lastprivate conditional decls for the specified directive.
class LastprivateConditionalRAII {
enum class ActionToDo {
DoNotPush,
PushAsLastprivateConditional,
DisableLastprivateConditional,
};
CodeGenModule &CGM;
ActionToDo Action = ActionToDo::DoNotPush;
/// Check and try to disable analysis of inner regions for changes in
/// lastprivate conditional.
void tryToDisableInnerAnalysis(const OMPExecutableDirective &S,
llvm::DenseSet<CanonicalDeclPtr<const Decl>>
&NeedToAddForLPCsAsDisabled) const;
LastprivateConditionalRAII(CodeGenFunction &CGF,
const OMPExecutableDirective &S);
public:
explicit LastprivateConditionalRAII(CodeGenFunction &CGF,
const OMPExecutableDirective &S,
LValue IVLVal);
static LastprivateConditionalRAII disable(CodeGenFunction &CGF,
const OMPExecutableDirective &S);
~LastprivateConditionalRAII();
};
llvm::OpenMPIRBuilder &getOMPBuilder() { return OMPBuilder; }
protected:
CodeGenModule &CGM;
StringRef FirstSeparator, Separator;
/// An OpenMP-IR-Builder instance.
llvm::OpenMPIRBuilder OMPBuilder;
/// Constructor allowing to redefine the name separator for the variables.
explicit CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
StringRef Separator);
/// Creates offloading entry for the provided entry ID \a ID,
/// address \a Addr, size \a Size, and flags \a Flags.
virtual void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr,
uint64_t Size, int32_t Flags,
llvm::GlobalValue::LinkageTypes Linkage);
/// Helper to emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Lambda codegen specific to an accelerator device.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
virtual void emitTargetOutlinedFunctionHelper(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
/// Emits object of ident_t type with info for source location.
/// \param Flags Flags for OpenMP location.
///
llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc,
unsigned Flags = 0);
/// Emit the number of teams for a target directive. Inspect the num_teams
/// clause associated with a teams construct combined or closely nested
/// with the target directive.
///
/// Emit a team of size one for directives such as 'target parallel' that
/// have no associated teams construct.
///
/// Otherwise, return nullptr.
const Expr *getNumTeamsExprForTargetDirective(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
int32_t &DefaultVal);
llvm::Value *emitNumTeamsForTargetDirective(CodeGenFunction &CGF,
const OMPExecutableDirective &D);
/// Emit the number of threads for a target directive. Inspect the
/// thread_limit clause associated with a teams construct combined or closely
/// nested with the target directive.
///
/// Emit the num_threads clause for directives such as 'target parallel' that
/// have no associated teams construct.
///
/// Otherwise, return nullptr.
const Expr *
getNumThreadsExprForTargetDirective(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
int32_t &DefaultVal);
llvm::Value *
emitNumThreadsForTargetDirective(CodeGenFunction &CGF,
const OMPExecutableDirective &D);
/// Returns pointer to ident_t type.
llvm::Type *getIdentTyPointerTy();
/// Gets thread id value for the current thread.
///
llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc);
/// Get the function name of an outlined region.
// The name can be customized depending on the target.
//
virtual StringRef getOutlinedHelperName() const { return ".omp_outlined."; }
/// Emits \p Callee function call with arguments \p Args with location \p Loc.
void emitCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::FunctionCallee Callee,
ArrayRef<llvm::Value *> Args = llvm::None) const;
/// Emits address of the word in a memory where current thread id is
/// stored.
virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc);
void setLocThreadIdInsertPt(CodeGenFunction &CGF,
bool AtCurrentPoint = false);
void clearLocThreadIdInsertPt(CodeGenFunction &CGF);
/// Check if the default location must be constant.
/// Default is false to support OMPT/OMPD.
virtual bool isDefaultLocationConstant() const { return false; }
/// Returns additional flags that can be stored in reserved_2 field of the
/// default location.
virtual unsigned getDefaultLocationReserved2Flags() const { return 0; }
/// Returns default flags for the barriers depending on the directive, for
/// which this barier is going to be emitted.
static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind);
/// Get the LLVM type for the critical name.
llvm::ArrayType *getKmpCriticalNameTy() const {return KmpCriticalNameTy;}
/// Returns corresponding lock object for the specified critical region
/// name. If the lock object does not exist it is created, otherwise the
/// reference to the existing copy is returned.
/// \param CriticalName Name of the critical region.
///
llvm::Value *getCriticalRegionLock(StringRef CriticalName);
private:
/// Map for SourceLocation and OpenMP runtime library debug locations.
typedef llvm::DenseMap<SourceLocation, llvm::Value *> OpenMPDebugLocMapTy;
OpenMPDebugLocMapTy OpenMPDebugLocMap;
/// The type for a microtask which gets passed to __kmpc_fork_call().
/// Original representation is:
/// typedef void (kmpc_micro)(kmp_int32 global_tid, kmp_int32 bound_tid,...);
llvm::FunctionType *Kmpc_MicroTy = nullptr;
/// Stores debug location and ThreadID for the function.
struct DebugLocThreadIdTy {
llvm::Value *DebugLoc;
llvm::Value *ThreadID;
/// Insert point for the service instructions.
llvm::AssertingVH<llvm::Instruction> ServiceInsertPt = nullptr;
};
/// Map of local debug location, ThreadId and functions.
typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy>
OpenMPLocThreadIDMapTy;
OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap;
/// Map of UDRs and corresponding combiner/initializer.
typedef llvm::DenseMap<const OMPDeclareReductionDecl *,
std::pair<llvm::Function *, llvm::Function *>>
UDRMapTy;
UDRMapTy UDRMap;
/// Map of functions and locally defined UDRs.
typedef llvm::DenseMap<llvm::Function *,
SmallVector<const OMPDeclareReductionDecl *, 4>>
FunctionUDRMapTy;
FunctionUDRMapTy FunctionUDRMap;
/// Map from the user-defined mapper declaration to its corresponding
/// functions.
llvm::DenseMap<const OMPDeclareMapperDecl *, llvm::Function *> UDMMap;
/// Map of functions and their local user-defined mappers.
using FunctionUDMMapTy =
llvm::DenseMap<llvm::Function *,
SmallVector<const OMPDeclareMapperDecl *, 4>>;
FunctionUDMMapTy FunctionUDMMap;
/// Maps local variables marked as lastprivate conditional to their internal
/// types.
llvm::DenseMap<llvm::Function *,
llvm::DenseMap<CanonicalDeclPtr<const Decl>,
std::tuple<QualType, const FieldDecl *,
const FieldDecl *, LValue>>>
LastprivateConditionalToTypes;
/// Maps function to the position of the untied task locals stack.
llvm::DenseMap<llvm::Function *, unsigned> FunctionToUntiedTaskStackMap;
/// Type kmp_critical_name, originally defined as typedef kmp_int32
/// kmp_critical_name[8];
llvm::ArrayType *KmpCriticalNameTy;
/// An ordered map of auto-generated variables to their unique names.
/// It stores variables with the following names: 1) ".gomp_critical_user_" +
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
/// variables.
llvm::StringMap<llvm::AssertingVH<llvm::GlobalVariable>,
llvm::BumpPtrAllocator> InternalVars;
/// Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *);
llvm::Type *KmpRoutineEntryPtrTy = nullptr;
QualType KmpRoutineEntryPtrQTy;
/// Type typedef struct kmp_task {
/// void * shareds; /**< pointer to block of pointers to
/// shared vars */
/// kmp_routine_entry_t routine; /**< pointer to routine to call for
/// executing task */
/// kmp_int32 part_id; /**< part id for the task */
/// kmp_routine_entry_t destructors; /* pointer to function to invoke
/// deconstructors of firstprivate C++ objects */
/// } kmp_task_t;
QualType KmpTaskTQTy;
/// Saved kmp_task_t for task directive.
QualType SavedKmpTaskTQTy;
/// Saved kmp_task_t for taskloop-based directive.
QualType SavedKmpTaskloopTQTy;
/// Type typedef struct kmp_depend_info {
/// kmp_intptr_t base_addr;
/// size_t len;
/// struct {
/// bool in:1;
/// bool out:1;
/// } flags;
/// } kmp_depend_info_t;
QualType KmpDependInfoTy;
/// Type typedef struct kmp_task_affinity_info {
/// kmp_intptr_t base_addr;
/// size_t len;
/// struct {
/// bool flag1 : 1;
/// bool flag2 : 1;
/// kmp_int32 reserved : 30;
/// } flags;
/// } kmp_task_affinity_info_t;
QualType KmpTaskAffinityInfoTy;
/// struct kmp_dim { // loop bounds info casted to kmp_int64
/// kmp_int64 lo; // lower
/// kmp_int64 up; // upper
/// kmp_int64 st; // stride
/// };
QualType KmpDimTy;
/// Type struct __tgt_offload_entry{
/// void *addr; // Pointer to the offload entry info.
/// // (function or global)
/// char *name; // Name of the function or global.
/// size_t size; // Size of the entry info (0 if it a function).
/// int32_t flags;
/// int32_t reserved;
/// };
QualType TgtOffloadEntryQTy;
/// Entity that registers the offloading constants that were emitted so
/// far.
class OffloadEntriesInfoManagerTy {
CodeGenModule &CGM;
/// Number of entries registered so far.
unsigned OffloadingEntriesNum = 0;
public:
/// Base class of the entries info.
class OffloadEntryInfo {
public:
/// Kind of a given entry.
enum OffloadingEntryInfoKinds : unsigned {
/// Entry is a target region.
OffloadingEntryInfoTargetRegion = 0,
/// Entry is a declare target variable.
OffloadingEntryInfoDeviceGlobalVar = 1,
/// Invalid entry info.
OffloadingEntryInfoInvalid = ~0u
};
protected:
OffloadEntryInfo() = delete;
explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {}
explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order,
uint32_t Flags)
: Flags(Flags), Order(Order), Kind(Kind) {}
~OffloadEntryInfo() = default;
public:
bool isValid() const { return Order != ~0u; }
unsigned getOrder() const { return Order; }
OffloadingEntryInfoKinds getKind() const { return Kind; }
uint32_t getFlags() const { return Flags; }
void setFlags(uint32_t NewFlags) { Flags = NewFlags; }
llvm::Constant *getAddress() const {
return cast_or_null<llvm::Constant>(Addr);
}
void setAddress(llvm::Constant *V) {
assert(!Addr.pointsToAliveValue() && "Address has been set before!");
Addr = V;
}
static bool classof(const OffloadEntryInfo *Info) { return true; }
private:
/// Address of the entity that has to be mapped for offloading.
llvm::WeakTrackingVH Addr;
/// Flags associated with the device global.
uint32_t Flags = 0u;
/// Order this entry was emitted.
unsigned Order = ~0u;
OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid;
};
/// Return true if a there are no entries defined.
bool empty() const;
/// Return number of entries defined so far.
unsigned size() const { return OffloadingEntriesNum; }
OffloadEntriesInfoManagerTy(CodeGenModule &CGM) : CGM(CGM) {}
//
// Target region entries related.
//
/// Kind of the target registry entry.
enum OMPTargetRegionEntryKind : uint32_t {
/// Mark the entry as target region.
OMPTargetRegionEntryTargetRegion = 0x0,
/// Mark the entry as a global constructor.
OMPTargetRegionEntryCtor = 0x02,
/// Mark the entry as a global destructor.
OMPTargetRegionEntryDtor = 0x04,
};
/// Target region entries info.
class OffloadEntryInfoTargetRegion final : public OffloadEntryInfo {
/// Address that can be used as the ID of the entry.
llvm::Constant *ID = nullptr;
public:
OffloadEntryInfoTargetRegion()
: OffloadEntryInfo(OffloadingEntryInfoTargetRegion) {}
explicit OffloadEntryInfoTargetRegion(unsigned Order,
llvm::Constant *Addr,
llvm::Constant *ID,
OMPTargetRegionEntryKind Flags)
: OffloadEntryInfo(OffloadingEntryInfoTargetRegion, Order, Flags),
ID(ID) {
setAddress(Addr);
}
llvm::Constant *getID() const { return ID; }
void setID(llvm::Constant *V) {
assert(!ID && "ID has been set before!");
ID = V;
}
static bool classof(const OffloadEntryInfo *Info) {
return Info->getKind() == OffloadingEntryInfoTargetRegion;
}
};
/// Initialize target region entry.
void initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
StringRef ParentName, unsigned LineNum,
unsigned Order);
/// Register target region entry.
void registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
StringRef ParentName, unsigned LineNum,
llvm::Constant *Addr, llvm::Constant *ID,
OMPTargetRegionEntryKind Flags);
/// Return true if a target region entry with the provided information
/// exists.
bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
StringRef ParentName, unsigned LineNum,
bool IgnoreAddressId = false) const;
/// brief Applies action \a Action on all registered entries.
typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned,
const OffloadEntryInfoTargetRegion &)>
OffloadTargetRegionEntryInfoActTy;
void actOnTargetRegionEntriesInfo(
const OffloadTargetRegionEntryInfoActTy &Action);
//
// Device global variable entries related.
//
/// Kind of the global variable entry..
enum OMPTargetGlobalVarEntryKind : uint32_t {
/// Mark the entry as a to declare target.
OMPTargetGlobalVarEntryTo = 0x0,
/// Mark the entry as a to declare target link.
OMPTargetGlobalVarEntryLink = 0x1,
};
/// Device global variable entries info.
class OffloadEntryInfoDeviceGlobalVar final : public OffloadEntryInfo {
/// Type of the global variable.
CharUnits VarSize;
llvm::GlobalValue::LinkageTypes Linkage;
public:
OffloadEntryInfoDeviceGlobalVar()
: OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar) {}
explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order,
OMPTargetGlobalVarEntryKind Flags)
: OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags) {}
explicit OffloadEntryInfoDeviceGlobalVar(
unsigned Order, llvm::Constant *Addr, CharUnits VarSize,
OMPTargetGlobalVarEntryKind Flags,
llvm::GlobalValue::LinkageTypes Linkage)
: OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags),
VarSize(VarSize), Linkage(Linkage) {
setAddress(Addr);
}
CharUnits getVarSize() const { return VarSize; }
void setVarSize(CharUnits Size) { VarSize = Size; }
llvm::GlobalValue::LinkageTypes getLinkage() const { return Linkage; }
void setLinkage(llvm::GlobalValue::LinkageTypes LT) { Linkage = LT; }
static bool classof(const OffloadEntryInfo *Info) {
return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar;
}
};
/// Initialize device global variable entry.
void initializeDeviceGlobalVarEntryInfo(StringRef Name,
OMPTargetGlobalVarEntryKind Flags,
unsigned Order);
/// Register device global variable entry.
void
registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
CharUnits VarSize,
OMPTargetGlobalVarEntryKind Flags,
llvm::GlobalValue::LinkageTypes Linkage);
/// Checks if the variable with the given name has been registered already.
bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const {
return OffloadEntriesDeviceGlobalVar.count(VarName) > 0;
}
/// Applies action \a Action on all registered entries.
typedef llvm::function_ref<void(StringRef,
const OffloadEntryInfoDeviceGlobalVar &)>
OffloadDeviceGlobalVarEntryInfoActTy;
void actOnDeviceGlobalVarEntriesInfo(
const OffloadDeviceGlobalVarEntryInfoActTy &Action);
private:
// Storage for target region entries kind. The storage is to be indexed by
// file ID, device ID, parent function name and line number.
typedef llvm::DenseMap<unsigned, OffloadEntryInfoTargetRegion>
OffloadEntriesTargetRegionPerLine;
typedef llvm::StringMap<OffloadEntriesTargetRegionPerLine>
OffloadEntriesTargetRegionPerParentName;
typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerParentName>
OffloadEntriesTargetRegionPerFile;
typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerFile>
OffloadEntriesTargetRegionPerDevice;
typedef OffloadEntriesTargetRegionPerDevice OffloadEntriesTargetRegionTy;
OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion;
/// Storage for device global variable entries kind. The storage is to be
/// indexed by mangled name.
typedef llvm::StringMap<OffloadEntryInfoDeviceGlobalVar>
OffloadEntriesDeviceGlobalVarTy;
OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar;
};
OffloadEntriesInfoManagerTy OffloadEntriesInfoManager;
bool ShouldMarkAsGlobal = true;
/// List of the emitted declarations.
llvm::DenseSet<CanonicalDeclPtr<const Decl>> AlreadyEmittedTargetDecls;
/// List of the global variables with their addresses that should not be
/// emitted for the target.
llvm::StringMap<llvm::WeakTrackingVH> EmittedNonTargetVariables;
/// List of variables that can become declare target implicitly and, thus,
/// must be emitted.
llvm::SmallDenseSet<const VarDecl *> DeferredGlobalVariables;
using NontemporalDeclsSet = llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>>;
/// Stack for list of declarations in current context marked as nontemporal.
/// The set is the union of all current stack elements.
llvm::SmallVector<NontemporalDeclsSet, 4> NontemporalDeclsStack;
using UntiedLocalVarsAddressesMap =
llvm::MapVector<CanonicalDeclPtr<const VarDecl>,
std::pair<Address, Address>>;
llvm::SmallVector<UntiedLocalVarsAddressesMap, 4> UntiedLocalVarsStack;
/// Stack for list of addresses of declarations in current context marked as
/// lastprivate conditional. The set is the union of all current stack
/// elements.
llvm::SmallVector<LastprivateConditionalData, 4> LastprivateConditionalStack;
/// Flag for keeping track of weather a requires unified_shared_memory
/// directive is present.
bool HasRequiresUnifiedSharedMemory = false;
/// Atomic ordering from the omp requires directive.
llvm::AtomicOrdering RequiresAtomicOrdering = llvm::AtomicOrdering::Monotonic;
/// Flag for keeping track of weather a target region has been emitted.
bool HasEmittedTargetRegion = false;
/// Flag for keeping track of weather a device routine has been emitted.
/// Device routines are specific to the
bool HasEmittedDeclareTargetRegion = false;
/// Loads all the offload entries information from the host IR
/// metadata.
void loadOffloadInfoMetadata();
/// Returns __tgt_offload_entry type.
QualType getTgtOffloadEntryQTy();
/// Start scanning from statement \a S and and emit all target regions
/// found along the way.
/// \param S Starting statement.
/// \param ParentName Name of the function declaration that is being scanned.
void scanForTargetRegionsFunctions(const Stmt *S, StringRef ParentName);
/// Build type kmp_routine_entry_t (if not built yet).
void emitKmpRoutineEntryT(QualType KmpInt32Ty);
/// Returns pointer to kmpc_micro type.
llvm::Type *getKmpc_MicroPointerTy();
/// Returns __kmpc_for_static_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned. Will create a distribute call
/// __kmpc_distribute_static_init* if \a IsGPUDistribute is set.
llvm::FunctionCallee createForStaticInitFunction(unsigned IVSize,
bool IVSigned,
bool IsGPUDistribute);
/// Returns __kmpc_dispatch_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchInitFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_next_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchNextFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_fini_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchFiniFunction(unsigned IVSize,
bool IVSigned);
/// If the specified mangled name is not in the module, create and
/// return threadprivate cache object. This object is a pointer's worth of
/// storage that's reserved for use by the OpenMP runtime.
/// \param VD Threadprivate variable.
/// \return Cache variable for the specified threadprivate.
llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD);
/// Gets (if variable with the given name already exist) or creates
/// internal global variable with the specified Name. The created variable has
/// linkage CommonLinkage by default and is initialized by null value.
/// \param Ty Type of the global variable. If it is exist already the type
/// must be the same.
/// \param Name Name of the variable.
llvm::GlobalVariable *getOrCreateInternalVariable(llvm::Type *Ty,
const llvm::Twine &Name,
unsigned AddressSpace = 0);
/// Set of threadprivate variables with the generated initializer.
llvm::StringSet<> ThreadPrivateWithDefinition;
/// Set of declare target variables with the generated initializer.
llvm::StringSet<> DeclareTargetWithDefinition;
/// Emits initialization code for the threadprivate variables.
/// \param VDAddr Address of the global variable \a VD.
/// \param Ctor Pointer to a global init function for \a VD.
/// \param CopyCtor Pointer to a global copy function for \a VD.
/// \param Dtor Pointer to a global destructor function for \a VD.
/// \param Loc Location of threadprivate declaration.
void emitThreadPrivateVarInit(CodeGenFunction &CGF, Address VDAddr,
llvm::Value *Ctor, llvm::Value *CopyCtor,
llvm::Value *Dtor, SourceLocation Loc);
/// Emit the array initialization or deletion portion for user-defined mapper
/// code generation.
void emitUDMapperArrayInitOrDel(CodeGenFunction &MapperCGF,
llvm::Value *Handle, llvm::Value *BasePtr,
llvm::Value *Ptr, llvm::Value *Size,
llvm::Value *MapType, llvm::Value *MapName,
CharUnits ElementSize,
llvm::BasicBlock *ExitBB, bool IsInit);
struct TaskResultTy {
llvm::Value *NewTask = nullptr;
llvm::Function *TaskEntry = nullptr;
llvm::Value *NewTaskNewTaskTTy = nullptr;
LValue TDBase;
const RecordDecl *KmpTaskTQTyRD = nullptr;
llvm::Value *TaskDupFn = nullptr;
};
/// Emit task region for the task directive. The task region is emitted in
/// several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param Data Additional data for task generation like tiednsee, final
/// state, list of privates etc.
TaskResultTy emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const OMPTaskDataTy &Data);
/// Emit code that pushes the trip count of loops associated with constructs
/// 'target teams distribute' and 'teams distribute parallel for'.
/// \param SizeEmitter Emits the int64 value for the number of iterations of
/// the associated loop.
void emitTargetNumIterationsCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Value *DeviceID,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter);
/// Emit update for lastprivate conditional data.
void emitLastprivateConditionalUpdate(CodeGenFunction &CGF, LValue IVLVal,
StringRef UniqueDeclName, LValue LVal,
SourceLocation Loc);
/// Returns the number of the elements and the address of the depobj
/// dependency array.
/// \return Number of elements in depobj array and the pointer to the array of
/// dependencies.
std::pair<llvm::Value *, LValue> getDepobjElements(CodeGenFunction &CGF,
LValue DepobjLVal,
SourceLocation Loc);
SmallVector<llvm::Value *, 4>
emitDepobjElementsSizes(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
const OMPTaskDataTy::DependData &Data);
void emitDepobjElements(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
LValue PosLVal, const OMPTaskDataTy::DependData &Data,
Address DependenciesArray);
public:
explicit CGOpenMPRuntime(CodeGenModule &CGM)
: CGOpenMPRuntime(CGM, ".", ".") {}
virtual ~CGOpenMPRuntime() {}
virtual void clear();
/// Emits code for OpenMP 'if' clause using specified \a CodeGen
/// function. Here is the logic:
/// if (Cond) {
/// ThenGen();
/// } else {
/// ElseGen();
/// }
void emitIfClause(CodeGenFunction &CGF, const Expr *Cond,
const RegionCodeGenTy &ThenGen,
const RegionCodeGenTy &ElseGen);
/// Checks if the \p Body is the \a CompoundStmt and returns its child
/// statement iff there is only one that is not evaluatable at the compile
/// time.
static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body);
/// Get the platform-specific name separator.
std::string getName(ArrayRef<StringRef> Parts) const;
/// Emit code for the specified user defined reduction construct.
virtual void emitUserDefinedReduction(CodeGenFunction *CGF,
const OMPDeclareReductionDecl *D);
/// Get combiner/initializer for the specified user-defined reduction, if any.
virtual std::pair<llvm::Function *, llvm::Function *>
getUserDefinedReduction(const OMPDeclareReductionDecl *D);
/// Emit the function for the user defined mapper construct.
void emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
CodeGenFunction *CGF = nullptr);
/// Get the function for the specified user-defined mapper. If it does not
/// exist, create one.
llvm::Function *
getOrCreateUserDefinedMapperFunc(const OMPDeclareMapperDecl *D);
/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitParallelOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitTeamsOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
/// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
/// TaskT).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param PartIDVar Variable for partition id in the current OpenMP untied
/// task region.
/// \param TaskTVar Variable for task_t argument.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param Tied true if task is generated for tied task, false otherwise.
/// \param NumberOfParts Number of parts in untied task. Ignored for tied
/// tasks.
///
virtual llvm::Function *emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
const VarDecl *PartIDVar, const VarDecl *TaskTVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
bool Tied, unsigned &NumberOfParts);
/// Cleans up references to the objects in finished function.
///
virtual void functionFinished(CodeGenFunction &CGF);
/// Emits code for parallel or serial call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param NumThreads The value corresponding to the num_threads clause, if
/// any, or nullptr.
///
virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond, llvm::Value *NumThreads);
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
/// \param CriticalOpGen Generator for the statement associated with the given
/// critical region.
/// \param Hint Value of the 'hint' clause (optional).
virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen,
SourceLocation Loc,
const Expr *Hint = nullptr);
/// Emits a master region.
/// \param MasterOpGen Generator for the statement associated with the given
/// master region.
virtual void emitMasterRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc);
/// Emits a masked region.
/// \param MaskedOpGen Generator for the statement associated with the given
/// masked region.
virtual void emitMaskedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MaskedOpGen,
SourceLocation Loc,
const Expr *Filter = nullptr);
/// Emits code for a taskyield directive.
virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc);
/// Emit a taskgroup region.
/// \param TaskgroupOpGen Generator for the statement associated with the
/// given taskgroup region.
virtual void emitTaskgroupRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &TaskgroupOpGen,
SourceLocation Loc);
/// Emits a single region.
/// \param SingleOpGen Generator for the statement associated with the given
/// single region.
virtual void emitSingleRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &SingleOpGen,
SourceLocation Loc,
ArrayRef<const Expr *> CopyprivateVars,
ArrayRef<const Expr *> DestExprs,
ArrayRef<const Expr *> SrcExprs,
ArrayRef<const Expr *> AssignmentOps);
/// Emit an ordered region.
/// \param OrderedOpGen Generator for the statement associated with the given
/// ordered region.
virtual void emitOrderedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &OrderedOpGen,
SourceLocation Loc, bool IsThreads);
/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
/// generated. Must be OMPD_barrier for explicit barrier generation.
/// \param EmitChecks true if need to emit checks for cancellation barriers.
/// \param ForceSimpleCall true simple barrier call must be emitted, false if
/// runtime class decides which one to emit (simple or with cancellation
/// checks).
///
virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind Kind,
bool EmitChecks = true,
bool ForceSimpleCall = false);
/// Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of worksharing directive is emitted without outer loop.
/// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of distribute directive is emitted without outer loop.
/// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticNonchunked(OpenMPDistScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static chunked.
/// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticChunked(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static non-chunked.
/// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticChunked(OpenMPDistScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is dynamic.
/// This kind of worksharing directive is emitted without outer loop.
/// \param ScheduleKind Schedule Kind specified in the 'schedule' clause.
///
virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const;
/// struct with the values to be passed to the dispatch runtime function
struct DispatchRTInput {
/// Loop lower bound
llvm::Value *LB = nullptr;
/// Loop upper bound
llvm::Value *UB = nullptr;
/// Chunk size specified using 'schedule' clause (nullptr if chunk
/// was not specified)
llvm::Value *Chunk = nullptr;
DispatchRTInput() = default;
DispatchRTInput(llvm::Value *LB, llvm::Value *UB, llvm::Value *Chunk)
: LB(LB), UB(UB), Chunk(Chunk) {}
};
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
/// This is used for non static scheduled types and when the ordered
/// clause is present on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds \a LB and \a UB and stride \a ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param Ordered true if loop is ordered, false otherwise.
/// \param DispatchValues struct containing llvm values for lower bound, upper
/// bound, and chunk expression.
/// For the default (nullptr) value, the chunk 1 will be used.
///
virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
const OpenMPScheduleTy &ScheduleKind,
unsigned IVSize, bool IVSigned, bool Ordered,
const DispatchRTInput &DispatchValues);
/// Struct with the values to be passed to the static runtime function
struct StaticRTInput {
/// Size of the iteration variable in bits.
unsigned IVSize = 0;
/// Sign of the iteration variable.
bool IVSigned = false;
/// true if loop is ordered, false otherwise.
bool Ordered = false;
/// Address of the output variable in which the flag of the last iteration
/// is returned.
Address IL = Address::invalid();
/// Address of the output variable in which the lower iteration number is
/// returned.
Address LB = Address::invalid();
/// Address of the output variable in which the upper iteration number is
/// returned.
Address UB = Address::invalid();
/// Address of the output variable in which the stride value is returned
/// necessary to generated the static_chunked scheduled loop.
Address ST = Address::invalid();
/// Value of the chunk for the static_chunked scheduled loop. For the
/// default (nullptr) value, the chunk 1 will be used.
llvm::Value *Chunk = nullptr;
StaticRTInput(unsigned IVSize, bool IVSigned, bool Ordered, Address IL,
Address LB, Address UB, Address ST,
llvm::Value *Chunk = nullptr)
: IVSize(IVSize), IVSigned(IVSigned), Ordered(Ordered), IL(IL), LB(LB),
UB(UB), ST(ST), Chunk(Chunk) {}
};
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
///
/// This is used only in case of static schedule, when the user did not
/// specify a ordered clause on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds LB and UB and stride ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param Values Input arguments for the construct.
///
virtual void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind,
const OpenMPScheduleTy &ScheduleKind,
const StaticRTInput &Values);
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
/// \param Values Input arguments for the construct.
///
virtual void emitDistributeStaticInit(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDistScheduleClauseKind SchedKind,
const StaticRTInput &Values);
/// Call the appropriate runtime routine to notify that we finished
/// iteration of the ordered loop with the dynamic scheduling.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
///
virtual void emitForOrderedIterationEnd(CodeGenFunction &CGF,
SourceLocation Loc, unsigned IVSize,
bool IVSigned);
/// Call the appropriate runtime routine to notify that we finished
/// all the work with current loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive for which the static finish is emitted.
///
virtual void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind);
/// Call __kmpc_dispatch_next(
/// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
/// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
/// kmp_int[32|64] *p_stride);
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param IL Address of the output variable in which the flag of the
/// last iteration is returned.
/// \param LB Address of the output variable in which the lower iteration
/// number is returned.
/// \param UB Address of the output variable in which the upper iteration
/// number is returned.
/// \param ST Address of the output variable in which the stride value is
/// returned.
virtual llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned,
Address IL, Address LB,
Address UB, Address ST);
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value of threads.
virtual void emitNumThreadsClause(CodeGenFunction &CGF,
llvm::Value *NumThreads,
SourceLocation Loc);
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
virtual void emitProcBindClause(CodeGenFunction &CGF,
llvm::omp::ProcBindKind ProcBind,
SourceLocation Loc);
/// Returns address of the threadprivate variable for the current
/// thread.
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF,
const VarDecl *VD,
Address VDAddr,
SourceLocation Loc);
/// Returns the address of the variable marked as declare target with link
/// clause OR as declare target with to clause and unified memory.
virtual Address getAddrOfDeclareTargetVar(const VarDecl *VD);
/// Emit a code for initialization of threadprivate variable. It emits
/// a call to runtime library which adds initial value to the newly created
/// threadprivate variable (if it is not constant) and registers destructor
/// for the variable (if any).
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of threadprivate declaration.
/// \param PerformInit true if initialization expression is not constant.
virtual llvm::Function *
emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
SourceLocation Loc, bool PerformInit,
CodeGenFunction *CGF = nullptr);
/// Emit a code for initialization of declare target variable.
/// \param VD Declare target variable.
/// \param Addr Address of the global variable \a VD.
/// \param PerformInit true if initialization expression is not constant.
virtual bool emitDeclareTargetVarDefinition(const VarDecl *VD,
llvm::GlobalVariable *Addr,
bool PerformInit);
/// Creates artificial threadprivate variable with name \p Name and type \p
/// VarType.
/// \param VarType Type of the artificial threadprivate variable.
/// \param Name Name of the artificial threadprivate variable.
virtual Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
QualType VarType,
StringRef Name);
/// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
SourceLocation Loc, llvm::AtomicOrdering AO);
/// Emit task region for the task directive. The task region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
/// kmp_task_t *new_task), where new_task is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiednsee, final
/// state, list of privates etc.
virtual void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data);
/// Emit task region for the taskloop directive. The taskloop region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
/// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
/// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task
/// is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiednsee, final
/// state, list of privates etc.
virtual void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPLoopDirective &D,
llvm::Function *TaskFunction,
QualType SharedsTy, Address Shareds,
const Expr *IfCond, const OMPTaskDataTy &Data);
/// Emit code for the directive that does not require outlining.
///
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param HasCancel true if region has inner cancel directive, false
/// otherwise.
virtual void emitInlinedDirective(CodeGenFunction &CGF,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen,
bool HasCancel = false);
/// Emits reduction function.
/// \param ArgsElemType Array type containing pointers to reduction variables.
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
llvm::Function *emitReductionFunction(SourceLocation Loc,
llvm::Type *ArgsElemType,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps);
/// Emits single reduction combiner
void emitSingleReductionCombiner(CodeGenFunction &CGF,
const Expr *ReductionOp,
const Expr *PrivateRef,
const DeclRefExpr *LHS,
const DeclRefExpr *RHS);
struct ReductionOptionsTy {
bool WithNowait;
bool SimpleReduction;
OpenMPDirectiveKind ReductionKind;
};
/// Emit a code for reduction clause. Next code should be emitted for
/// reduction:
/// \code
///
/// static kmp_critical_name lock = { 0 };
///
/// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
/// ...
/// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
/// ...
/// }
///
/// ...
/// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
/// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
/// RedList, reduce_func, &<lock>)) {
/// case 1:
/// ...
/// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
/// ...
/// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
/// break;
/// case 2:
/// ...
/// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
/// ...
/// break;
/// default:;
/// }
/// \endcode
///
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
/// \param Options List of options for reduction codegen:
/// WithNowait true if parent directive has also nowait clause, false
/// otherwise.
/// SimpleReduction Emit reduction operation only. Used for omp simd
/// directive on the host.
/// ReductionKind The kind of reduction to perform.
virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps,
ReductionOptionsTy Options);
/// Emit a code for initialization of task reduction clause. Next code
/// should be emitted for reduction:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_init(gtid, n, red_data);
/// \endcode
/// For reduction clause with task modifier it emits the next call:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n,
/// red_data);
/// \endcode
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates, reductions etc.
virtual llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF,
SourceLocation Loc,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
const OMPTaskDataTy &Data);
/// Emits the following code for reduction clause with task modifier:
/// \code
/// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
/// \endcode
virtual void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
bool IsWorksharingReduction);
/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
/// initializer/combiner/finalizer functions.
/// \param RCG Allows to reuse an existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
virtual void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
ReductionCodeGen &RCG, unsigned N);
/// Get the address of `void *` type of the privatue copy of the reduction
/// item specified by the \p SharedLVal.
/// \param ReductionsPtr Pointer to the reduction data returned by the
/// emitTaskReductionInit function.
/// \param SharedLVal Address of the original reduction item.
virtual Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *ReductionsPtr,
LValue SharedLVal);
/// Emit code for 'taskwait' directive.
virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPTaskDataTy &Data);
/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
virtual void emitCancellationPointCall(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDirectiveKind CancelRegion);
/// Emit code for 'cancel' construct.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
///
virtual void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
const Expr *IfCond,
OpenMPDirectiveKind CancelRegion);
/// Emit outilined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Code generation sequence for the \a D directive.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
virtual void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
/// Emit the target offloading code associated with \a D. The emitted
/// code attempts offloading the execution to the device, an the event of
/// a failure it executes the host version outlined in \a OutlinedFn.
/// \param D Directive to emit.
/// \param OutlinedFn Host version of the code to be offloaded.
/// \param OutlinedFnID ID of host version of the code to be offloaded.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used and device modifier.
/// \param SizeEmitter Callback to emit number of iterations for loop-based
/// directives.
virtual void emitTargetCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter);
/// Emit the target regions enclosed in \a GD function definition or
/// the function itself in case it is a valid device function. Returns true if
/// \a GD was dealt with successfully.
/// \param GD Function to scan.
virtual bool emitTargetFunctions(GlobalDecl GD);
/// Emit the global variable if it is a valid device global variable.
/// Returns true if \a GD was dealt with successfully.
/// \param GD Variable declaration to emit.
virtual bool emitTargetGlobalVariable(GlobalDecl GD);
/// Checks if the provided global decl \a GD is a declare target variable and
/// registers it when emitting code for the host.
virtual void registerTargetGlobalVariable(const VarDecl *VD,
llvm::Constant *Addr);
/// Emit the global \a GD if it is meaningful for the target. Returns
/// if it was emitted successfully.
/// \param GD Global to scan.
virtual bool emitTargetGlobal(GlobalDecl GD);
/// Creates and returns a registration function for when at least one
/// requires directives was used in the current module.
llvm::Function *emitRequiresDirectiveRegFun();
/// Creates all the offload entries in the current compilation unit
/// along with the associated metadata.
void createOffloadEntriesAndInfoMetadata();
/// Emits code for teams call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run by team masters. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
///
virtual void emitTeamsCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars);
/// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
/// for num_teams clause.
/// \param NumTeams An integer expression of teams.
/// \param ThreadLimit An integer expression of threads.
virtual void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
const Expr *ThreadLimit, SourceLocation Loc);
/// Struct that keeps all the relevant information that should be kept
/// throughout a 'target data' region.
class TargetDataInfo {
/// Set to true if device pointer information have to be obtained.
bool RequiresDevicePointerInfo = false;
/// Set to true if Clang emits separate runtime calls for the beginning and
/// end of the region. These calls might have separate map type arrays.
bool SeparateBeginEndCalls = false;
public:
/// The array of base pointer passed to the runtime library.
llvm::Value *BasePointersArray = nullptr;
/// The array of section pointers passed to the runtime library.
llvm::Value *PointersArray = nullptr;
/// The array of sizes passed to the runtime library.
llvm::Value *SizesArray = nullptr;
/// The array of map types passed to the runtime library for the beginning
/// of the region or for the entire region if there are no separate map
/// types for the region end.
llvm::Value *MapTypesArray = nullptr;
/// The array of map types passed to the runtime library for the end of the
/// region, or nullptr if there are no separate map types for the region
/// end.
llvm::Value *MapTypesArrayEnd = nullptr;
/// The array of user-defined mappers passed to the runtime library.
llvm::Value *MappersArray = nullptr;
/// The array of original declaration names of mapped pointers sent to the
/// runtime library for debugging
llvm::Value *MapNamesArray = nullptr;
/// Indicate whether any user-defined mapper exists.
bool HasMapper = false;
/// The total number of pointers passed to the runtime library.
unsigned NumberOfPtrs = 0u;
/// Map between the a declaration of a capture and the corresponding base
/// pointer address where the runtime returns the device pointers.
llvm::DenseMap<const ValueDecl *, Address> CaptureDeviceAddrMap;
explicit TargetDataInfo() {}
explicit TargetDataInfo(bool RequiresDevicePointerInfo,
bool SeparateBeginEndCalls)
: RequiresDevicePointerInfo(RequiresDevicePointerInfo),
SeparateBeginEndCalls(SeparateBeginEndCalls) {}
/// Clear information about the data arrays.
void clearArrayInfo() {
BasePointersArray = nullptr;
PointersArray = nullptr;
SizesArray = nullptr;
MapTypesArray = nullptr;
MapTypesArrayEnd = nullptr;
MapNamesArray = nullptr;
MappersArray = nullptr;
HasMapper = false;
NumberOfPtrs = 0u;
}
/// Return true if the current target data information has valid arrays.
bool isValid() {
return BasePointersArray && PointersArray && SizesArray &&
MapTypesArray && (!HasMapper || MappersArray) && NumberOfPtrs;
}
bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; }
bool separateBeginEndCalls() { return SeparateBeginEndCalls; }
};
/// Emit the target data mapping code associated with \a D.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the
/// target directive, or null if no device clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
/// \param Info A record used to store information that needs to be preserved
/// until the region is closed.
virtual void emitTargetDataCalls(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond, const Expr *Device,
const RegionCodeGenTy &CodeGen,
TargetDataInfo &Info);
/// Emit the data mapping/movement code associated with the directive
/// \a D that should be of the form 'target [{enter|exit} data | update]'.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
virtual void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond,
const Expr *Device);
/// Marks function \a Fn with properly mangled versions of vector functions.
/// \param FD Function marked as 'declare simd'.
/// \param Fn LLVM function that must be marked with 'declare simd'
/// attributes.
virtual void emitDeclareSimdFunction(const FunctionDecl *FD,
llvm::Function *Fn);
/// Emit initialization for doacross loop nesting support.
/// \param D Loop-based construct used in doacross nesting construct.
virtual void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
ArrayRef<Expr *> NumIterations);
/// Emit code for doacross ordered directive with 'depend' clause.
/// \param C 'depend' clause with 'sink|source' dependency kind.
virtual void emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C);
/// Translates the native parameter of outlined function if this is required
/// for target.
/// \param FD Field decl from captured record for the parameter.
/// \param NativeParam Parameter itself.
virtual const VarDecl *translateParameter(const FieldDecl *FD,
const VarDecl *NativeParam) const {
return NativeParam;
}
/// Gets the address of the native argument basing on the address of the
/// target-specific parameter.
/// \param NativeParam Parameter itself.
/// \param TargetParam Corresponding target-specific parameter.
virtual Address getParameterAddress(CodeGenFunction &CGF,
const VarDecl *NativeParam,
const VarDecl *TargetParam) const;
/// Choose default schedule type and chunk value for the
/// dist_schedule clause.
virtual void getDefaultDistScheduleAndChunk(CodeGenFunction &CGF,
const OMPLoopDirective &S, OpenMPDistScheduleClauseKind &ScheduleKind,
llvm::Value *&Chunk) const {}
/// Choose default schedule type and chunk value for the
/// schedule clause.
virtual void getDefaultScheduleAndChunk(CodeGenFunction &CGF,
const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind,
const Expr *&ChunkExpr) const;
/// Emits call of the outlined function with the provided arguments,
/// translating these arguments to correct target-specific arguments.
virtual void
emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::FunctionCallee OutlinedFn,
ArrayRef<llvm::Value *> Args = llvm::None) const;
/// Emits OpenMP-specific function prolog.
/// Required for device constructs.
virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D);
/// Gets the OpenMP-specific address of the local variable.
virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD);
/// Marks the declaration as already emitted for the device code and returns
/// true, if it was marked already, and false, otherwise.
bool markAsGlobalTarget(GlobalDecl GD);
/// Emit deferred declare target variables marked for deferred emission.
void emitDeferredTargetDecls() const;
/// Adjust some parameters for the target-based directives, like addresses of
/// the variables captured by reference in lambdas.
virtual void
adjustTargetSpecificDataForLambdas(CodeGenFunction &CGF,
const OMPExecutableDirective &D) const;
/// Perform check on requires decl to ensure that target architecture
/// supports unified addressing
virtual void processRequiresDirective(const OMPRequiresDecl *D);
/// Gets default memory ordering as specified in requires directive.
llvm::AtomicOrdering getDefaultMemoryOrdering() const;
/// Checks if the variable has associated OMPAllocateDeclAttr attribute with
/// the predefined allocator and translates it into the corresponding address
/// space.
virtual bool hasAllocateAttributeForGlobalVar(const VarDecl *VD, LangAS &AS);
/// Return whether the unified_shared_memory has been specified.
bool hasRequiresUnifiedSharedMemory() const;
/// Checks if the \p VD variable is marked as nontemporal declaration in
/// current context.
bool isNontemporalDecl(const ValueDecl *VD) const;
/// Create specialized alloca to handle lastprivate conditionals.
Address emitLastprivateConditionalInit(CodeGenFunction &CGF,
const VarDecl *VD);
/// Checks if the provided \p LVal is lastprivate conditional and emits the
/// code to update the value of the original variable.
/// \code
/// lastprivate(conditional: a)
/// ...
/// <type> a;
/// lp_a = ...;
/// #pragma omp critical(a)
/// if (last_iv_a <= iv) {
/// last_iv_a = iv;
/// global_a = lp_a;
/// }
/// \endcode
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
const Expr *LHS);
/// Checks if the lastprivate conditional was updated in inner region and
/// writes the value.
/// \code
/// lastprivate(conditional: a)
/// ...
/// <type> a;bool Fired = false;
/// #pragma omp ... shared(a)
/// {
/// lp_a = ...;
/// Fired = true;
/// }
/// if (Fired) {
/// #pragma omp critical(a)
/// if (last_iv_a <= iv) {
/// last_iv_a = iv;
/// global_a = lp_a;
/// }
/// Fired = false;
/// }
/// \endcode
virtual void checkAndEmitSharedLastprivateConditional(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
const llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> &IgnoredDecls);
/// Gets the address of the global copy used for lastprivate conditional
/// update, if any.
/// \param PrivLVal LValue for the private copy.
/// \param VD Original lastprivate declaration.
virtual void emitLastprivateConditionalFinalUpdate(CodeGenFunction &CGF,
LValue PrivLVal,
const VarDecl *VD,
SourceLocation Loc);
/// Emits list of dependecies based on the provided data (array of
/// dependence/expression pairs).
/// \returns Pointer to the first element of the array casted to VoidPtr type.
std::pair<llvm::Value *, Address>
emitDependClause(CodeGenFunction &CGF,
ArrayRef<OMPTaskDataTy::DependData> Dependencies,
SourceLocation Loc);
/// Emits list of dependecies based on the provided data (array of
/// dependence/expression pairs) for depobj construct. In this case, the
/// variable is allocated in dynamically. \returns Pointer to the first
/// element of the array casted to VoidPtr type.
Address emitDepobjDependClause(CodeGenFunction &CGF,
const OMPTaskDataTy::DependData &Dependencies,
SourceLocation Loc);
/// Emits the code to destroy the dependency object provided in depobj
/// directive.
void emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
SourceLocation Loc);
/// Updates the dependency kind in the specified depobj object.
/// \param DepobjLVal LValue for the main depobj object.
/// \param NewDepKind New dependency kind.
void emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
OpenMPDependClauseKind NewDepKind, SourceLocation Loc);
/// Initializes user defined allocators specified in the uses_allocators
/// clauses.
void emitUsesAllocatorsInit(CodeGenFunction &CGF, const Expr *Allocator,
const Expr *AllocatorTraits);
/// Destroys user defined allocators specified in the uses_allocators clause.
void emitUsesAllocatorsFini(CodeGenFunction &CGF, const Expr *Allocator);
/// Returns true if the variable is a local variable in untied task.
bool isLocalVarInUntiedTask(CodeGenFunction &CGF, const VarDecl *VD) const;
};
/// Class supports emissionof SIMD-only code.
class CGOpenMPSIMDRuntime final : public CGOpenMPRuntime {
public:
explicit CGOpenMPSIMDRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM) {}
~CGOpenMPSIMDRuntime() override {}
/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
llvm::Function *
emitParallelOutlinedFunction(const OMPExecutableDirective &D,
const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen) override;
/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
llvm::Function *
emitTeamsOutlinedFunction(const OMPExecutableDirective &D,
const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen) override;
/// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
/// TaskT).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param PartIDVar Variable for partition id in the current OpenMP untied
/// task region.
/// \param TaskTVar Variable for task_t argument.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param Tied true if task is generated for tied task, false otherwise.
/// \param NumberOfParts Number of parts in untied task. Ignored for tied
/// tasks.
///
llvm::Function *emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
const VarDecl *PartIDVar, const VarDecl *TaskTVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
bool Tied, unsigned &NumberOfParts) override;
/// Emits code for parallel or serial call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param NumThreads The value corresponding to the num_threads clause, if
/// any, or nullptr.
///
void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond, llvm::Value *NumThreads) override;
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
/// \param CriticalOpGen Generator for the statement associated with the given
/// critical region.
/// \param Hint Value of the 'hint' clause (optional).
void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen,
SourceLocation Loc,
const Expr *Hint = nullptr) override;
/// Emits a master region.
/// \param MasterOpGen Generator for the statement associated with the given
/// master region.
void emitMasterRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc) override;
/// Emits a masked region.
/// \param MaskedOpGen Generator for the statement associated with the given
/// masked region.
void emitMaskedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MaskedOpGen, SourceLocation Loc,
const Expr *Filter = nullptr) override;
/// Emits a masked region.
/// \param MaskedOpGen Generator for the statement associated with the given
/// masked region.
/// Emits code for a taskyield directive.
void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc) override;
/// Emit a taskgroup region.
/// \param TaskgroupOpGen Generator for the statement associated with the
/// given taskgroup region.
void emitTaskgroupRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &TaskgroupOpGen,
SourceLocation Loc) override;
/// Emits a single region.
/// \param SingleOpGen Generator for the statement associated with the given
/// single region.
void emitSingleRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &SingleOpGen, SourceLocation Loc,
ArrayRef<const Expr *> CopyprivateVars,
ArrayRef<const Expr *> DestExprs,
ArrayRef<const Expr *> SrcExprs,
ArrayRef<const Expr *> AssignmentOps) override;
/// Emit an ordered region.
/// \param OrderedOpGen Generator for the statement associated with the given
/// ordered region.
void emitOrderedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &OrderedOpGen,
SourceLocation Loc, bool IsThreads) override;
/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
/// generated. Must be OMPD_barrier for explicit barrier generation.
/// \param EmitChecks true if need to emit checks for cancellation barriers.
/// \param ForceSimpleCall true simple barrier call must be emitted, false if
/// runtime class decides which one to emit (simple or with cancellation
/// checks).
///
void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind Kind, bool EmitChecks = true,
bool ForceSimpleCall = false) override;
/// This is used for non static scheduled types and when the ordered
/// clause is present on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds \a LB and \a UB and stride \a ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param Ordered true if loop is ordered, false otherwise.
/// \param DispatchValues struct containing llvm values for lower bound, upper
/// bound, and chunk expression.
/// For the default (nullptr) value, the chunk 1 will be used.
///
void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
const OpenMPScheduleTy &ScheduleKind,
unsigned IVSize, bool IVSigned, bool Ordered,
const DispatchRTInput &DispatchValues) override;
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
///
/// This is used only in case of static schedule, when the user did not
/// specify a ordered clause on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds LB and UB and stride ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param Values Input arguments for the construct.
///
void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind,
const OpenMPScheduleTy &ScheduleKind,
const StaticRTInput &Values) override;
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
/// \param Values Input arguments for the construct.
///
void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDistScheduleClauseKind SchedKind,
const StaticRTInput &Values) override;
/// Call the appropriate runtime routine to notify that we finished
/// iteration of the ordered loop with the dynamic scheduling.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
///
void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned) override;
/// Call the appropriate runtime routine to notify that we finished
/// all the work with current loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive for which the static finish is emitted.
///
void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind) override;
/// Call __kmpc_dispatch_next(
/// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
/// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
/// kmp_int[32|64] *p_stride);
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param IL Address of the output variable in which the flag of the
/// last iteration is returned.
/// \param LB Address of the output variable in which the lower iteration
/// number is returned.
/// \param UB Address of the output variable in which the upper iteration
/// number is returned.
/// \param ST Address of the output variable in which the stride value is
/// returned.
llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned, Address IL,
Address LB, Address UB, Address ST) override;
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value of threads.
void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads,
SourceLocation Loc) override;
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
void emitProcBindClause(CodeGenFunction &CGF,
llvm::omp::ProcBindKind ProcBind,
SourceLocation Loc) override;
/// Returns address of the threadprivate variable for the current
/// thread.
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD,
Address VDAddr, SourceLocation Loc) override;
/// Emit a code for initialization of threadprivate variable. It emits
/// a call to runtime library which adds initial value to the newly created
/// threadprivate variable (if it is not constant) and registers destructor
/// for the variable (if any).
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of threadprivate declaration.
/// \param PerformInit true if initialization expression is not constant.
llvm::Function *
emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
SourceLocation Loc, bool PerformInit,
CodeGenFunction *CGF = nullptr) override;
/// Creates artificial threadprivate variable with name \p Name and type \p
/// VarType.
/// \param VarType Type of the artificial threadprivate variable.
/// \param Name Name of the artificial threadprivate variable.
Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
QualType VarType,
StringRef Name) override;
/// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
SourceLocation Loc, llvm::AtomicOrdering AO) override;
/// Emit task region for the task directive. The task region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
/// kmp_task_t *new_task), where new_task is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiednsee, final
/// state, list of privates etc.
void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data) override;
/// Emit task region for the taskloop directive. The taskloop region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
/// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
/// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task
/// is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiednsee, final
/// state, list of privates etc.
void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPLoopDirective &D, llvm::Function *TaskFunction,
QualType SharedsTy, Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data) override;
/// Emit a code for reduction clause. Next code should be emitted for
/// reduction:
/// \code
///
/// static kmp_critical_name lock = { 0 };
///
/// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
/// ...
/// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
/// ...
/// }
///
/// ...
/// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
/// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
/// RedList, reduce_func, &<lock>)) {
/// case 1:
/// ...
/// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
/// ...
/// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
/// break;
/// case 2:
/// ...
/// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
/// ...
/// break;
/// default:;
/// }
/// \endcode
///
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
/// \param Options List of options for reduction codegen:
/// WithNowait true if parent directive has also nowait clause, false
/// otherwise.
/// SimpleReduction Emit reduction operation only. Used for omp simd
/// directive on the host.
/// ReductionKind The kind of reduction to perform.
void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps,
ReductionOptionsTy Options) override;
/// Emit a code for initialization of task reduction clause. Next code
/// should be emitted for reduction:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_init(gtid, n, red_data);
/// \endcode
/// For reduction clause with task modifier it emits the next call:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n,
/// red_data);
/// \endcode
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates, reductions etc.
llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
const OMPTaskDataTy &Data) override;
/// Emits the following code for reduction clause with task modifier:
/// \code
/// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
/// \endcode
void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
bool IsWorksharingReduction) override;
/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
/// initializer/combiner/finalizer functions + emits threadprivate variable to
/// store the pointer to the original reduction item for the custom
/// initializer defined by declare reduction construct.
/// \param RCG Allows to reuse an existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
ReductionCodeGen &RCG, unsigned N) override;
/// Get the address of `void *` type of the privatue copy of the reduction
/// item specified by the \p SharedLVal.
/// \param ReductionsPtr Pointer to the reduction data returned by the
/// emitTaskReductionInit function.
/// \param SharedLVal Address of the original reduction item.
Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *ReductionsPtr,
LValue SharedLVal) override;
/// Emit code for 'taskwait' directive.
void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPTaskDataTy &Data) override;
/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
void emitCancellationPointCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind CancelRegion) override;
/// Emit code for 'cancel' construct.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
///
void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
const Expr *IfCond,
OpenMPDirectiveKind CancelRegion) override;
/// Emit outilined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Code generation sequence for the \a D directive.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen) override;
/// Emit the target offloading code associated with \a D. The emitted
/// code attempts offloading the execution to the device, an the event of
/// a failure it executes the host version outlined in \a OutlinedFn.
/// \param D Directive to emit.
/// \param OutlinedFn Host version of the code to be offloaded.
/// \param OutlinedFnID ID of host version of the code to be offloaded.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used and device modifier.
void emitTargetCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter) override;
/// Emit the target regions enclosed in \a GD function definition or
/// the function itself in case it is a valid device function. Returns true if
/// \a GD was dealt with successfully.
/// \param GD Function to scan.
bool emitTargetFunctions(GlobalDecl GD) override;
/// Emit the global variable if it is a valid device global variable.
/// Returns true if \a GD was dealt with successfully.
/// \param GD Variable declaration to emit.
bool emitTargetGlobalVariable(GlobalDecl GD) override;
/// Emit the global \a GD if it is meaningful for the target. Returns
/// if it was emitted successfully.
/// \param GD Global to scan.
bool emitTargetGlobal(GlobalDecl GD) override;
/// Emits code for teams call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run by team masters. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
///
void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D,
SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars) override;
/// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
/// for num_teams clause.
/// \param NumTeams An integer expression of teams.
/// \param ThreadLimit An integer expression of threads.
void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
const Expr *ThreadLimit, SourceLocation Loc) override;
/// Emit the target data mapping code associated with \a D.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the
/// target directive, or null if no device clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
/// \param Info A record used to store information that needs to be preserved
/// until the region is closed.
void emitTargetDataCalls(CodeGenFunction &CGF,
const OMPExecutableDirective &D, const Expr *IfCond,
const Expr *Device, const RegionCodeGenTy &CodeGen,
TargetDataInfo &Info) override;
/// Emit the data mapping/movement code associated with the directive
/// \a D that should be of the form 'target [{enter|exit} data | update]'.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond,
const Expr *Device) override;
/// Emit initialization for doacross loop nesting support.
/// \param D Loop-based construct used in doacross nesting construct.
void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
ArrayRef<Expr *> NumIterations) override;
/// Emit code for doacross ordered directive with 'depend' clause.
/// \param C 'depend' clause with 'sink|source' dependency kind.
void emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C) override;
/// Translates the native parameter of outlined function if this is required
/// for target.
/// \param FD Field decl from captured record for the parameter.
/// \param NativeParam Parameter itself.
const VarDecl *translateParameter(const FieldDecl *FD,
const VarDecl *NativeParam) const override;
/// Gets the address of the native argument basing on the address of the
/// target-specific parameter.
/// \param NativeParam Parameter itself.
/// \param TargetParam Corresponding target-specific parameter.
Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam,
const VarDecl *TargetParam) const override;
/// Gets the OpenMP-specific address of the local variable.
Address getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD) override {
return Address::invalid();
}
};
} // namespace CodeGen
} // namespace clang
#endif
|
core_zlange.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> c d s
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
#include <math.h>
/***************************************************************************//**
*
* @ingroup core_lange
*
* Calculates max, one, infinity or Frobenius norm of a given matrix.
*
*******************************************************************************
*
* @param[in] norm
* - PlasmaMaxNorm: Max norm
* - PlasmaOneNorm: One norm
* - PlasmaInfNorm: Infinity norm
* - PlasmaFrobeniusNorm: Frobenius norm
*
* @param[in] m
* The number of rows of the matrix A. m >= 0. When m = 0,
* the returned value is set to zero.
*
* @param[in] n
* The number of columns of the matrix A. n >= 0. When n = 0,
* the returned value is set to zero.
*
* @param[in] A
* The m-by-n matrix A.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,m).
*
* @param[in] work
* The auxiliary work array.
*
* @param[out] value
* The specified norm of the given matrix A
*
******************************************************************************/
__attribute__((weak))
void plasma_core_zlange(plasma_enum_t norm, int m, int n,
const plasma_complex64_t *A, int lda,
double *work, double *value)
{
*value = LAPACKE_zlange_work(LAPACK_COL_MAJOR,
lapack_const(norm),
m, n, A, lda, work);
}
/******************************************************************************/
void plasma_core_omp_zlange(plasma_enum_t norm, int m, int n,
const plasma_complex64_t *A, int lda,
double *work, double *value,
plasma_sequence_t *sequence, plasma_request_t *request)
{
#pragma omp task depend(in:A[0:lda*n]) \
depend(out:value[0:1])
{
if (sequence->status == PlasmaSuccess)
plasma_core_zlange(norm, m, n, A, lda, work, value);
}
}
/******************************************************************************/
void plasma_core_omp_zlange_aux(plasma_enum_t norm, int m, int n,
const plasma_complex64_t *A, int lda,
double *value,
plasma_sequence_t *sequence, plasma_request_t *request)
{
switch (norm) {
case PlasmaOneNorm:
#pragma omp task depend(in:A[0:lda*n]) \
depend(out:value[0:n])
{
if (sequence->status == PlasmaSuccess) {
for (int j = 0; j < n; j++) {
value[j] = cabs(A[lda*j]);
for (int i = 1; i < m; i++) {
value[j] += cabs(A[lda*j+i]);
}
}
}
}
break;
case PlasmaInfNorm:
#pragma omp task depend(in:A[0:lda*n]) \
depend(out:value[0:m])
{
if (sequence->status == PlasmaSuccess) {
for (int i = 0; i < m; i++)
value[i] = 0.0;
for (int j = 0; j < n; j++) {
for (int i = 0; i < m; i++) {
value[i] += cabs(A[lda*j+i]);
}
}
}
}
break;
}
}
|
GB_unaryop__ainv_bool_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_bool_int64
// op(A') function: GB_tran__ainv_bool_int64
// C type: bool
// A type: int64_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
bool z = (bool) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_BOOL || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_bool_int64
(
bool *Cx, // Cx and Ax may be aliased
int64_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_bool_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
dijkstra_openmp.c | # include <stdlib.h>
# include <stdio.h>
# include <time.h>
# include <omp.h>
# define NV 6
int main ( int argc, char **argv );
int *dijkstra_distance ( int ohd[NV][NV] );
void find_nearest ( int s, int e, int mind[NV], int connected[NV], int *d,
int *v );
void init ( int ohd[NV][NV] );
void timestamp ( void );
void update_mind ( int s, int e, int mv, int connected[NV], int ohd[NV][NV],
int mind[NV] );
/******************************************************************************/
int main ( int argc, char **argv )
/******************************************************************************/
/*
Purpose:
MAIN runs an example of Dijkstra's minimum distance algorithm.
Discussion:
Given the distance matrix that defines a graph, we seek a list
of the minimum distances between node 0 and all other nodes.
This program sets up a small example problem and solves it.
The correct minimum distances are:
0 35 15 45 49 41
Licensing:
This code is distributed under the GNU LGPL license.
Modified:
01 July 2010
Author:
Original C version by Norm Matloff, CS Dept, UC Davis.
This C version by John Burkardt.
*/
{
int i;
int i4_huge = 2147483647;
int j;
int *mind;
int ohd[NV][NV];
timestamp ( );
fprintf ( stdout, "\n" );
fprintf ( stdout, "DIJKSTRA_OPENMP\n" );
fprintf ( stdout, " C version\n" );
fprintf ( stdout, " Use Dijkstra's algorithm to determine the minimum\n" );
fprintf ( stdout, " distance from node 0 to each node in a graph,\n" );
fprintf ( stdout, " given the distances between each pair of nodes.\n" );
fprintf ( stdout, "\n" );
fprintf ( stdout, " Although a very small example is considered, we\n" );
fprintf ( stdout, " demonstrate the use of OpenMP directives for\n" );
fprintf ( stdout, " parallel execution.\n" );
/*
Initialize the problem data.
*/
init ( ohd );
/*
Print the distance matrix.
*/
fprintf ( stdout, "\n" );
fprintf ( stdout, " Distance matrix:\n" );
fprintf ( stdout, "\n" );
for ( i = 0; i < NV; i++ )
{
for ( j = 0; j < NV; j++ )
{
if ( ohd[i][j] == i4_huge )
{
fprintf ( stdout, " Inf" );
}
else
{
fprintf ( stdout, " %3d", ohd[i][j] );
}
}
fprintf ( stdout, "\n" );
}
/*
Carry out the algorithm.
*/
mind = dijkstra_distance ( ohd );
/*
Print the results.
*/
fprintf ( stdout, "\n" );
fprintf ( stdout, " Minimum distances from node 0:\n");
fprintf ( stdout, "\n" );
for ( i = 0; i < NV; i++ )
{
fprintf ( stdout, " %2d %2d\n", i, mind[i] );
}
/*
Free memory.
*/
free ( mind );
/*
Terminate.
*/
fprintf ( stdout, "\n" );
fprintf ( stdout, "DIJKSTRA_OPENMP\n" );
fprintf ( stdout, " Normal end of execution.\n" );
fprintf ( stdout, "\n" );
timestamp ( );
return 0;
}
/******************************************************************************/
int *dijkstra_distance ( int ohd[NV][NV] )
/******************************************************************************/
/*
Purpose:
DIJKSTRA_DISTANCE uses Dijkstra's minimum distance algorithm.
Discussion:
We essentially build a tree. We start with only node 0 connected
to the tree, and this is indicated by setting CONNECTED[0] = 1.
We initialize MIND[I] to the one step distance from node 0 to node I.
Now we search among the unconnected nodes for the node MV whose minimum
distance is smallest, and connect it to the tree. For each remaining
unconnected node I, we check to see whether the distance from 0 to MV
to I is less than that recorded in MIND[I], and if so, we can reduce
the distance.
After NV-1 steps, we have connected all the nodes to 0, and computed
the correct minimum distances.
Licensing:
This code is distributed under the GNU LGPL license.
Modified:
02 July 2010
Author:
Original C version by Norm Matloff, CS Dept, UC Davis.
This C version by John Burkardt.
Parameters:
Input, int OHD[NV][NV], the distance of the direct link between
nodes I and J.
Output, int DIJKSTRA_DISTANCE[NV], the minimum distance from
node 0 to each node.
*/
{
int *connected;
int i;
int i4_huge = 2147483647;
int md;
int *mind;
int mv;
int my_first;
int my_id;
int my_last;
int my_md;
int my_mv;
int my_step;
int nth;
/*
Start out with only node 0 connected to the tree.
*/
connected = ( int * ) malloc ( NV * sizeof ( int ) );
connected[0] = 1;
for ( i = 1; i < NV; i++ )
{
connected[i] = 0;
}
/*
Initial estimate of minimum distance is the 1-step distance.
*/
mind = ( int * ) malloc ( NV * sizeof ( int ) );
for ( i = 0; i < NV; i++ )
{
mind[i] = ohd[0][i];
}
/*
Begin the parallel region.
*/
# pragma omp parallel private ( my_first, my_id, my_last, my_md, my_mv, my_step ) \
shared ( connected, md, mind, mv, nth, ohd )
{
my_id = omp_get_thread_num ( );
nth = omp_get_num_threads ( );
my_first = ( my_id * NV ) / nth;
my_last = ( ( my_id + 1 ) * NV ) / nth - 1;
/*
The SINGLE directive means that the block is to be executed by only
one thread, and that thread will be whichever one gets here first.
*/
# pragma omp single
{
printf ( "\n" );
printf ( " P%d: Parallel region begins with %d threads\n", my_id, nth );
printf ( "\n" );
}
fprintf ( stdout, " P%d: First=%d Last=%d\n", my_id, my_first, my_last );
for ( my_step = 1; my_step < NV; my_step++ )
{
/*
Before we compare the results of each thread, set the shared variable
MD to a big value. Only one thread needs to do this.
*/
# pragma omp single
{
md = i4_huge;
mv = -1;
}
/*
Each thread finds the nearest unconnected node in its part of the graph.
Some threads might have no unconnected nodes left.
*/
find_nearest ( my_first, my_last, mind, connected, &my_md, &my_mv );
/*
In order to determine the minimum of all the MY_MD's, we must insist
that only one thread at a time execute this block!
*/
# pragma omp critical
{
if ( my_md < md )
{
md = my_md;
mv = my_mv;
}
}
/*
This barrier means that ALL threads have executed the critical
block, and therefore MD and MV have the correct value. Only then
can we proceed.
*/
# pragma omp barrier
/*
If MV is -1, then NO thread found an unconnected node, so we're done early.
OpenMP does not like to BREAK out of a parallel region, so we'll just have
to let the iteration run to the end, while we avoid doing any more updates.
Otherwise, we connect the nearest node.
*/
# pragma omp single
{
if ( mv != - 1 )
{
connected[mv] = 1;
printf ( " P%d: Connecting node %d.\n", my_id, mv );
}
}
/*
Again, we don't want any thread to proceed until the value of
CONNECTED is updated.
*/
# pragma omp barrier
/*
Now each thread should update its portion of the MIND vector,
by checking to see whether the trip from 0 to MV plus the step
from MV to a node is closer than the current record.
*/
if ( mv != -1 )
{
update_mind ( my_first, my_last, mv, connected, ohd, mind );
}
/*
Before starting the next step of the iteration, we need all threads
to complete the updating, so we set a BARRIER here.
*/
#pragma omp barrier
}
/*
Once all the nodes have been connected, we can exit.
*/
# pragma omp single
{
printf ( "\n" );
printf ( " P%d: Exiting parallel region.\n", my_id );
}
}
free ( connected );
return mind;
}
/******************************************************************************/
void find_nearest ( int s, int e, int mind[NV], int connected[NV], int *d,
int *v )
/******************************************************************************/
/*
Purpose:
FIND_NEAREST finds the nearest unconnected node.
Licensing:
This code is distributed under the GNU LGPL license.
Modified:
02 July 2010
Author:
Original C version by Norm Matloff, CS Dept, UC Davis.
This C version by John Burkardt.
Parameters:
Input, int S, E, the first and last nodes that are to be checked.
Input, int MIND[NV], the currently computed minimum distance from
node 0 to each node.
Input, int CONNECTED[NV], is 1 for each connected node, whose
minimum distance to node 0 has been determined.
Output, int *D, the distance from node 0 to the nearest unconnected
node in the range S to E.
Output, int *V, the index of the nearest unconnected node in the range
S to E.
*/
{
int i;
int i4_huge = 2147483647;
*d = i4_huge;
*v = -1;
for ( i = s; i <= e; i++ )
{
if ( !connected[i] && ( mind[i] < *d ) )
{
*d = mind[i];
*v = i;
}
}
return;
}
/******************************************************************************/
void init ( int ohd[NV][NV] )
/******************************************************************************/
/*
Purpose:
INIT initializes the problem data.
Discussion:
The graph uses 6 nodes, and has the following diagram and
distance matrix:
N0--15--N2-100--N3 0 40 15 Inf Inf Inf
\ | / 40 0 20 10 25 6
\ | / 15 20 0 100 Inf Inf
40 20 10 Inf 10 100 0 Inf Inf
\ | / Inf 25 Inf Inf 0 8
\ | / Inf 6 Inf Inf 8 0
N1
/ \
/ \
6 25
/ \
/ \
N5----8-----N4
Licensing:
This code is distributed under the GNU LGPL license.
Modified:
02 July 2010
Author:
Original C version by Norm Matloff, CS Dept, UC Davis.
This C version by John Burkardt.
Parameters:
Output, int OHD[NV][NV], the distance of the direct link between
nodes I and J.
*/
{
int i;
int i4_huge = 2147483647;
int j;
for ( i = 0; i < NV; i++ )
{
for ( j = 0; j < NV; j++ )
{
if ( i == j )
{
ohd[i][i] = 0;
}
else
{
ohd[i][j] = i4_huge;
}
}
}
ohd[0][1] = ohd[1][0] = 40;
ohd[0][2] = ohd[2][0] = 15;
ohd[1][2] = ohd[2][1] = 20;
ohd[1][3] = ohd[3][1] = 10;
ohd[1][4] = ohd[4][1] = 25;
ohd[2][3] = ohd[3][2] = 100;
ohd[1][5] = ohd[5][1] = 6;
ohd[4][5] = ohd[5][4] = 8;
return;
}
/******************************************************************************/
void timestamp ( void )
/******************************************************************************/
/*
Purpose:
TIMESTAMP prints the current YMDHMS date as a time stamp.
Example:
31 May 2001 09:45:54 AM
Licensing:
This code is distributed under the GNU LGPL license.
Modified:
24 September 2003
Author:
John Burkardt
Parameters:
None
*/
{
# define TIME_SIZE 40
static char time_buffer[TIME_SIZE];
const struct tm *tm;
size_t len;
time_t now;
now = time ( NULL );
tm = localtime ( &now );
len = strftime ( time_buffer, TIME_SIZE, "%d %B %Y %I:%M:%S %p", tm );
printf ( "%s\n", time_buffer );
return;
# undef TIME_SIZE
}
/******************************************************************************/
void update_mind ( int s, int e, int mv, int connected[NV], int ohd[NV][NV],
int mind[NV] )
/******************************************************************************/
/*
Purpose:
UPDATE_MIND updates the minimum distance vector.
Discussion:
We've just determined the minimum distance to node MV.
For each unconnected node I in the range S to E,
check whether the route from node 0 to MV to I is shorter
than the currently known minimum distance.
Licensing:
This code is distributed under the GNU LGPL license.
Modified:
02 July 2010
Author:
Original C version by Norm Matloff, CS Dept, UC Davis.
This C version by John Burkardt.
Parameters:
Input, int S, E, the first and last nodes that are to be checked.
Input, int MV, the node whose minimum distance to node 0
has just been determined.
Input, int CONNECTED[NV], is 1 for each connected node, whose
minimum distance to node 0 has been determined.
Input, int OHD[NV][NV], the distance of the direct link between
nodes I and J.
Input/output, int MIND[NV], the currently computed minimum distances
from node 0 to each node. On output, the values for nodes S through
E have been updated.
*/
{
int i;
int i4_huge = 2147483647;
for ( i = s; i <= e; i++ )
{
if ( !connected[i] )
{
if ( ohd[mv][i] < i4_huge )
{
if ( mind[mv] + ohd[mv][i] < mind[i] )
{
mind[i] = mind[mv] + ohd[mv][i];
}
}
}
}
return;
}
|
lrn_kernel_arm.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: haitao@openailab.com
*/
#include "lrn_kernel_arm.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
#include <arm_neon.h>
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))
static struct tab exp_tab;
static struct tab log_tab;
static void init_tab(void)
{
/* Exponent polynomial coefficients */
exp_tab.a0 = vdupq_n_f32(1.f);
exp_tab.a1 = vdupq_n_f32(0.0416598916054f);
exp_tab.a2 = vdupq_n_f32(0.500000596046f);
exp_tab.a3 = vdupq_n_f32(0.0014122662833f);
exp_tab.a4 = vdupq_n_f32(1.00000011921f);
exp_tab.a5 = vdupq_n_f32(0.00833693705499f);
exp_tab.a6 = vdupq_n_f32(0.166665703058f);
exp_tab.a7 = vdupq_n_f32(0.000195780929062f);
/* Logarithm polynomial coefficients */
log_tab.a0 = vdupq_n_f32(-2.29561495781f);
log_tab.a1 = vdupq_n_f32(-2.47071170807f);
log_tab.a2 = vdupq_n_f32(-5.68692588806f);
log_tab.a3 = vdupq_n_f32(-0.165253549814f);
log_tab.a4 = vdupq_n_f32(5.17591238022f);
log_tab.a5 = vdupq_n_f32(0.844007015228f);
log_tab.a6 = vdupq_n_f32(4.58445882797f);
log_tab.a7 = vdupq_n_f32(0.0141278216615f);
}
static inline float32x4_t vfloorq_f32(float32x4_t val)
{
const float32x4_t CONST_1 = vdupq_n_f32(1.f);
const int32x4_t z = vcvtq_s32_f32(val);
const float32x4_t r = vcvtq_f32_s32(z);
return vbslq_f32(vcgtq_f32(r, val), vsubq_f32(r, CONST_1), r);
}
static inline float32x2_t vinvsqrt_f32(float32x2_t x)
{
float32x2_t sqrt_reciprocal = vrsqrte_f32(x);
sqrt_reciprocal = vmul_f32(vrsqrts_f32(vmul_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
sqrt_reciprocal = vmul_f32(vrsqrts_f32(vmul_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
return sqrt_reciprocal;
}
static inline float32x4_t vinvsqrtq_f32(float32x4_t x)
{
float32x4_t sqrt_reciprocal = vrsqrteq_f32(x);
sqrt_reciprocal = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
sqrt_reciprocal = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
return sqrt_reciprocal;
}
static inline float32x2_t vinv_f32(float32x2_t x)
{
float32x2_t recip = vrecpe_f32(x);
recip = vmul_f32(vrecps_f32(x, recip), recip);
recip = vmul_f32(vrecps_f32(x, recip), recip);
return recip;
}
static inline float32x4_t vinvq_f32(float32x4_t x)
{
float32x4_t recip = vrecpeq_f32(x);
recip = vmulq_f32(vrecpsq_f32(x, recip), recip);
recip = vmulq_f32(vrecpsq_f32(x, recip), recip);
return recip;
}
static inline float32x4_t vtaylor_polyq_f32(float32x4_t x, struct tab* coeffs)
{
float32x4_t A = vmlaq_f32(coeffs->a0, coeffs->a4, x);
float32x4_t B = vmlaq_f32(coeffs->a2, coeffs->a6, x);
float32x4_t C = vmlaq_f32(coeffs->a1, coeffs->a5, x);
float32x4_t D = vmlaq_f32(coeffs->a3, coeffs->a7, x);
float32x4_t x2 = vmulq_f32(x, x);
float32x4_t x4 = vmulq_f32(x2, x2);
float32x4_t res = vmlaq_f32(vmlaq_f32(A, B, x2), vmlaq_f32(C, D, x2), x4);
return res;
}
static inline float32x4_t vexpq_f32(float32x4_t x)
{
const float32x4_t CONST_LN2 = vdupq_n_f32(0.6931471805f); // ln(2)
const float32x4_t CONST_INV_LN2 = vdupq_n_f32(1.4426950408f); // 1/ln(2)
const float32x4_t CONST_0 = vdupq_n_f32(0.f);
const int32x4_t CONST_NEGATIVE_126 = vdupq_n_s32(-126);
// Perform range reduction [-log(2),log(2)]
int32x4_t m = vcvtq_s32_f32(vmulq_f32(x, CONST_INV_LN2));
float32x4_t val = vmlsq_f32(x, vcvtq_f32_s32(m), CONST_LN2);
// Polynomial Approximation
float32x4_t poly = vtaylor_polyq_f32(val, &exp_tab);
// Reconstruct
poly = vreinterpretq_f32_s32(vqaddq_s32(vreinterpretq_s32_f32(poly), vqshlq_n_s32(m, 23)));
poly = vbslq_f32(vcltq_s32(m, CONST_NEGATIVE_126), CONST_0, poly);
return poly;
}
static inline float32x4_t vlogq_f32(float32x4_t x)
{
const int32x4_t CONST_127 = vdupq_n_s32(127); // 127
const float32x4_t CONST_LN2 = vdupq_n_f32(0.6931471805f); // ln(2)
// Extract exponent
int32x4_t m = vsubq_s32(vreinterpretq_s32_u32(vshrq_n_u32(vreinterpretq_u32_f32(x), 23)), CONST_127);
float32x4_t val = vreinterpretq_f32_s32(vsubq_s32(vreinterpretq_s32_f32(x), vshlq_n_s32(m, 23)));
// Polynomial Approximation
float32x4_t poly = vtaylor_polyq_f32(val, &log_tab);
// Reconstruct
poly = vmlaq_f32(poly, vcvtq_f32_s32(m), CONST_LN2);
return poly;
}
static inline float32x4_t vtanhq_f32(float32x4_t val)
{
const float32x4_t CONST_1 = vdupq_n_f32(1.f);
const float32x4_t CONST_2 = vdupq_n_f32(2.f);
const float32x4_t CONST_MIN_TANH = vdupq_n_f32(-10.f);
const float32x4_t CONST_MAX_TANH = vdupq_n_f32(10.f);
float32x4_t x = vminq_f32(vmaxq_f32(val, CONST_MIN_TANH), CONST_MAX_TANH);
float32x4_t exp2x = vexpq_f32(vmulq_f32(CONST_2, x));
float32x4_t num = vsubq_f32(exp2x, CONST_1);
float32x4_t den = vaddq_f32(exp2x, CONST_1);
float32x4_t tanh = vmulq_f32(num, vinvq_f32(den));
return tanh;
}
static inline float32x4_t vpowq_f32(float32x4_t val, float32x4_t n)
{
return vexpq_f32(vmulq_f32(n, vlogq_f32(val)));
}
static void lrn_kernel(int i, int id, void* data, const float* input, float* output, float* square, float alpha,
float beta, float bias, int local_size, int channel_size, int channel_num, int num_thread)
{
int step = ((int*)data)[0];
const float32x4_t alpha_vec = vdupq_n_f32(alpha / local_size);
const float32x4_t beta_vec = vdupq_n_f32(beta);
const float32x4_t bias_vec = vdupq_n_f32(bias);
int mod = channel_size / 4;
int start_c = step * id;
int end_c = step * id + step;
// #pragma omp parallel for num_threads(num_thread)
for (int j = start_c; j < end_c; j++)
{
int c_start = j - local_size / 2;
int c_end = j + local_size / 2;
c_start = MAX(0, c_start);
c_end = MIN(c_end, channel_num - 1);
const float* cur_input = input + j * channel_size;
float* cur_output = output + j * channel_size;
for (int m = 0; m < mod; m++)
{
float32x4_t accu = vdupq_n_f32(0.f);
for (int l = c_start; l <= c_end; l++)
{
accu = vaddq_f32(accu, vld1q_f32(square + l * channel_size + m * 4));
}
const float32x4_t normalized = vpowq_f32(vmlaq_f32(bias_vec, alpha_vec, accu), beta_vec);
const float32x4_t normalized_pixel = vmulq_f32(vld1q_f32(cur_input), vinvq_f32(normalized));
vst1q_f32(cur_output, normalized_pixel);
cur_input += 4;
cur_output += 4;
}
float alpha_over_size = alpha / local_size;
for (int m = 4 * mod; m < channel_size; m++)
{
float sum = 0;
for (int l = c_start; l <= c_end; l++)
{
sum = sum + square[l * channel_size + m];
}
*cur_output++ = *cur_input++ * pow(bias + alpha_over_size * sum, -beta);
}
}
}
int lrn_run(struct tensor* output_tensor, struct tensor* input_tensor, struct lrn_param* lrn_param,
int num_thread)
{
init_tab();
const float* input = (float*)input_tensor->data;
float* output = (float*)output_tensor->data;
float* square = (float*)(malloc(input_tensor->elem_num * sizeof(float)));
int n = input_tensor->dims[0];
int c = input_tensor->dims[1];
int h = input_tensor->dims[2];
int w = input_tensor->dims[3];
int img_size = c * h * w;
int channel_size = h * w;
float alpha = lrn_param->alpha;
float beta = lrn_param->beta;
float bias = lrn_param->k;
int local_size = lrn_param->local_size;
for (int i = 0; i < n; i++)
{
/* get square value */
const float* img_base = input + i * img_size;
float* out_base = output + i * img_size;
int j = 0;
for (; j < (img_size & -4); j += 4)
{
float32x4_t in = vld1q_f32(img_base + j);
in = vmulq_f32(in, in);
vst1q_f32(square + j, in);
}
for (; j < img_size; j++)
square[j] = img_base[j] * img_base[j];
if (lrn_param->norm_region != 0)
{
sys_free(square);
TLOG_ERR("LRN: Only support across channels\n");
return -1;
}
lrn_kernel(0, 0, &c, img_base, out_base, square, alpha, beta, bias, local_size, channel_size, c, num_thread);
}
free(square);
return 0;
}
|
inputTimed.c | #include<stdlib.h>
#include<sys/time.h>
#include<stdio.h>
#include<omp.h>
#define SIZE 100
#define THRESHOLD 0.1
int main(int argc, char *argv[]) {
double threshold = THRESHOLD;
if (argc == 2) {
sscanf(argv[1], "%lf", &threshold);
}
printf("Threshold: %lf\n", threshold);
// Getting current time to help randomize the system
srand(10);
// Initializing the 2D ocean
float arr[SIZE][SIZE];
int p=0, q=0;
for(p=0; p < SIZE; p++) {
for(q = 0; q < SIZE; q++) {
arr[p][q] = rand() % (SIZE*10);
}
}
// Printing the Input Ocean
//for(p = 0; p < SIZE; p++) {
// for(q = 0; q < SIZE; q++) {
// fprintf(stderr, "%.2f\t", arr[p][q]);
// }
// fprintf(stderr, "\n");
//}
float diff = 0;
// Propagating the Ocean Waves
#pragma omp parallel shared(diff)
{
/**
fprintf(stderr, "\n Hello World by Thread # %d\n", omp_get_thread_num());
**/
int done = 0;
float mydiff = 0;
int loop = 0;
struct timeval tvStart, tvStop, bStart, bStop;
long total = 0;
gettimeofday(&tvStart, NULL);
while(!done) {
loop++;
mydiff = 0;
diff = 0;
gettimeofday(&bStart, NULL);
#pragma omp barrier
gettimeofday(&bStop, NULL);
total += (1000000*(bStop.tv_sec - bStart.tv_sec) + (bStop.tv_usec - bStart.tv_usec));
int mymin = (SIZE/omp_get_num_threads())*omp_get_thread_num(); // Assuming SIZE%omp_get_num_threads()==0
int mymax = mymin + SIZE/omp_get_num_threads();
int i, j;
for (i = mymin; i < mymax; i++) { // i spans from mymin (inclusive) to mymax (exclusive)
for(j = 0; j < SIZE; j++) {
float temp = arr[i][j];
arr[i][j] = 0.2 * (arr[i][j] + ((i+1)>=SIZE?0:arr[i+1][j]) + ((j+1)>=SIZE?0:arr[i][j+1]) + ((i-1)<0?0:arr[i-1][j]) + ((j-1)<0?0:arr[i][j-1]));
mydiff += arr[i][j] < temp ? temp - arr[i][j] : arr[i][j] -temp;
}
}
#pragma omp critical
{
diff += mydiff;
}
gettimeofday(&bStart, NULL);
#pragma omp barrier
gettimeofday(&bStop, NULL);
total += (1000000*(bStop.tv_sec - bStart.tv_sec) + (bStop.tv_usec - bStart.tv_usec));
if(((float)diff)/(SIZE*SIZE) < threshold || loop > 100000) {
done = 1;
}
gettimeofday(&bStart, NULL);
#pragma omp barrier
gettimeofday(&bStop, NULL);
total += (1000000*(bStop.tv_sec - bStart.tv_sec) + (bStop.tv_usec - bStart.tv_usec));
}
double stop = (double)clock();
gettimeofday(&tvStop, 0);
fprintf(stdout, "Time taken by thread %d, for %d loops is %ld microseconds.\n",omp_get_thread_num(), loop, 1000000*(tvStop.tv_sec-tvStart.tv_sec) + (tvStop.tv_usec-tvStart.tv_usec));
fprintf(stdout, "Time taken by thread %d, for synchronization is %ld microseconds.\n",omp_get_thread_num(), total);
}
for(p = 0; p < SIZE; p++) {
for(q = 0; q < SIZE; q++) {
fprintf(stderr, "%.2f\t", arr[p][q]);
break;
}
fprintf(stderr, "\n");
break;
}
}
|
ctrsm.c | #include "blas.h"
#include "error.h"
#include <stdio.h>
#include "handle.h"
#include "config.h"
#include "ctrsm.fatbin.c"
static inline size_t min(size_t a, size_t b) { return (a < b) ? a : b; }
static inline size_t max(size_t a, size_t b) { return (a > b) ? a : b; }
static inline CUresult cuMemcpyHtoD2DAsync(CUdeviceptr A, size_t lda, size_t ai, size_t aj,
const void * B, size_t ldb, size_t bi, size_t bj,
size_t m, size_t n, size_t elemSize, CUstream stream) {
CUDA_MEMCPY2D copy = {
bi * elemSize, bj, CU_MEMORYTYPE_HOST, B, 0, 0, ldb * elemSize,
ai * elemSize, aj, CU_MEMORYTYPE_DEVICE, NULL, A, 0, lda * elemSize,
m * elemSize, n };
return cuMemcpy2DAsync(©, stream);
}
static inline CUresult cuMemcpyDtoH2DAsync(void * A, size_t lda, size_t ai, size_t aj,
CUdeviceptr B, size_t ldb, size_t bi, size_t bj,
size_t m, size_t n, size_t elemSize, CUstream stream) {
CUDA_MEMCPY2D copy = {
bi * elemSize, bj, CU_MEMORYTYPE_DEVICE, NULL, B, 0, ldb * elemSize,
ai * elemSize, aj, CU_MEMORYTYPE_HOST, A, 0, 0, lda * elemSize,
m * elemSize, n };
return cuMemcpy2DAsync(©, stream);
}
static const float complex zero = 0.0f + 0.0f * I;
static const float complex one = 1.0f + 0.0f * I;
void ctrsm(CBlasSide side, CBlasUplo uplo, CBlasTranspose transA, CBlasDiag diag,
size_t m, size_t n,
float complex alpha, const float complex * restrict A, size_t lda,
float complex * restrict B, size_t ldb) {
const size_t nRowA = (side == CBlasLeft) ? m : n;
int info = 0;
if (lda < nRowA)
info = 9;
else if (ldb < m)
info = 11;
if (info != 0) {
XERBLA(info);
return;
}
if (m == 0 || n == 0)
return;
if (alpha == zero) {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
for (size_t i = 0; i < m; i++)
B[j * ldb + i] = zero;
}
return;
}
if (side == CBlasLeft) {
if (transA == CBlasNoTrans) {
if (uplo == CBlasUpper) {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
if (alpha != one) {
for (size_t i = 0; i < m; i++)
B[j * ldb + i] *= alpha;
}
size_t k = m - 1;
do {
if (B[j * ldb + k] != zero) {
if (diag == CBlasNonUnit) B[j * ldb + k] /= A[k * lda + k];
register float complex temp = B[j * ldb + k];
for (size_t i = 0; i < k; i++)
B[j * ldb + i] -= temp * A[k * lda + i];
}
} while (k-- > 0);
}
}
else {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
if (alpha != one) {
for (size_t i = 0; i < m; i++)
B[j * ldb + i] *= alpha;
}
for (size_t k = 0; k < m; k++) {
if (B[j * ldb + k] != zero) {
if (diag == CBlasNonUnit) B[j * ldb + k] /= A[k * lda + k];
register float complex temp = B[j * ldb + k];
for (size_t i = k + 1; i < m; i++)
B[j * ldb + i] -= temp * A[k * lda + i];
}
}
}
}
}
else {
if (uplo == CBlasUpper) {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
for (size_t i = 0; i < m; i++) {
register float complex temp = alpha * B[j * ldb + i];
if (transA == CBlasTrans) {
for (size_t k = 0; k < i; k++)
temp -= A[i * lda + k] * B[j * ldb + k];
if (diag == CBlasNonUnit) temp /= A[i * lda + i];
}
else {
for (size_t k = 0; k < i; k++)
temp -= conjf(A[i * lda + k]) * B[j * ldb + k];
if (diag == CBlasNonUnit) temp /= conjf(A[i * lda + i]);
}
B[j * ldb + i] = temp;
}
}
}
else {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
size_t i = m - 1;
do {
register float complex temp = alpha * B[j * ldb + i];
if (transA == CBlasTrans) {
for (size_t k = i + 1; k < m; k++)
temp -= A[i * lda + k] * B[j * ldb + k];
if (diag == CBlasNonUnit) temp /= A[i * lda + i];
}
else {
for (size_t k = i + 1; k < m; k++)
temp -= conjf(A[i * lda + k]) * B[j * ldb + k];
if (diag == CBlasNonUnit) temp /= conjf(A[i * lda + i]);
}
B[j * ldb + i] = temp;
} while (i-- > 0);
}
}
}
}
else {
if (transA == CBlasNoTrans) {
if (uplo == CBlasUpper) {
for (size_t j = 0; j < n; j++) {
if (alpha != one) {
for (size_t i = 0; i < m; i++)
B[j * ldb + i] *= alpha;
}
for (size_t k = 0; k < j; k++) {
if (A[j * lda + k] != zero) {
register float complex temp = A[j * lda + k];
for (size_t i = 0; i < m; i++)
B[j * ldb + i] -= temp * B[k * ldb + i];
}
}
if (diag == CBlasNonUnit) {
register float complex temp = one / A[j * lda + j];
for (size_t i = 0; i < m; i++)
B[j * ldb + i] *= temp;
}
}
}
else {
size_t j = n - 1;
do {
if (alpha != one) {
for (size_t i = 0; i < m; i++)
B[j * ldb + i] *= alpha;
}
for (size_t k = j + 1; k < n; k++) {
if (A[j * lda + k] != zero) {
register float complex temp = A[j * lda + k];
for (size_t i = 0; i < m; i++)
B[j * ldb + i] -= temp * B[k * ldb + i];
}
}
if (diag == CBlasNonUnit) {
register float complex temp = one / A[j * lda + j];
for (size_t i = 0; i < m; i++)
B[j * ldb + i] *= temp;
}
} while (j-- > 0);
}
}
else {
if (uplo == CBlasUpper) {
size_t k = n - 1;
do {
if (diag == CBlasNonUnit) {
register float complex temp;
if (transA == CBlasTrans)
temp = one / A[k * lda + k];
else
temp = one / conjf(A[k * lda + k]);
for (size_t i = 0; i < m; i++)
B[k * ldb + i] *= temp;
}
for (size_t j = 0; j < k; j++) {
if (A[k * lda + j] != zero) {
register float complex temp;
if (transA == CBlasTrans)
temp = A[k * lda + j];
else
temp = conjf(A[k * lda + j]);
for (size_t i = 0; i < m; i++)
B[j * ldb + i] -= temp * B[k * ldb + i];
}
}
if (alpha != one) {
for (size_t i = 0; i < m; i++)
B[k * ldb + i] *= alpha;
}
} while (k-- > 0);
}
else {
for (size_t k = 0; k < n; k++) {
if (diag == CBlasNonUnit) {
register float complex temp;
if (transA == CBlasTrans)
temp = one / A[k * lda + k];
else
temp = one / conjf(A[k * lda + k]);
for (size_t i = 0; i < m; i++)
B[k * ldb + i] *= temp;
}
for (size_t j = k + 1; j < n; j++) {
if (A[k * lda + j] != zero) {
register float complex temp;
if (transA == CBlasTrans)
temp = A[k * lda + j];
else
temp = conjf(A[k * lda + j]);
for (size_t i = 0; i < m; i++)
B[j * ldb + i] -= temp * B[k * ldb + i];
}
}
if (alpha != one) {
for (size_t i = 0; i < m; i++)
B[k * ldb + i] *= alpha;
}
}
}
}
}
}
CUresult cuCtrsm(CUBLAShandle handle,
CBlasSide side, CBlasUplo uplo, CBlasTranspose transA, CBlasDiag diag,
size_t m, size_t n,
float complex alpha, CUdeviceptr A, size_t lda,
CUdeviceptr B, size_t ldb, CUstream stream) {
const size_t nRowA = (side == CBlasLeft) ? m : n;
int info = 0;
if (lda < nRowA)
info = 9;
else if (ldb < m)
info = 11;
if (info != 0) {
XERBLA(info);
return CUDA_ERROR_INVALID_VALUE;
}
if (m == 0 || n == 0)
return CUDA_SUCCESS;
CU_ERROR_CHECK(cuCtxPushCurrent(handle->context));
if (handle->ctrsm == NULL)
CU_ERROR_CHECK(cuModuleLoadData(&handle->ctrsm, imageBytes));
const unsigned int bx = 4;
const unsigned int by = 4;
const unsigned int mb = (side == CBlasLeft) ? 4 : 16;
const unsigned int nb = (side == CBlasLeft) ? 16 : 4;
char name[112];
snprintf(name, 112,
"_Z5ctrsmIL9CBlasSide%dEL9CBlasUplo%dEL14CBlasTranspose%dEL9CBlasDiag%dELj%uELj%uELj%uELj%uEEvPK6float2PS4_S4_iiii",
side, uplo, transA, diag, mb, nb, bx, by);
CUfunction function;
CU_ERROR_CHECK(cuModuleGetFunction(&function, handle->ctrsm, name));
void * params[] = { &A, &B, &alpha, &lda, &ldb, &m, &n };
CU_ERROR_CHECK(cuLaunchKernel(function, (unsigned int)(m + mb - 1) / mb, (unsigned int)(n + nb - 1) / nb, 1,
bx, by, 1, 0, stream, params, NULL));
CU_ERROR_CHECK(cuCtxPopCurrent(&handle->context));
return CUDA_SUCCESS;
}
CUresult cuMultiGPUCtrsm(CUmultiGPUBLAShandle handle,
CBlasSide side, CBlasUplo uplo, CBlasTranspose transA, CBlasDiag diag,
size_t m, size_t n,
float complex alpha, const float complex * restrict A, size_t lda,
float complex * restrict B, size_t ldb) {
const size_t nRowA = (side == CBlasLeft) ? m : n;
int info = 0;
if (lda < nRowA)
info = 9;
else if (ldb < m)
info = 11;
if (info != 0) {
XERBLA(info);
return CUDA_ERROR_INVALID_VALUE;
}
if (m == 0 || n == 0)
return CUDA_SUCCESS;
if (alpha == zero) {
cgemm(CBlasNoTrans, CBlasNoTrans, m, n, 0, zero, A, lda, B, ldb, zero, B, ldb);
return CUDA_SUCCESS;
}
const size_t mb = (transA == CBlasNoTrans) ? CGEMM_N_MB : CGEMM_C_MB;
const size_t nb = CGEMM_N_NB;
if (side == CBlasLeft) {
if (transA == CBlasNoTrans) {
if (uplo == CBlasUpper) {
size_t r = m % mb;
size_t i = (r == 0) ? m : m + mb - r;
do {
i -= mb;
const size_t ib = min(mb, m - i);
CU_ERROR_CHECK(cuMultiGPUCgemm(handle, CBlasNoTrans, CBlasNoTrans, ib, n, m - i - ib, -one, &A[(i + ib) * lda + i], lda, &B[i + ib], ldb, alpha, &B[i], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
ctrsm(CBlasLeft, CBlasUpper, CBlasNoTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
} while (i > 0);
}
else {
for (size_t i = 0; i < m; i += mb) {
const size_t ib = min(mb, m - i);
CU_ERROR_CHECK(cuMultiGPUCgemm(handle, CBlasNoTrans, CBlasNoTrans, ib, n, i, -one, &A[i], lda, B, ldb, alpha, &B[i], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
ctrsm(CBlasLeft, CBlasLower, CBlasNoTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
}
}
}
else {
if (uplo == CBlasUpper) {
for (size_t i = 0; i < m; i += mb) {
const size_t ib = min(mb, m - i);
CU_ERROR_CHECK(cuMultiGPUCgemm(handle, transA, CBlasNoTrans, ib, n, i, -one, &A[i * lda], lda, B, ldb, alpha, &B[i], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
ctrsm(CBlasLeft, CBlasUpper, transA, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
}
}
else {
size_t r = m % mb;
size_t i = (r == 0) ? m : m + mb - r;
do {
i -= mb;
const size_t ib = min(mb, m - i);
CU_ERROR_CHECK(cuMultiGPUCgemm(handle, transA, CBlasNoTrans, ib, n, m - i - ib, -one, &A[i * lda + i + ib], lda, &B[i + ib], ldb, alpha, &B[i], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
ctrsm(CBlasLeft, CBlasLower, transA, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
} while (i > 0);
}
}
}
else {
if (transA == CBlasNoTrans) {
if (uplo == CBlasUpper) {
for (size_t j = 0; j < n; j += nb) {
const size_t jb = min(nb, n - j);
CU_ERROR_CHECK(cuMultiGPUCgemm(handle, CBlasNoTrans, CBlasNoTrans, m, jb, j, -one, B, ldb, &A[j * lda], lda, alpha, &B[j * ldb], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
ctrsm(CBlasRight, CBlasUpper, CBlasNoTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
}
}
else {
size_t r = n % nb;
size_t j = (r == 0) ? n : n + nb - r;
do {
j -= nb;
const size_t jb = min(nb, n - j);
CU_ERROR_CHECK(cuMultiGPUCgemm(handle, CBlasNoTrans, CBlasNoTrans, m, jb, n - j - jb, -one, &B[(j + jb) * ldb], ldb, &A[j * lda + j + jb], lda, alpha, &B[j * ldb], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
ctrsm(CBlasRight, CBlasLower, CBlasNoTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
} while (j > 0);
}
}
else {
if (uplo == CBlasUpper) {
size_t r = n % nb;
size_t j = (r == 0) ? n : n + nb - r;
do {
j -= nb;
const size_t jb = min(nb, n - j);
CU_ERROR_CHECK(cuMultiGPUCgemm(handle, CBlasNoTrans, transA, m, jb, n - j - jb, -one, &B[(j + jb) * ldb], ldb, &A[(j + jb) * lda + j], lda, alpha, &B[j * ldb], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
ctrsm(CBlasRight, CBlasUpper, transA, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
} while (j > 0);
}
else {
for (size_t j = 0; j < n; j += nb) {
const size_t jb = min(nb, n - j);
CU_ERROR_CHECK(cuMultiGPUCgemm(handle, CBlasNoTrans, transA, m, jb, j, -one, B, ldb, &A[j], lda, alpha, &B[j * ldb], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
ctrsm(CBlasRight, CBlasLower, transA, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
}
}
}
}
return CUDA_SUCCESS;
}
|
GB_unaryop__ainv_uint64_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint64_int32
// op(A') function: GB_tran__ainv_uint64_int32
// C type: uint64_t
// A type: int32_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT64 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_uint64_int32
(
uint64_t *restrict Cx,
const int32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_uint64_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
sudoValidator.c |
/*
Universidad del Valle de Guatemala
Sistemas operativos
Ing. Erick Pineda
sudokoValidator.c
Proposito: Dado una solucion en txt, se verifica si la solucion es correcta
Lab3
*/
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/shm.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <omp.h>
//Sudoko instanciado de caracteres strins
char sudoku[9][9];
int correct_columns, correct_rows;
// Verificacion de filas en el sudoku
int verify_rows()
{
//Paralelizacion nested
omp_set_nested(1);
// 9 hilos para cada celda/columna
omp_set_num_threads(9);
int grid[9];
int valid = 0;
#pragma omp parallel for private(grid) schedule(dynamic)
for (int i = 0; i < 9; i++)
{
char nums_validos[] = "123456789";
char *num;
//Basado en panda, descartananddo \0
for (num = &nums_validos[0]; *num != '\0'; num++)
{
int not_num = 0;
int j = 0;
while (not_num == 0 && j < 9)
{
if (sudoku[i][j] == *num)
not_num = 1;
j++;
}
if (not_num == 0)
valid = -1;
}
printf("En la verificacion de las filas el hilo en proceso es: %ld \n", syscall(SYS_gettid));
}
return valid;
}
int verify_columns()
{
//Paralelizacion nested
omp_set_nested(1);
omp_set_num_threads(9);
int grid[9];
int valid = 0;
#pragma omp parallel for schedule(dynamic)
for (int i = 0; i < 9; i++)
{
char nums_validos[] = "123456789";
char *num;
//Basado en panda, descartananddo \0
for (num = &nums_validos[0]; *num != '\0'; num++)
{
int not_num = 0;
int j = 0;
while (not_num == 0 && j < 9)
{
if (sudoku[j][i] == *num)
not_num = 1;
j++;
}
if (not_num == 0)
valid = -1;
}
printf("En la verificacion de las columnas el hilo en proceso es: %ld \n", syscall(SYS_gettid));
}
return valid;
}
int verify_rows_nums(char temp[9][9])
{
omp_set_nested(1);
omp_set_num_threads(9);
int grid[9];
int valid = 0;
#pragma omp parallel for private(grid) schedule(dynamic)
for (int i = 0; i < 9; i++)
{
char nums_validos[] = "123456789";
char *num;
//Basado en panda, descartananddo \0
for (num = &nums_validos[0]; *num != '\0'; num++)
{
int not_num = 0;
int j = 0;
while (not_num == 0 && j < 9)
{
if (temp[i][j] == *num)
not_num = 1;
j++;
}
if (not_num == 0)
valid = -1;
}
}
return valid;
}
//Verificacion de los subarrays sacados de: https://www.geeksforgeeks.org/check-if-given-sudoku-solution-is-valid-or-not/
int verify_Subarrays()
{
omp_set_nested(1);
omp_set_num_threads(3);
char temp_sudoku[9][9];
int row = 0, column = 0;
int grid[9];
// #pragma omp parallel for private(grid)
for (int x = 0; x < 3; x++)
{
//#pragma omp parallel for
for (int y = 0; y < 3; y++)
{
// #pragma omp parallel for
for (int i = 0; i < 3; i++)
{
// #pragma omp parallel for
for (int j = 0; j < 3; j++)
{
temp_sudoku[9][9] = sudoku[i + (x * 3)][j + (y * 3)];
column++;
}
}
column = 0;
row++;
}
}
return verify_rows_nums(temp_sudoku);
}
void *complete_column_verification()
{
printf("Hijo Columna ID: %ld \n", syscall(SYS_gettid));
correct_columns = verify_columns();
pthread_exit(0);
}
void *complete_row_verification()
{
printf("Hijo Fila ID: %ld \n", syscall(SYS_gettid));
correct_rows = verify_rows();
pthread_exit(0);
}
/* Maps Sudoku string to array */
void mapping_Sudoku(int fd)
{
//Paralelizacion nested
omp_set_nested(1);
omp_set_num_threads(9);
struct stat stat_s;
int status_sudoku = fstat(fd, &stat_s);
int size = stat_s.st_size;
//Mappeando el archivo txt
char *ptr = (char *)mmap(0, size, PROT_READ, MAP_PRIVATE, fd, 0);
int char_pos = 0;
int grid[9];
#pragma omp parallel for private(grid) schedule(dynamic)
for (int i = 0; i < 9; i++)
{
#pragma omp parallel for
for (int j = 0; j < 9; j++)
{
sudoku[i][j] = ptr[char_pos];
char_pos++;
}
}
munmap(ptr,size);
close(fd);
}
int main(int argc, char *argv[])
{
omp_set_num_threads(1);
//gcc -fopenmp -pthreads -o sudokoValidator sudokoValidator.c
if (argc < 2)
{
printf("Archivo de suduko ingresado incorrectamente. \n");
return 1;
}
int input;
// Lee el txt, y saca error en caso no se encuentra el archivo txt
if ((input = open(argv[1], O_RDONLY)) < 0)
{
perror("Archivo de sudoko que fue ingresado no se puede abrir. \n");
return 1;
}
else
{
mapping_Sudoku(input);
// Idetinficiacion del padre antes del primer hijo
pid_t parent_pid = getpid();
int child = fork();
if (child < 0)
{
perror("Error de hacer Fork.");
return 1;
}
else if (child == 0)
{
// Identidades a strings del proceso hijo
char p_pid[6];
sprintf(p_pid, "%d", (int)parent_pid);
execlp("ps", "ps", "-p", p_pid, "-lLf", NULL);
}
else
{
// Proceso padre al inicio del programa
pthread_t col_verification;
// instancia de los hilos que verifica las columnas
if (pthread_create(&col_verification, NULL, complete_column_verification, NULL))
{
perror("Error al crear el Hilo");
return 1;
}
if (pthread_join(col_verification, NULL))
{
perror("Error al intentar unirse el hilo.");
return 1;
}
printf("Hijo principal es : %ld \n", syscall(SYS_gettid));
usleep(30000);
printf("Hijo terminado ... \n");
// instancia de los hilos que verifica las filas
pthread_t row_verification;
if (pthread_create(&row_verification, NULL, complete_row_verification, NULL))
{
perror("Error al crear el Hilo");
return 1;
}
if (pthread_join(row_verification, NULL))
{
perror("Error al intentar unirse el hilo.");
return 1;
}
// Verificacion del sudoku
if (correct_rows == 0 && correct_columns == 0)
{
printf("|--- Solucion Valida ---| \n");
}
else
{
printf("|--- Solucion Invalida :/ ---|\n");
}
// Segundo hijo
int child2 = fork();
if (child2 == 0)
{
//Mismo procedimiento que el anterior
char p_pid[6];
sprintf(p_pid, "%d", (int)parent_pid);
execlp("ps", "ps", "-p", p_pid, "-lLf", NULL);
}
else
{
//Proceso sigue en el padre
usleep(30000);
printf("Hijo terminado... \n");
return 0;
}
}
}
} |
fc_kernel_fp16_arm82.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, Open AI Lab
* Author: xlchen@openailab.com
*/
#include <stdint.h>
#include <stdlib.h>
#include <math.h>
#include <arm_neon.h>
#include "fc_kernel_fp16_arm82.h"
#include "compiler_fp16.h"
void hgemv_1x8_a55(__fp16* biases, __fp16* input, __fp16* kernel, long kernel_size, __fp16* output);
void hgemv_1x2_a55(__fp16* biases, __fp16* input, __fp16* kernel, long kernel_size, __fp16* output);
// start and end channel must be 8 aligned
void hgemv1x8(const __fp16* input, const __fp16* output, __fp16* weight_interleaved, const __fp16* biases,
int kernel_size, int start_channel, int end_channel, int num_thread, int cpu_affinity)
{
int ch = 0;
__fp16 *cur_kernel, *cur_biases, *cur_result;
// #pragma omp parallel for num_threads(num_thread)
for(ch = start_channel; ch < end_channel; ch += 8)
{
cur_kernel = ( __fp16* )(weight_interleaved + kernel_size * ch);
cur_result = ( __fp16* )(output + ch);
cur_biases = biases ? ( __fp16* )(biases + ch) : NULL;
hgemv_1x8_a55(cur_biases, ( __fp16* )input, cur_kernel, kernel_size, cur_result); // todo implement with A76
}
}
// start channel must be 2 aligned
void hgemv1x2(const __fp16* input, const __fp16* output, __fp16* weight_interleaved, const __fp16* biases,
int kernel_size, int start_channel, int end_channel, int num_thread, int cpu_affinity)
{
__fp16 sum;
int ch = 0;
__fp16 *cur_kernel, *cur_biases, *cur_result;
for(ch = start_channel; ch < (end_channel & -2); ch += 2)
{
cur_kernel = ( __fp16* )(weight_interleaved + kernel_size * ch);
cur_result = ( __fp16* )(output + ch);
cur_biases = biases ? ( __fp16* )(biases + ch) : NULL;
hgemv_1x2_a55(cur_biases, ( __fp16* )input, cur_kernel, kernel_size, cur_result);
}
if(end_channel & 0x1)
{
cur_kernel = ( __fp16* )(weight_interleaved + kernel_size * ch);
cur_result = ( __fp16* )(output + ch);
sum = biases ? *(biases + ch) : 0.f;
for(int j = 0; j < kernel_size; j++)
sum = sum + input[j] * cur_kernel[j];
*cur_result = sum;
}
}
static void interleave_kernel(const __fp16* kernel, __fp16* kernel_interleaved, int out_chan, int kernel_size)
{
int i, j, k;
__fp16* cur_kernel[8];
__fp16* cur_kernel_interleaved;
// interleave 8 kernel
for(i = 0; i < (out_chan & -8); i += 8)
{
for(j = 0; j < 8; j++)
cur_kernel[j] = ( __fp16* )kernel + kernel_size * (i + j);
cur_kernel_interleaved = ( __fp16* )kernel_interleaved + kernel_size * i;
for(k = 0; k < kernel_size; k++)
for(j = 0; j < 8; j++)
cur_kernel_interleaved[8 * k + j] = *(cur_kernel[j] + k);
}
// interleave 2 kernel
for(; i < (out_chan & -2); i += 2)
{
for(j = 0; j < 2; j++)
cur_kernel[j] = ( __fp16* )kernel + kernel_size * (i + j);
cur_kernel_interleaved = ( __fp16* )kernel_interleaved + kernel_size * i;
for(k = 0; k < kernel_size; k++)
for(j = 0; j < 2; j++)
cur_kernel_interleaved[2 * k + j] = *(cur_kernel[j] + k);
}
// copy last kernel
if(out_chan & 0x1)
{
cur_kernel[0] = ( __fp16* )kernel + kernel_size * i;
cur_kernel_interleaved = ( __fp16* )kernel_interleaved + kernel_size * i;
for(k = 0; k < kernel_size; k++)
cur_kernel_interleaved[k] = *(cur_kernel[0] + k);
}
return;
}
int fp16_fc_kernel_prerun(struct ir_tensor* input_tensor , \
struct ir_tensor* filter_tensor , \
struct ir_tensor* output_tensor , \
struct fc_priv_info* priv_info , \
struct fc_param* param)
{
int num_output = param->num_output;
int kernel_size = filter_tensor->dims[1];
int kernel_align = ((kernel_size + 1) & -2);
if (!priv_info->interleave_buffer)
{
int mem_size = sizeof(__fp16) * num_output * kernel_align;
void* mem = sys_malloc(mem_size);
priv_info->interleave_buffer = mem;
priv_info->interleave_buffer_size = mem_size;
}
if (!priv_info->input_buffer)
{
int mem_size = sizeof(__fp16) * kernel_align;
void* mem = sys_malloc(mem_size);
priv_info->input_buffer = mem;
priv_info->input_buffer_size = mem_size;
}
__fp16* filter_data = (__fp16*)filter_tensor->data;
interleave_kernel(filter_data, (__fp16*)priv_info->interleave_buffer, num_output, kernel_size);
return 0;
}
int fp16_fc_kernel_run(struct ir_tensor* input_tensor , \
struct ir_tensor* filter_tensor , \
struct ir_tensor* bias_tensor , \
struct ir_tensor* output_tensor , \
struct fc_priv_info* priv_info , \
struct fc_param* param, \
int num_thread, int cpu_affinity)
{
int out_num = param->num_output;
int kernel_size = filter_tensor->dims[1];
__fp16* input = (__fp16*)input_tensor->data;
__fp16* output = (__fp16*)output_tensor->data;
__fp16* weight = (__fp16*)priv_info->interleave_buffer;
__fp16* biases = NULL;
if (bias_tensor)
biases = (__fp16*)bias_tensor->data;
int out_num_8 = out_num & ~7;
for(int i = 0; i < input_tensor->dims[0]; i++)
{
__fp16* cur_input = input + i * kernel_size;
__fp16* cur_output = output + i * out_num;
hgemv1x8(cur_input, cur_output, weight, biases, kernel_size, 0, out_num_8, num_thread, cpu_affinity);
if(out_num & 0x7)
hgemv1x2(cur_input, cur_output, weight, biases, kernel_size, out_num_8, out_num, num_thread, cpu_affinity);
}
return 0 ;
}
|
opencl_dashlane_fmt_plug.c | /*
* JtR OpenCL format to crack Dashlane Password Manager databases
*
* This software is Copyright (c) 2017, Dhiru Kholia <dhiru at openwall.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* The OpenCL boilerplate code is borrowed from other OpenCL formats.
*/
#ifdef HAVE_OPENCL
#include "arch.h"
#if !AC_BUILT
#define HAVE_LIBZ 1 /* legacy build has -lz in LDFLAGS */
#endif
#if HAVE_LIBZ
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_dashlane;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_dashlane);
#else
#include <string.h>
#include <stdint.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "formats.h"
#include "common.h"
#include "dashlane_common.h"
#include "options.h"
#include "jumbo.h"
#include "common-opencl.h"
#include "misc.h"
#define OUTLEN (32)
#include "opencl_pbkdf2_hmac_sha1.h"
#define FORMAT_LABEL "dashlane-opencl"
#define OCL_ALGORITHM_NAME "PBKDF2-SHA1 OpenCL"
#define CPU_ALGORITHM_NAME " AES"
#define ALGORITHM_NAME OCL_ALGORITHM_NAME CPU_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define BINARY_SIZE 0
#define BINARY_ALIGN MEM_ALIGN_WORD
#define PLAINTEXT_LENGTH 64
#define SALT_SIZE sizeof(*cur_salt)
#define SALT_ALIGN MEM_ALIGN_WORD
/* This handles all widths */
#define GETPOS(i, index) (((index) % ocl_v_width) * 4 + ((i) & ~3U) * ocl_v_width + (((i) & 3) ^ 3) + ((index) / ocl_v_width) * 64 * ocl_v_width)
static int *cracked;
static int any_cracked;
static struct custom_salt *cur_salt;
static size_t key_buf_size;
static unsigned int *inbuffer;
static pbkdf2_out *output;
static pbkdf2_salt currentsalt;
static cl_mem mem_in, mem_out, mem_salt, mem_state;
static size_t key_buf_size;
static int new_keys;
static struct fmt_main *self;
static cl_kernel pbkdf2_init, pbkdf2_loop, pbkdf2_final;
#define cracked_size (sizeof(*cracked) * global_work_size * ocl_v_width)
/*
* HASH_LOOPS is ideally made by factors of (iteration count - 1) and should
* be chosen for a kernel duration of not more than 200 ms
*/
#define HASH_LOOPS (3 * 271)
#define ITERATIONS 100000 /* Just for auto tune */
#define LOOP_COUNT (((currentsalt.iterations - 1 + HASH_LOOPS - 1)) / HASH_LOOPS)
#define STEP 0
#define SEED 128
static const char * warn[] = {
"P xfer: " , ", init: " , ", loop: " , ", final: ", ", res xfer: "
};
static int split_events[] = { 2, -1, -1 };
//This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl_autotune.h"
#include "memdbg.h"
/* ------- Helper functions ------- */
static size_t get_task_max_work_group_size()
{
size_t s;
s = autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_init);
s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_loop));
s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_final));
return s;
}
static void create_clobj(size_t gws, struct fmt_main *self)
{
gws *= ocl_v_width;
key_buf_size = 64 * gws;
// Allocate memory
inbuffer = mem_calloc(1, key_buf_size);
output = mem_alloc(sizeof(pbkdf2_out) * gws);
cracked = mem_calloc(1, cracked_size);
mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, key_buf_size, NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error allocating mem in");
mem_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, sizeof(pbkdf2_salt), NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error allocating mem setting");
mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, sizeof(pbkdf2_out) * gws, NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error allocating mem out");
mem_state = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, sizeof(pbkdf2_state) * gws, NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error allocating mem_state");
HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument");
HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 1, sizeof(mem_salt), &mem_salt), "Error while setting mem_salt kernel argument");
HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 2, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");
HANDLE_CLERROR(clSetKernelArg(pbkdf2_loop, 0, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");
HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 0, sizeof(mem_salt), &mem_salt), "Error while setting mem_salt kernel argument");
HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument");
HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 2, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");
}
static void release_clobj(void)
{
if (cracked) {
HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
HANDLE_CLERROR(clReleaseMemObject(mem_salt), "Release mem setting");
HANDLE_CLERROR(clReleaseMemObject(mem_state), "Release mem state");
HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");
MEM_FREE(inbuffer);
MEM_FREE(output);
MEM_FREE(cracked);
}
}
static void done(void)
{
if (autotuned) {
release_clobj();
HANDLE_CLERROR(clReleaseKernel(pbkdf2_init), "Release kernel");
HANDLE_CLERROR(clReleaseKernel(pbkdf2_loop), "Release kernel");
HANDLE_CLERROR(clReleaseKernel(pbkdf2_final), "Release kernel");
HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
autotuned--;
}
}
static void init(struct fmt_main *_self)
{
static char valgo[sizeof(ALGORITHM_NAME) + 8] = "";
self = _self;
opencl_prepare_dev(gpu_id);
/* VLIW5 does better with just 2x vectors due to GPR pressure */
if (!options.v_width && amd_vliw5(device_info[gpu_id]))
ocl_v_width = 2;
else
ocl_v_width = opencl_get_vector_width(gpu_id, sizeof(cl_int));
if (ocl_v_width > 1) {
/* Run vectorized kernel */
snprintf(valgo, sizeof(valgo),
OCL_ALGORITHM_NAME " %ux" CPU_ALGORITHM_NAME, ocl_v_width);
self->params.algorithm_name = valgo;
}
}
static void reset(struct db_main *db)
{
if (!autotuned) {
char build_opts[64];
snprintf(build_opts, sizeof(build_opts),
"-DHASH_LOOPS=%u -DOUTLEN=%u "
"-DPLAINTEXT_LENGTH=%u -DV_WIDTH=%u",
HASH_LOOPS, OUTLEN, PLAINTEXT_LENGTH, ocl_v_width);
opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_kernel.cl", gpu_id, build_opts);
pbkdf2_init = clCreateKernel(program[gpu_id], "pbkdf2_init", &ret_code);
HANDLE_CLERROR(ret_code, "Error creating kernel");
crypt_kernel = pbkdf2_loop = clCreateKernel(program[gpu_id], "pbkdf2_loop", &ret_code);
HANDLE_CLERROR(ret_code, "Error creating kernel");
pbkdf2_final = clCreateKernel(program[gpu_id], "pbkdf2_final", &ret_code);
HANDLE_CLERROR(ret_code, "Error creating kernel");
// Initialize openCL tuning (library) for this format.
opencl_init_auto_setup(SEED, 2 * HASH_LOOPS, split_events,
warn, 2, self, create_clobj,
release_clobj,
ocl_v_width * sizeof(pbkdf2_state), 0, db);
// Auto tune execution from shared/included code.
autotune_run(self, 2 * (ITERATIONS - 1) + 4, 0,
(cpu(device_info[gpu_id]) ?
1000000000 : 10000000000ULL));
}
}
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt*)salt;
memcpy((char*)currentsalt.salt, cur_salt->salt, 32);
currentsalt.length = 32;
currentsalt.iterations = 10204;
currentsalt.outlen = 32;
HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_salt, CL_FALSE, 0, sizeof(pbkdf2_salt), ¤tsalt, 0, NULL, NULL), "Copy salt to gpu");
}
static void clear_keys(void)
{
memset(inbuffer, 0, key_buf_size);
}
static void dashlane_set_key(char *key, int index)
{
int i;
int length = strlen(key);
for (i = 0; i < length; i++)
((char*)inbuffer)[GETPOS(i, index)] = key[i];
new_keys = 1;
}
static char *get_key(int index)
{
static char ret[PLAINTEXT_LENGTH + 1];
int i = 0;
while (i < PLAINTEXT_LENGTH &&
(ret[i] = ((char*)inbuffer)[GETPOS(i, index)]))
i++;
ret[i] = 0;
return ret;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int i, j, index;
size_t scalar_gws;
size_t *lws = local_work_size ? &local_work_size : NULL;
global_work_size = GET_MULTIPLE_OR_BIGGER_VW(count, local_work_size);
scalar_gws = global_work_size * ocl_v_width;
if (any_cracked) {
memset(cracked, 0, cracked_size);
any_cracked = 0;
}
// Copy data to gpu
if (ocl_autotune_running || new_keys) {
BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, key_buf_size, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu");
new_keys = 0;
}
// Run kernels
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_init, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]), "Run initial kernel");
for (j = 0; j < (ocl_autotune_running ? 1 : (currentsalt.outlen + 19) / 20); j++) {
for (i = 0; i < (ocl_autotune_running ? 1 : LOOP_COUNT); i++) {
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_loop, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[2]), "Run loop kernel");
BENCH_CLERROR(clFinish(queue[gpu_id]), "Error running loop kernel");
opencl_process_event();
}
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_final, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[3]), "Run intermediate kernel");
}
// Read the result back
BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, sizeof(pbkdf2_out) * scalar_gws, output, 0, NULL, multi_profilingEvent[4]), "Copy result back");
if (!ocl_autotune_running) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++) {
if (dashlane_verify(cur_salt, (unsigned char*)output[index].dk)) {
cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
any_cracked |= 1;
}
}
}
return count;
}
static int cmp_all(void *binary, int count)
{
return any_cracked;
}
static int cmp_one(void *binary, int index)
{
return cracked[index];
}
static int cmp_exact(char *source, int index)
{
return 1;
}
struct fmt_main fmt_opencl_dashlane = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
{ NULL },
{ FORMAT_TAG },
dashlane_tests
}, {
init,
done,
reset,
fmt_default_prepare,
dashlane_valid,
fmt_default_split,
fmt_default_binary,
dashlane_get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
dashlane_set_key,
get_key,
clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
#endif /* HAVE_LIBZ */
#endif /* HAVE_OPENCL */
|
Interp1PrimFifthOrderHCWENO.c | /*! @file Interp1PrimFifthOrderHCWENO.c
@author Debojyoti Ghosh
@brief hybrid compact-WENO5 Scheme (Component-wise application to vectors)
*/
#include <stdio.h>
#include <basic.h>
#include <arrayfunctions.h>
#include <mathfunctions.h>
#include <interpolation.h>
#include <tridiagLU.h>
#include <mpivars.h>
#include <hypar.h>
#ifdef with_omp
#include <omp.h>
#endif
#undef _MINIMUM_GHOSTS_
/*! \def _MINIMUM_GHOSTS_
* Minimum number of ghost points required for this interpolation
* method.
*/
#define _MINIMUM_GHOSTS_ 3
/*! @brief 5th order hybrid compact-WENO reconstruction (component-wise) on a uniform grid
Computes the interpolated values of the first primitive of a function \f${\bf f}\left({\bf u}\right)\f$
at the interfaces from the cell-centered values of the function using the fifth order hybrid compact-WENO scheme on a
uniform grid. The tridiagonal system is solved using tridiagLU() (see also #TridiagLU, tridiagLU.h). See references
below for a complete description of the method implemented here.
\b Implementation \b Notes:
+ This method assumes a uniform grid in the spatial dimension corresponding to the interpolation.
+ The scalar interpolation method is applied to the vector function in a component-wise manner.
+ The WENO weights are computed in WENOFifthOrderCalculateWeights().
+ The function computes the interpolant for the entire grid in one call. It loops over all the grid lines along the interpolation direction
and carries out the 1D interpolation along these grid lines.
+ Location of cell-centers and cell interfaces along the spatial dimension of the interpolation is shown in the following figure:
@image html chap1_1Ddomain.png
@image latex chap1_1Ddomain.eps width=0.9\textwidth
\b Function \b arguments:
Argument | Type | Explanation
--------- | --------- | ---------------------------------------------
fI | double* | Array to hold the computed interpolant at the grid interfaces. This array must have the same layout as the solution, but with \b no \b ghost \b points. Its size should be the same as u in all dimensions, except dir (the dimension along which to interpolate) along which it should be larger by 1 (number of interfaces is 1 more than the number of interior cell centers).
fC | double* | Array with the cell-centered values of the flux function \f${\bf f}\left({\bf u}\right)\f$. This array must have the same layout and size as the solution, \b with \b ghost \b points.
u | double* | The solution array \f${\bf u}\f$ (with ghost points). If the interpolation is characteristic based, this is needed to compute the eigendecomposition. For a multidimensional problem, the layout is as follows: u is a contiguous 1D array of size (nvars*dim[0]*dim[1]*...*dim[D-1]) corresponding to the multi-dimensional solution, with the following ordering - nvars, dim[0], dim[1], ..., dim[D-1], where nvars is the number of solution components (#HyPar::nvars), dim is the local size (#HyPar::dim_local), D is the number of spatial dimensions.
x | double* | The grid array (with ghost points). This is used only by non-uniform-grid interpolation methods. For multidimensional problems, the layout is as follows: x is a contiguous 1D array of size (dim[0]+dim[1]+...+dim[D-1]), with the spatial coordinates along dim[0] stored from 0,...,dim[0]-1, the spatial coordinates along dim[1] stored along dim[0],...,dim[0]+dim[1]-1, and so forth.
upw | int | Upwinding direction: if positive, a left-biased interpolant will be computed; if negative, a right-biased interpolant will be computed. If the interpolation method is central, then this has no effect.
dir | int | Spatial dimension along which to interpolate (eg: 0 for 1D; 0 or 1 for 2D; 0,1 or 2 for 3D)
s | void* | Solver object of type #HyPar: the following variables are needed - #HyPar::ghosts, #HyPar::ndims, #HyPar::nvars, #HyPar::dim_local.
m | void* | MPI object of type #MPIVariables: this is needed only by compact interpolation method that need to solve a global implicit system across MPI ranks.
uflag | int | A flag indicating if the function being interpolated \f${\bf f}\f$ is the solution itself \f${\bf u}\f$ (if 1, \f${\bf f}\left({\bf u}\right) \equiv {\bf u}\f$).
\b Reference:
+ Pirozzoli, S., Conservative Hybrid Compact-WENO Schemes for Shock-Turbulence Interaction, J. Comput. Phys., 178 (1), 2002, pp. 81-117, http://dx.doi.org/10.1006/jcph.2002.7021
+ Ren, Y.-X., Liu, M., Zhang, H., A characteristic-wise hybrid compact-WENO scheme for solving hyperbolic conservation laws, J. Comput. Phys., 192 (2), 2003, pp. 365-386,
http://dx.doi.org/10.1016/j.jcp.2003.07.006
*/
int Interp1PrimFifthOrderHCWENO(
double *fI, /*!< Array of interpolated function values at the interfaces */
double *fC, /*!< Array of cell-centered values of the function \f${\bf f}\left({\bf u}\right)\f$ */
double *u, /*!< Array of cell-centered values of the solution \f${\bf u}\f$ */
double *x, /*!< Grid coordinates */
int upw, /*!< Upwind direction (left or right biased) */
int dir, /*!< Spatial dimension along which to interpolation */
void *s, /*!< Object of type #HyPar containing solver-related variables */
void *m, /*!< Object of type #MPIVariables containing MPI-related variables */
int uflag /*!< Flag to indicate if \f$f(u) \equiv u\f$, i.e, if the solution is being reconstructed */
)
{
HyPar *solver = (HyPar*) s;
MPIVariables *mpi = (MPIVariables*) m;
CompactScheme *compact= (CompactScheme*) solver->compact;
WENOParameters *weno = (WENOParameters*) solver->interp;
TridiagLU *lu = (TridiagLU*) solver->lusolver;
int sys,Nsys,d;
_DECLARE_IERR_;
int ghosts = solver->ghosts;
int ndims = solver->ndims;
int nvars = solver->nvars;
int *dim = solver->dim_local;
/* define some constants */
static const double one_half = 1.0/2.0;
static const double one_third = 1.0/3.0;
static const double one_sixth = 1.0/6.0;
double *ww1, *ww2, *ww3;
ww1 = weno->w1 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir];
ww2 = weno->w2 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir];
ww3 = weno->w3 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir];
/* create index and bounds for the outer loop, i.e., to loop over all 1D lines along
dimension "dir" */
int indexC[ndims], indexI[ndims], index_outer[ndims], bounds_outer[ndims], bounds_inter[ndims];
_ArrayCopy1D_(dim,bounds_outer,ndims); bounds_outer[dir] = 1;
_ArrayCopy1D_(dim,bounds_inter,ndims); bounds_inter[dir] += 1;
int N_outer; _ArrayProduct1D_(bounds_outer,ndims,N_outer);
/* calculate total number of tridiagonal systems to solve */
_ArrayProduct1D_(bounds_outer,ndims,Nsys); Nsys *= nvars;
/* Allocate arrays for tridiagonal system */
double *A = compact->A;
double *B = compact->B;
double *C = compact->C;
double *R = compact->R;
#pragma omp parallel for schedule(auto) default(shared) private(sys,d,index_outer,indexC,indexI)
for (sys=0; sys < N_outer; sys++) {
_ArrayIndexnD_(ndims,sys,bounds_outer,index_outer,0);
_ArrayCopy1D_(index_outer,indexC,ndims);
_ArrayCopy1D_(index_outer,indexI,ndims);
for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) {
int qm1,qm2,qm3,qp1,qp2,p;
_ArrayIndex1D_(ndims,bounds_inter,indexI,0,p);
if (upw > 0) {
indexC[dir] = indexI[dir]-3; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm3);
indexC[dir] = indexI[dir]-2; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm2);
indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
indexC[dir] = indexI[dir] ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp1);
indexC[dir] = indexI[dir]+1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp2);
} else {
indexC[dir] = indexI[dir]+2; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm3);
indexC[dir] = indexI[dir]+1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm2);
indexC[dir] = indexI[dir] ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp1);
indexC[dir] = indexI[dir]-2; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp2);
}
int v;
for (v=0; v<nvars; v++) {
/* Defining stencil points */
double fm3, fm2, fm1, fp1, fp2;
fm3 = fC[qm3*nvars+v];
fm2 = fC[qm2*nvars+v];
fm1 = fC[qm1*nvars+v];
fp1 = fC[qp1*nvars+v];
fp2 = fC[qp2*nvars+v];
/* Candidate stencils and their optimal weights*/
double f1, f2, f3;
f1 = (2*one_sixth)*fm3 - (7.0*one_sixth)*fm2 + (11.0*one_sixth)*fm1;
f2 = (-one_sixth)*fm2 + (5.0*one_sixth)*fm1 + (2*one_sixth)*fp1;
f3 = (2*one_sixth)*fm1 + (5*one_sixth)*fp1 - (one_sixth)*fp2;
/* calculate WENO weights */
double w1,w2,w3;
w1 = *(ww1+p*nvars+v);
w2 = *(ww2+p*nvars+v);
w3 = *(ww3+p*nvars+v);
/* calculate the hybridization parameter */
double sigma;
if ( ((mpi->ip[dir] == 0 ) && (indexI[dir] == 0 ))
|| ((mpi->ip[dir] == mpi->iproc[dir]-1) && (indexI[dir] == dim[dir])) ) {
/* Standard WENO at physical boundaries */
sigma = 0.0;
} else {
double cuckoo, df_jm12, df_jp12, df_jp32, r_j, r_jp1, r_int;
cuckoo = (0.9*weno->rc / (1.0-0.9*weno->rc)) * weno->xi * weno->xi;
df_jm12 = fm1 - fm2;
df_jp12 = fp1 - fm1;
df_jp32 = fp2 - fp1;
r_j = (absolute(2*df_jp12*df_jm12)+cuckoo)/(df_jp12*df_jp12+df_jm12*df_jm12+cuckoo);
r_jp1 = (absolute(2*df_jp32*df_jp12)+cuckoo)/(df_jp32*df_jp32+df_jp12*df_jp12+cuckoo);
r_int = min(r_j, r_jp1);
sigma = min((r_int/weno->rc), 1.0);
}
if (upw > 0) {
A[sys*nvars+v+Nsys*indexI[dir]] = one_half * sigma;
B[sys*nvars+v+Nsys*indexI[dir]] = 1.0;
C[sys*nvars+v+Nsys*indexI[dir]] = one_sixth * sigma;
} else {
C[sys*nvars+v+Nsys*indexI[dir]] = one_half * sigma;
B[sys*nvars+v+Nsys*indexI[dir]] = 1.0;
A[sys*nvars+v+Nsys*indexI[dir]] = one_sixth * sigma;
}
double fWENO, fCompact;
fWENO = w1*f1 + w2*f2 + w3*f3;
fCompact = one_sixth * (one_third*fm2 + 19.0*one_third*fm1 + 10.0*one_third*fp1);
R[sys*nvars+v+Nsys*indexI[dir]] = sigma*fCompact + (1.0-sigma)*fWENO;
}
}
}
#ifdef serial
/* Solve the tridiagonal system */
IERR tridiagLU(A,B,C,R,dim[dir]+1,Nsys,lu,NULL); CHECKERR(ierr);
#else
/* Solve the tridiagonal system */
/* all processes except the last will solve without the last interface to avoid overlap */
if (mpi->ip[dir] != mpi->iproc[dir]-1) { IERR tridiagLU(A,B,C,R,dim[dir] ,Nsys,lu,&mpi->comm[dir]); CHECKERR(ierr); }
else { IERR tridiagLU(A,B,C,R,dim[dir]+1,Nsys,lu,&mpi->comm[dir]); CHECKERR(ierr); }
/* Now get the solution to the last interface from the next proc */
double *sendbuf = compact->sendbuf;
double *recvbuf = compact->recvbuf;
MPI_Request req[2] = {MPI_REQUEST_NULL,MPI_REQUEST_NULL};
if (mpi->ip[dir]) for (d=0; d<Nsys; d++) sendbuf[d] = R[d];
if (mpi->ip[dir] != mpi->iproc[dir]-1) MPI_Irecv(recvbuf,Nsys,MPI_DOUBLE,mpi->ip[dir]+1,214,mpi->comm[dir],&req[0]);
if (mpi->ip[dir]) MPI_Isend(sendbuf,Nsys,MPI_DOUBLE,mpi->ip[dir]-1,214,mpi->comm[dir],&req[1]);
MPI_Waitall(2,&req[0],MPI_STATUS_IGNORE);
if (mpi->ip[dir] != mpi->iproc[dir]-1) for (d=0; d<Nsys; d++) R[d+Nsys*dim[dir]] = recvbuf[d];
#endif
/* save the solution to fI */
#pragma omp parallel for schedule(auto) default(shared) private(sys,d,index_outer,indexC,indexI)
for (sys=0; sys < N_outer; sys++) {
_ArrayIndexnD_(ndims,sys,bounds_outer,index_outer,0);
_ArrayCopy1D_(index_outer,indexI,ndims);
for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) {
int p; _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p);
_ArrayCopy1D_((R+sys*nvars+Nsys*indexI[dir]),(fI+nvars*p),nvars);
}
}
return(0);
}
|
dnn.c | //------------------------------------------------------------------------------
// LAGraph/Test/DNN/dnn: run all neural networks from http://graphchallenge.org
//------------------------------------------------------------------------------
/*
LAGraph: graph algorithms based on GraphBLAS
Copyright 2019 LAGraph Contributors.
(see Contributors.txt for a full list of Contributors; see
ContributionInstructions.txt for information on how you can Contribute to
this project).
All Rights Reserved.
NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
Released under a BSD license, please see the LICENSE file distributed with
this Software or contact permission@sei.cmu.edu for full terms.
Created, in part, with funding and support from the United States
Government. (see Acknowledgments.txt file).
This program includes and/or can make use of certain third party source
code, object code, documentation and other files ("Third Party Software").
See LICENSE file for more details.
*/
//------------------------------------------------------------------------------
// LAGraph/Test/DNN/dnn: test for LAGraph_dnn. Contributed by Tim Davis,
// Texas A&M University.
// Usage: ./build/dnn nproblems
// nproblems is the # of test problems to solve. If not present, it defaults
// to 12 (run all 12 DNN's). The problems are solved in order from small to
// big. The Makefile just runs the first and smallest problem.
// NOTE: this test currently uses many GxB_* extensions in
// SuiteSparse:GraphBLAS. It optionally uses OpenMP.
#include <LAGraph.h>
#define LAGRAPH_FREE_ALL ;
int main (int argc, char **argv)
{
//--------------------------------------------------------------------------
// start LAGraph and GraphBLAS
//--------------------------------------------------------------------------
GrB_Info info ;
LAGRAPH_OK (LAGraph_init ( )) ;
//--------------------------------------------------------------------------
// problem size definitions
//--------------------------------------------------------------------------
// The 12 problems and their sizes are hard-coded below.
// It would be better to define these from the input files, but the problem
// data files are not formatted in a way that makes this easy to do. A
// Matrix Market file format would be better (which can specify the type
// and size of each matrix), with the additional of a problem specification
// file that defines each of the 12 problems to solve.
// Each problem is defined by a set of files in the DNN_DATA directory,
// which can be obtained from http://graphchallenge.org . The simplest way
// to redefine the location of the data files is to make ./dnn_data a
// symbolic link, and leave DNN_DATA unchanged. The .gitignore file will
// prevent dnn_data from syncing to github, so you could also simply change
// ./dnn_data to a true directory and place all files there. Or, change
// the DNN_DATA macro to point to your data files.
#define DNN_DATA "./dnn_data"
// Each of the 12 problems is defined by the # of neurons at each layer, N
// = (1024, 4096, 16384, 65536), and the # of layers, L = (120, 480, or
// 1920). Each problem has the same number of features (F = 60000). The
// input files for a given problem (N,L) are as follows:
// Input feature vectors: an F-by-N sparse matrix
// ./dnn_data/MNIST/sparse-images-(N).tsv
// Neural network layers, for i = 1 to L, each an N-by-N sparse matrix:
// ./dnn_data/DNN/neuron(N)/n(N)-l(i).tsv
// True categories, a list of integers, one per line:
// ./dnn_data/DNN/neuron(N)-l(L)-categories.tsv
// The Bias vectors are defined with the single scalar, neuralNetBias[ ],
// with one scalar for each value of N. This scalar is used to construct
// the diagonal Bias matrices for each layer. All the layers share the
// same matrix, but they are treated as different matrices here. In a more
// general problem, the Bias matrices would differ for each layer and
// perhaps for each neuron. As a result, this test is not permitted to
// exploit the fact that all neurons are biased the same way.
// Note that for a given number of neurons, N, each of the 3 problems for
// different layers shares the same weight matrices for the first layers.
// That is, the first 120 layers of the (1024,480) problem are the same as
// the 120 layers of the (1024,120) problem. This is not exploited in
// LAGraph_dnn, but it is exploited here, simply to reduce the time to load
// the problems.
int len = 1024 ;
char filename [len] ;
#define NMAXLAYERS 3
int maxLayers [NMAXLAYERS] = { 120, 480, 1920 } ;
// #define NMAXNEURONS 1
// int Nneurons [NMAXNEURONS] = { 65536 } ;
// double neuralNetBias [NMAXNEURONS] = { -0.45 } ;
#define NMAXNEURONS 4
int Nneurons [NMAXNEURONS] = { 1024, 4096, 16384, 65536 } ;
double neuralNetBias [NMAXNEURONS] = { -0.3, -0.35, -0.4, -0.45 } ;
int nfeatures = 60000 ;
GrB_Matrix Y0 = NULL, Y = NULL, W [65536], Bias [65536] ;
GrB_Vector TrueCategories = NULL, Categories = NULL, C = NULL ;
for (int layer = 0 ; layer < 65536 ; layer++)
{
W [layer] = NULL ;
Bias [layer] = NULL ;
}
#undef LAGRAPH_FREE_ALL
#define LAGRAPH_FREE_ALL \
{ \
GrB_free (&TrueCategories) ; \
GrB_free (&Categories) ; \
GrB_free (&C) ; \
GrB_free (&Y) ; \
GrB_free (&Y0) ; \
for (int layer = 0 ; layer < 65536 ; layer++) \
{ \
GrB_free (& (W [layer])) ; \
GrB_free (& (Bias [layer])) ; \
} \
}
// select the type. GrB_FP32 is faster and passes all the tests.
// GrB_Type type = GrB_FP64 ;
GrB_Type type = GrB_FP32 ;
printf ("type: ") ;
if (type == GrB_FP64) printf ("double\n") ; else printf ("float\n") ;
// get the max # of threads that can be used
int nthreads_max ;
LAGRAPH_OK (GxB_get (GxB_NTHREADS, &nthreads_max)) ;
printf ("max # of nthreads: %d\n", nthreads_max) ;
#define NNTHREADS 12
int nthreads_list [NNTHREADS] =
{ 1, 2, 4, 8, 16, 20, 32, 40, 64, 128, 160, 256 } ;
// #define NNTHREADS 1
// int nthreads_list [NNTHREADS] = { 40 } ;
// determine the # of problems to solve
int nproblems = NMAXNEURONS * NMAXLAYERS ;
if (argc > 1)
{
sscanf (argv [1], "%d", &nproblems) ;
}
printf ("# of problems to solve: %d\n", nproblems) ;
int problem = 0 ;
//--------------------------------------------------------------------------
// run all problems
//--------------------------------------------------------------------------
for (int kn = 0 ; kn < NMAXNEURONS ; kn++)
{
//----------------------------------------------------------------------
// check if this problem is to be solved
//----------------------------------------------------------------------
if (problem > nproblems) continue ;
//----------------------------------------------------------------------
// get the number of nneurons and neural bias
//----------------------------------------------------------------------
double tic [2] ;
LAGraph_tic (tic) ;
int nneurons = Nneurons [kn] ;
double b = neuralNetBias [kn] ;
printf ("\n# neurons: %d bias: %g\n", nneurons, b) ;
//----------------------------------------------------------------------
// read in the initial feature vectors
//----------------------------------------------------------------------
sprintf (filename, "%s/MNIST/sparse-images-%d.tsv", DNN_DATA, nneurons);
FILE *f = fopen (filename, "r") ;
if (!f) { printf ("cannot open %s\n", filename) ; abort ( ) ; }
LAGRAPH_OK (LAGraph_tsvread (&Y0, f, type, nfeatures, nneurons)) ;
fclose (f) ;
double t = LAGraph_toc (tic) ;
printf ("# features: %" PRIu64 " read time: %g sec\n", nfeatures, t) ;
GrB_Index nvals ;
LAGRAPH_OK (GrB_Matrix_nvals (&nvals, Y0)) ;
printf ("# entries in Y0: %g million\n", (double) nvals / 1e6) ;
fflush (stdout) ;
//----------------------------------------------------------------------
// run each problem size (for all #'s of layers)
//----------------------------------------------------------------------
for (int kl = 0 ; kl < NMAXLAYERS ; kl++)
{
//------------------------------------------------------------------
// check if this problem is to be solved
//------------------------------------------------------------------
problem++ ;
if (problem > nproblems) continue ;
//------------------------------------------------------------------
// get the number of layers in this neural net
//------------------------------------------------------------------
int nlayers = maxLayers [kl] ;
printf ("\n--------------------------------------"
"neurons per layer: %d layers: %d\n", nneurons, nlayers) ;
//------------------------------------------------------------------
// read in the layers in parallel
//------------------------------------------------------------------
LAGraph_tic (tic) ;
int first_layer = (kl == 0) ? 0 : maxLayers [kl-1] ;
bool ok = true ;
// assume the I/O system can handle 2-way parallelism
#pragma omp parallel for schedule(dynamic,1) reduction(&&:ok) \
num_threads (2)
for (int layer = first_layer ; layer < nlayers ; layer++)
{
// read the neuron layer: W [layer]
char my_filename [1024] ;
sprintf (my_filename, "%s/DNN/neuron%d/n%d-l%d.tsv", DNN_DATA,
nneurons, nneurons, layer+1) ;
FILE *my_file = fopen (my_filename, "r") ;
bool my_ok = true ;
if (!my_file)
{
printf ("cannot open %s\n", my_filename) ;
my_ok = false ;
continue ;
}
GrB_Info my_info = LAGraph_tsvread (&(W [layer]), my_file,
type, nneurons, nneurons) ;
fclose (my_file) ;
my_ok = my_ok && (my_info == GrB_SUCCESS) ;
// construct the bias matrix: Bias [layer]. Note that all Bias
// matrices are the same for all layers, and all diagonal
// entries are also the same, but this test must not exploit
// that fact.
my_info = GrB_Matrix_new (&(Bias [layer]), type,
nneurons, nneurons) ;
my_ok = my_ok && (my_info == GrB_SUCCESS) ;
for (int i = 0 ; i < nneurons ; i++)
{
my_info = GrB_Matrix_setElement (Bias [layer], b, i, i) ;
my_ok = my_ok && (my_info == GrB_SUCCESS) ;
}
GrB_Index ignore ;
my_info = GrB_Matrix_nvals (&ignore, Bias [layer]) ;
my_ok = my_ok && (my_info == GrB_SUCCESS) ;
ok = ok && my_ok ;
}
if (!ok)
{
printf ("neural read failure\n") ;
abort ( ) ;
}
t = LAGraph_toc (tic) ;
printf ("read net time %g sec\n", t) ;
double nedges = 0 ;
for (int layer = 0 ; layer < nlayers ; layer++)
{
GrB_Index nvals ;
LAGRAPH_OK (GrB_Matrix_nvals (&nvals, W [layer])) ;
nedges += nvals ;
}
printf ("# edges in all layers: %g million\n\n",
(double) nedges / 1e6) ;
fflush (stdout) ;
// read TrueCategories as a boolean nfeatures-by-1 vector
LAGRAPH_OK (GrB_Vector_new (&TrueCategories, GrB_BOOL,
nfeatures)) ;
sprintf (filename, "%s/DNN/neuron%d-l%d-categories.tsv", DNN_DATA,
nneurons, nlayers) ;
f = fopen (filename, "r") ;
bool check_result = (f != NULL) ;
if (check_result)
{
while (1)
{
int category ;
if (fscanf (f, "%d\n", &category) == EOF) break ;
LAGRAPH_OK (GrB_Vector_setElement (TrueCategories,
(bool) true, category-1)) ;
}
fclose (f) ;
}
else
{
printf ("cannot open %s\n", filename) ;
}
//------------------------------------------------------------------
// solve the problem with 1, 2, 4, ..., nthreads_max threads
//------------------------------------------------------------------
double t1 = 0, tcheck = 0 ;
GrB_Index final_ynvals ;
for (int kth = 0 ; kth < NNTHREADS ; kth++)
{
//--------------------------------------------------------------
// set the # of threads to use
//--------------------------------------------------------------
int nthreads = nthreads_list [kth] ;
if (nthreads > nthreads_max) break ;
LAGRAPH_OK (GxB_set (GxB_NTHREADS, nthreads)) ;
printf ("nthreads %2d: ", nthreads) ;
fflush (stdout) ;
//--------------------------------------------------------------
// solve the problem
//--------------------------------------------------------------
LAGraph_tic (tic) ;
LAGRAPH_OK (LAGraph_dnn (&Y, W, Bias, nlayers, Y0)) ;
t = LAGraph_toc (tic) ;
printf ("solution time %12.2f sec", t) ;
if (nthreads == 1)
{
t1 = t ;
}
else
{
printf (" speedup %8.2f", t1/t) ;
}
//--------------------------------------------------------------
// check the result
//--------------------------------------------------------------
// this is so fast, it's hardly worth timing ...
LAGraph_tic (tic) ;
LAGRAPH_OK (GrB_Matrix_nvals (&final_ynvals, Y)) ;
// C = sum (Y)
LAGRAPH_OK (GrB_Vector_new (&C, type, nfeatures)) ;
LAGRAPH_OK (GrB_reduce (C, NULL, NULL, GrB_PLUS_FP64, Y, NULL));
// Categories = pattern of C
LAGRAPH_OK (GrB_Vector_new (&Categories, GrB_BOOL, nfeatures)) ;
LAGRAPH_OK (GrB_apply (Categories, NULL, NULL, GxB_ONE_BOOL,
C, NULL)) ;
// write out Categories, as a 1-based file
sprintf (filename, "my_neuron%d-l%d-categories_threads%d.tsv",
nneurons, nlayers, nthreads) ;
FILE *ff = fopen (filename, "w") ;
for (int i = 0 ; i < nfeatures ; i++)
{
bool c = false ;
LAGRAPH_OK (GrB_Vector_extractElement (&c, Categories, i)) ;
if (c) fprintf (ff, "%d\n", i + 1) ;
}
fclose (ff) ;
if (check_result)
{
// check if Categories and TrueCategories are the same
bool isequal ;
LAGRAPH_OK (LAGraph_Vector_isequal (&isequal,
TrueCategories, Categories, NULL)) ;
if (!isequal)
{
// GxB_print (TrueCategories, 3) ;
// GxB_print (Categories, 3) ;
printf ("test failure!\n") ;
// LAGRAPH_FREE_ALL ;
// abort ( ) ;
}
else
{
printf (" test passed") ;
}
}
printf ("\n") ;
GrB_free (&Categories) ;
GrB_free (&C) ;
GrB_free (&Y) ;
tcheck = LAGraph_toc (tic) ;
}
printf ("\n# entries in final Y: %g million\n",
(double) final_ynvals / 1e6) ;
printf ("check time: %g sec\n", tcheck) ;
LAGRAPH_OK (GxB_set (GxB_NTHREADS, nthreads_max)) ;
}
//----------------------------------------------------------------------
// free the problem
//----------------------------------------------------------------------
LAGRAPH_FREE_ALL ;
}
//--------------------------------------------------------------------------
// finalize LAGraph and GraphBLAS
//--------------------------------------------------------------------------
LAGRAPH_OK (LAGraph_finalize ( )) ;
printf ("all tests passed\n") ;
return (GrB_SUCCESS) ;
}
|
ops.h | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#pragma once
#ifndef OPS_H_
#define OPS_H_
#include <op_boilerplate.h>
#include <array/DataTypeUtils.h>
#include <helpers/shape.h>
#include <vector>
#include <Environment.h>
#include <loops/summarystatsreduce.h>
#include <loops/ReduceType.h>
#define MIN_V 1e-12
#define MAX_FLOAT 1e37
#define MIN_FLOAT 1e-37
#define MAX_INT 2147483647
#define MIN_CUTFOFF -3.79297773665f
#define FLOAT_MIN_NORMAL 1.17549435e-38
#define EPS 1e-5
#define AFFINITY close
#define DOUBLE_PI_T T(2.0 * 3.14159265358979323846)
#define DOUBLE_PI_X X(2.0 * 3.14159265358979323846)
#define no_op_exec_special_any static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_bool static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_same static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, X *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, Z *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, Z *extraParams, Z *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){}
#define no_op_exec_special_accumulation_long static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, X *extraParams, Z *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){}
#define no_op_exec_special_accumulation_same static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, X *extraParams, X *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){}
#ifdef __CUDACC__
#define no_op_exec_special_any_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_bool_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_same_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, X *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, X *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer,Z *result, Nd4jLong *resultShapeBuffer,Z *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation_same_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, X *extraParams, X *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, X *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation_long_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, X *extraParams, Z *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Z *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, Z *extraParams, Z *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Z *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {}
#else
// hacky fix for isnan/being being out of scope
//#ifdef IOS
//#define isinf(x) 0 // this isn't right. But std::isinf fails
//#define isnan(x) 0
//#else
//#define isnan std::isnan
//#define isinf std::isinf
//#endif
#define no_op_exec_special_cuda
#define no_op_exec_special_accumulation_cuda
#define no_op_exec_special_accumulation_same_cuda
#define no_op_exec_special_accumulation_long_cuda
#define no_op_exec_special_any_cuda
#define no_op_exec_special_bool_cuda
#define no_op_exec_special_same_cuda
#define no_op_exec_special_accumulation_same_cuda
#endif
#define SELU_ALPHA 1.6732632423543772848170429916717
#define SELU_LAMBDA 1.0507009873554804934193349852946
#ifdef _OPENMP
#pragma omp declare reduction(maxTF : float,double,float16,bfloat16 : \
omp_out = nd4j::math::nd4j_max(omp_in, omp_out) )\
initializer (omp_priv=-MAX_FLOAT)
#pragma omp declare reduction(minTF : float,double,float16,bfloat16 : \
omp_out = nd4j::math::nd4j_min(omp_in, omp_out) )\
initializer (omp_priv=MAX_FLOAT)
#pragma omp declare reduction(maxT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = nd4j::math::nd4j_max(omp_in, omp_out) )\
initializer (omp_priv=0)
#pragma omp declare reduction(minT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = nd4j::math::nd4j_min(omp_in, omp_out) )\
initializer (omp_priv=0)
#pragma omp declare reduction(amaxT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = nd4j::math::nd4j_max(nd4j::math::nd4j_abs(omp_in), nd4j::math::nd4j_abs(omp_out)) )
#pragma omp declare reduction(aminT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = nd4j::math::nd4j_min(nd4j::math::nd4j_abs(omp_in), nd4j::math::nd4j_abs(omp_out)) )
#pragma omp declare reduction(asumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = nd4j::math::nd4j_abs(omp_in) + nd4j::math::nd4j_abs(omp_out))\
initializer (omp_priv=0)
#pragma omp declare reduction(sumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = omp_in + omp_out)\
initializer (omp_priv=0)
#pragma omp declare reduction(prodT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = omp_in * omp_out)\
initializer (omp_priv=1)
#endif
namespace functions {
namespace indexreduce {
template <typename T>
struct IndexValue {
T value;
Nd4jLong index;
_CUDA_HD IndexValue() = default;
_CUDA_HD IndexValue(const T val, const Nd4jLong ind): index(ind), value(val) {}
};
}
namespace summarystats {
template <typename T>
class SummaryStatsData;
}
}
namespace simdOps {
template <typename X, typename Y, typename Z>
class Add {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d1 + d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d1 + d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1 + params[0]);
}
op_def static X startingValue() {
return static_cast<X>(0.f);
}
};
template <typename X, typename Y>
class NewAdd {
public:
op_def static X op(X d1, Y d2, X *params) {
return d1 + d2;
}
};
template <typename X, typename Y, typename Z>
class Subtract {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d1 - d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d1 - d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1 - params[0]);
}
};
template <typename X, typename Y, typename Z>
class SquaredSubtract {
public:
op_def static Z op(X d1, Y d2) {
auto d = static_cast<Z>(d1 - d2);
return d * d;
}
op_def static Z op(X d1, Y d2, Z *params) {
auto d = static_cast<Z>(d1 - d2);
return d * d;
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
auto d = static_cast<Z>(d1 - params[0]);
return d * d;
}
};
template <typename X, typename Y, typename Z>
class SquaredReverseSubtract {
public:
op_def static Z op(X d1, Y d2) {
auto d = static_cast<Z>(d2 - d1);
return d * d;
}
op_def static Z op(X d1, Y d2, Z *params) {
auto d = static_cast<Z>(d2 - d1);
return d * d;
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
auto d = static_cast<Z>(params[0] - d1);
return d * d;
}
};
template <typename X, typename Y, typename Z>
class ReverseSubtract {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2 - d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2 - d1);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(params[0] - d1);
}
};
template <typename X, typename Y, typename Z>
class LogPoissonLossFull {
public:
op_def static Z op(X z, Y c) {
auto zz = static_cast<Z>(z);
auto zc = static_cast<Z>(c);
return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc + (zz * nd4j::math::nd4j_log<X, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz)));
}
op_def static Z op(X z, Y c, Z *params) {
auto zz = static_cast<Z>(z);
auto zc = static_cast<Z>(c);
return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc + (zz * nd4j::math::nd4j_log<X, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz)));
}
op_def static Z op(X z) {
auto zz = static_cast<Z>(z);
return (zz * nd4j::math::nd4j_log<Y, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz));
}
// op for MetaOps
op_def static X op(X z, Y *params) {
return (nd4j::math::nd4j_exp<X, X>(params[0]) - z * params[0] + (z * nd4j::math::nd4j_log<X, Z>(z) - z + static_cast<X>(0.5f) * nd4j::math::nd4j_log<X, Z>(DOUBLE_PI_X * z)));
}
};
template <typename X, typename Y, typename Z>
class LogPoissonLoss {
public:
op_def static Z op(X z, Y c) {
auto zz = static_cast<Z>(z);
auto zc = static_cast<Z>(c);
return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc);
}
op_def static Z op(X z, Y c, Z *params) {
auto zz = static_cast<Z>(z);
auto zc = static_cast<Z>(c);
return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc);
}
op_def static Z op(X z) {
return static_cast<Z>(z);
}
// op for MetaOps
op_def static Z op(X z, Y *params) {
return (nd4j::math::nd4j_exp<Y, Z>(params[0]) - static_cast<Z>(z) * static_cast<Z>(params[0]));
}
};
template <typename X, typename Y, typename Z>
class Multiply {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d1 * d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d1 * d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1 * params[0]);
}
op_def static X startingValue() {
return static_cast<X>(1.f);
}
};
template <typename X, typename Y, typename Z>
class Divide {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d1 / d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d1 / d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1 / params[0]);
}
op_def static X startingValue() {
return static_cast<X>(1);
}
};
template <typename X, typename Y, typename Z>
class SafeDivide {
public:
op_def static Z op(X d1, Y d2) {
if(d2 == static_cast<Y>(0))
return static_cast<Z>(0);
return static_cast<Z>(d1 / d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
if(d2 == static_cast<Y>(0))
return static_cast<Z>(0);
return static_cast<Z>(d1 / d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
if(params[0] == static_cast<Y>(0))
return static_cast<Z>(0);
return static_cast<Z>(d1 / params[0]);
}
};
template <typename X, typename Y, typename Z>
class FloorDiv {
public:
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / d2));
}
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / d2));
}
op_def static Z op(X d1) {
return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1));
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / params[0]));
}
};
template <typename X, typename Y, typename Z>
class TruncateDiv {
public:
op_def static Z op(X d1, Y d2) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<Z>(i1 / i2);
}
op_def static Z op(X d1, Y d2, Z *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<Z>(i1 / i2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(params[0]);
return static_cast<Z>(i1 / i2);
}
};
template <typename X, typename Y, typename Z>
class TruncateMod {
public:
op_def static Z op(X d1, Y d2) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<Z>(i1 % i2);
}
op_def static Z op(X d1, Y d2, Z *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<Z>(i1 % i2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(params[0]);
return static_cast<Z>(i1 % i2);
}
};
template<typename X, typename Y, typename Z>
class Remainder {
public:
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_remainder<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_remainder<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return nd4j::math::nd4j_remainder<X, Y, Z>(d1, params[0]);
}
};
template <typename X, typename Y, typename Z>
class FMod {
public:
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return nd4j::math::nd4j_fmod<X, Y, Z>(d1, params[0]);
}
};
template <typename X, typename Y, typename Z>
class FloorMod {
public:
op_def static Z op(X d1, Y d2) {
auto m = nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2);
return (d1 < static_cast<X>(0)) == (d2 < static_cast<Y>(0)) ? m : nd4j::math::nd4j_fmod<Z, Y, Z>(m + static_cast<Z>(d2), d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
auto m = nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2);
return (d1 < static_cast<X>(0.0f)) == (d2 < static_cast<Y>(0)) ? m : nd4j::math::nd4j_fmod<Z, Y, Z>(m + static_cast<Z>(d2), d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return op(d1, params[0]);
}
};
template <typename X, typename Y, typename Z>
class ReverseDivide {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2 / d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2 / d1);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(params[0] / d1);
}
};
template <typename X, typename Y, typename Z>
class CopyPws {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1);
}
};
template <typename X>
class Copy {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Y, typename Z>
class Copy2 {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1);
}
};
template <typename X, typename Y, typename Z>
class Axpy {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2 + d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
auto alpha = params[0];
return alpha * static_cast<Z>(d1) + static_cast<Z>(d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
};
template <typename X, typename Z>
class Assign {
public:
no_op_exec_special_any
no_op_exec_special_any_cuda
op_def static Z op(X d1, X *params) {
return static_cast<Z>(d1);
}
};
template <typename X, typename Z>
class And {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
op_def static Z op(X d1, X d2) {
return d2 + d1;
}
op_def static Z op(X d1, X d2, X *params) {
if (params != nullptr) {
auto comp = params[0];
return d1 != comp && d2 != comp ? static_cast<Z>(1) : static_cast<Z>(0);
} else {
auto b1 = static_cast<bool>(d1);
auto b2 = static_cast<bool>(d2);
return (b1 && b2) ? static_cast<Z>(1) : static_cast<Z>(0);
}
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, X *params) {
return static_cast<Z>(119);
}
};
template <typename X, typename Z>
class Or {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
op_def static Z op(X d1, X d2) {
return d2 + d1;
}
op_def static Z op(X d1, X d2, X *params) {
if (params != nullptr) {
auto comp = params[0];
return d1 != comp || d2 != comp ? static_cast<Z>(1) : static_cast<Z>(0);
} else {
auto b1 = static_cast<bool>(d1);
auto b2 = static_cast<bool>(d2);
return b1 || b2 ? static_cast<Z>(1) : static_cast<Z>(0);
}
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, X *params) {
return static_cast<Z>(119);
}
};
template <typename X, typename Z>
class Xor {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
op_def static Z op(X d1, X d2) {
return d2 + d1;
}
op_def static Z op(X d1, X d2, X *params) {
if (params != nullptr) {
auto comp = params[0];
return ((d1 == comp && d2 != comp) || (d1 != comp && d2 == comp)) ? static_cast<Z>(1) : static_cast<Z>(0);
} else {
auto b1 = static_cast<bool>(d1);
auto b2 = static_cast<bool>(d2);
return (!b1 && b2 )||(b1 && !b2) ? static_cast<Z>(1) : static_cast<Z>(0);
}
}
op_def static Z op(X d1) {
return d1;
}
};
template <typename X, typename Z>
class Not {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
op_def static Z op(X d1, X d2) {
return static_cast<Z>(0);
}
op_def static Z op(X d1, X d2, X *params) {
return d1 != d2 ? static_cast<Z>(1) : static_cast<Z>(0);
}
// this transform op should run only on boolean input
op_def static Z op(X d1, X *params) {
auto b1 = static_cast<bool>(d1);
return !b1;
}
};
template <typename X, typename Y, typename Z>
class LogicalNot {
public:
op_def static Z op(X d1, Y d2) {
return !((int) d1 && (int) d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<X>(!(static_cast<int>(d1) && static_cast<int>(d2)));
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<X>(119);
}
};
template <typename X, typename Y, typename Z>
class LogicalXor {
public:
op_def static Z op(X d1, Y d2) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return (i1 | i2) &~ (i1 & i2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(119);
}
};
template <typename X, typename Y, typename Z>
class LogicalAnd {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<int>(d1) & static_cast<int>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(Y d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(119);
}
};
template <typename X, typename Y, typename Z>
class LogicalOr {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<int>(d1) | static_cast<int>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<X>(119);
}
};
template <typename X, typename Y, typename Z>
class Mod {
public:
/*
// just a optional note, feel free to remove later
op_def static half op(half d1, half d2, half *params) {
return __float2half(simdOps::Mod<float>::op(__half2float(d1), __half2float(d2), nullptr));
}
*/
op_def static Z op(X d1, Y d2) {
return static_cast<int>(d1) % static_cast<int>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
// op for MetaOp
op_def static Z op(X d1, Y *params) {
return op(d1, params[0]);
}
};
template <typename X, typename Y, typename Z>
class ReverseMod {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<int>(d2) % static_cast<int>(d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
// op for MetaOp
op_def static Z op(X d1, Y *params) {
return op(d1, params[0]);
}
};
/**
* Whether 2 elements in an array
* are epsilion equal
*/
template <typename X, typename Z>
class Epsilon {
public:
op_def static Z op(X d1, X d2) {
X diff = d1 - d2;
X absDiff = nd4j::math::nd4j_abs<X>(diff);
if (absDiff <= static_cast<X>(MIN_V))
return static_cast<Z>(1);
return static_cast<Z>(0);
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class EqualTo {
public:
op_def static Z op(X d1, X d2) {
return d1 == d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class NotEqualTo {
public:
op_def static Z op(X d1, X d2) {
return d1 != d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class GreaterThanOrEqual {
public:
op_def static Z op(X d1, X d2) {
return d1 >= d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
// FIXME: this signature clashes with MetaOp stuff
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class GreaterThan {
public:
op_def static Z op(X d1, X d2) {
return d1 > d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
// FIXME: this signature clashes with MetaOp stuff
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class LessThan {
public:
op_def static Z op(X d1, X d2) {
return d1 < d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class LessThanOrEqual {
public:
op_def static Z op(X d1, X d2) {
return d1 <= d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X>
class Abs {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_abs<X>(d1);
}
};
template <typename X>
class Ceiling {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_ceil<X,X>(d1);
}
};
template <typename X>
class Cosine {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_cos<X,X>(d1);
}
};
template <typename X>
class Exp {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_exp<X, X>(d1);
}
};
template <typename X>
class HardTanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return ((d1 >= static_cast<X>(-1.f) && d1 <= static_cast<X>(1.f)) ? static_cast<X>(1.f) : static_cast<X>(0.f));
}
};
template <typename X>
class HardTanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
if (d1 < static_cast<X>(-1))
return static_cast<X>(-1);
else if (d1 > static_cast<X>(1))
return static_cast<X>(1);
else
return d1;
}
};
template <typename X>
class Floor {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_floor<X,X>(d1);
}
};
template <typename X>
class Log {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_log<X, X>(d1);
}
};
template <typename X>
class Log1p {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_log<X, X>(1 + d1);
}
};
template <typename X, typename Y, typename Z>
class LogX {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_log<X, Z>(d1) / nd4j::math::nd4j_log<Y, Z>(d2) ;
}
};
template <typename X>
class StabilizeFP16 {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
if (d1 <= static_cast<X>(0))
return static_cast<X>(nd4j::DataTypeUtils::min<float16>());
else return d1;
}
};
template <typename X>
class StabilizeX {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
if (d1 <= static_cast<X>(0))
return nd4j::DataTypeUtils::min<X>();
else return d1;
}
};
template <typename X>
class SpecialDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * (static_cast<X>(1.f) - d1);
}
};
template <typename X>
class Neg {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return -d1;
}
};
template <typename X>
class Erf {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_erf<X,X>(d1);
}
};
template <typename X>
class Erfc {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_erfc<X,X>(d1);
}
};
template <typename X>
class Reciprocal {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
// op_def static T op(T d1) {
// return (T(1.0f) / d1);
// }
// op for MetaOps
op_def static X op(X d1, X *params) {
return (static_cast<X>(1) / d1);
}
};
template <typename X, typename Z>
class Sqr {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2));
}
op_def static Z op(X d1) {
return nd4j::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2));
}
};
template <typename X, typename Y, typename Z>
class RelativeError {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_re<X>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(0);
}
};
template <typename X, typename Y, typename Z>
class BinaryRelativeError {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
X threshold = params[0];
return nd4j::math::nd4j_re<X>(d1, d2) > threshold ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static Z op(X d1) {
return static_cast<Z>(0);
}
};
template <typename X, typename Y, typename Z>
class BinaryMinimumAbsoluteRelativeError {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, X *params) {
X d2 = params[0];
X thresholdRelative = params[1];
X thresholdAbsolute = params[2];
return nd4j::math::nd4j_re<X>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<X>(d1 - static_cast<X>(d2)) < thresholdAbsolute ? static_cast<Z>(0) : static_cast<Z>(1)) : static_cast<Z>(0);
}
op_def static Z op(X d1, Y d2, Z *params) {
X thresholdRelative = params[0];
X thresholdAbsolute = params[1];
return nd4j::math::nd4j_re<X>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<X>(d1 - static_cast<X>(d2)) < thresholdAbsolute ? static_cast<Z>(0) : static_cast<Z>(1)) : static_cast<Z>(0);
}
op_def static Z op(X d1) {
return static_cast<Z>(0);
}
};
template <typename X, typename Y, typename Z>
class ReversePow {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_pow<X, X, Z>(params[0], d1);
}
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_pow<X, Y, Z>(d2, d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_pow<X, Y, Z>(d2, d1);
}
op_def static Z op(X d1) {
return d1;
}
};
template <typename X, typename Y, typename Z>
class Pow {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_pow<X, X, Z>(d1, params[0]);
}
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_pow<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_pow<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
};
template <typename X, typename Y, typename Z>
class PowDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return params[0] * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(params[0]) - static_cast<Z>(1.f));
}
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2) * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(d2) - static_cast<Z>(1.f));
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2) * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(d2) - static_cast<Z>(1.f));
}
op_def static Z op(X d1) {
return d1;
}
};
template <typename X>
class Round {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_round<X,X>(d1);
}
};
template <typename X, typename Z>
class IsNan {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return nd4j::math::nd4j_isnan(d1) ? static_cast<X>(1) : static_cast<X>(0);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X>
class Expm1 {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_exp<X, X>(d1) - static_cast<X>(1);
}
};
template <typename X, typename Z>
class IsPositive {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return d1 > (X)0.f;
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class IsInf {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return nd4j::math::nd4j_isinf<X>(d1) ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class IsInfOrNan{
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return nd4j::math::nd4j_isfin<X>(d1) ? static_cast<Z>(0) : static_cast<Z>(1);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class IsFinite {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return nd4j::math::nd4j_isfin<X>(d1) ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X>
class ClipByValue {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
if (d1 > params[1])
return params[1];
if (d1 < params[0])
return params[0];
return d1;
}
};
template <typename X, typename Y, typename Z>
class LstmClip {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
X _v = (X) d2;
if (d1 > _v)
return _v;
else if (d1 < -_v)
return -_v;
else return d1;
}
};
template <typename X>
class Swish {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * nd4j::math::nd4j_sigmoid<X,X>(d1);
}
};
template <typename X>
class GELU {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * nd4j::math::nd4j_sigmoid<X,X>(static_cast<X>(1.702f) * d1);
}
};
template <typename X>
class PreciseGELU {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto sp = nd4j::math::nd4j_sqrt<X, X>(static_cast<X>(2) / static_cast<X>(M_PI));
auto xp = d1 + nd4j::math::nd4j_pow<X, X, X>(static_cast<X>(0.044715) * d1, static_cast<X>(3));
return (d1 / static_cast<X>(2)) * (static_cast<X>(1) + nd4j::math::nd4j_tanh<X, X>(sp * xp));
}
};
template <typename X>
class GELUDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto x17 = static_cast<X>(1.702f) * d1;
auto ep = nd4j::math::nd4j_pow<X,X,X>(static_cast<X>(M_E), x17);
// (E^(1.702 x) (1. + E^(1.702 x) + 1.702 x))/(1. + E^(1.702 x))^2
return (ep * (static_cast<X>(1.f) + ep + x17)) / nd4j::math::nd4j_pow<X, int, X>((static_cast<X>(1.f) + ep), 2);
}
};
template <typename X>
class PreciseGELUDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto x79 = static_cast<X>(0.797885) * d1;
auto x03 = nd4j::math::nd4j_pow<X, int, X>(static_cast<X>(0.0356774) * d1, 3);
auto x39 = static_cast<X>(0.398942) * d1;
auto x05 = nd4j::math::nd4j_pow<X, int, X>(static_cast<X>(0.0535161) * d1, 3);
auto scz = nd4j::math::nd4j_sech<X, X>(x79 + x03);
// 0.5 + (0.398942 x + 0.0535161 x^3) Sech[0.797885 x + 0.0356774 x^3]^2 + 0.5 Tanh[0.797885 x + 0.0356774 x^3]
return static_cast<X>(0.5) + (x39 + x05) * (scz * scz) + static_cast<X>(0.5) * nd4j::math::nd4j_tanh<X, X>(x79 + x03);
}
};
template <typename X>
class SwishDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
X ex = nd4j::math::nd4j_pow<X, X, X>(static_cast<X>(M_E), d1);
return (ex * (d1 + ex + static_cast<X>(1.f))) / nd4j::math::nd4j_pow<X, X, X>((ex + static_cast<X>(1.f)) , static_cast<X>(2.f));
}
};
template <typename X>
class LogSigmoid {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_log<X, X>(nd4j::math::nd4j_sigmoid<X, X>(d1));
}
};
template <typename X>
class LogSigmoidDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
X ex = nd4j::math::nd4j_pow<X, X, X>(M_E, d1);
return static_cast<X>(1.f) / (ex + static_cast<X>(1.f));
}
};
template <typename X>
class Sigmoid {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_sigmoid<X, X>(d1);
}
};
template <typename X>
class SigmoidDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_sigmoidderivative<X, X>(d1);
}
};
template <typename X>
class HardSigmoid {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_min<X>(static_cast<X>(1), nd4j::math::nd4j_max<X>(static_cast<X>(0), (static_cast<X>(0.2f)) * d1 + static_cast<X>(0.5f)));
}
};
template <typename X>
class HardSigmoidDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 < static_cast<X>(-2.5f) || d1 > static_cast<X>(2.5f) ? static_cast<X>(0.f) : static_cast<X>(0.2f);
}
};
/**
* Scale to be between a min and max
*/
template <typename X>
class SetRange {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto min = params[0];
auto max = params[1];
if (static_cast<X>(d1) >= min && static_cast<X>(d1) <= max)
return d1;
if (min == static_cast<X>(0) && max == static_cast<X>(1)) {
auto val = static_cast<X>(1) / (static_cast<X>(1) + nd4j::math::nd4j_exp<X, X>(-d1));
return (nd4j::math::nd4j_floor<X,X>(val * (max - min)) + min);
}
return (nd4j::math::nd4j_floor<X,X>(d1 * (max - min)) + min);
}
};
template <typename X>
class Sin {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_sin<X,X>(d1);
}
};
template <typename X>
class Square {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * d1;
}
};
template <typename X, typename Z>
class Sqrt {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_sqrt<X, Z>(d1);
}
};
template <typename X, typename Z>
class RSqrt {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return static_cast<Z>(1) / nd4j::math::nd4j_sqrt<X, Z>(d1);
}
};
template <typename X>
class Rint {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_rint<X,X>(d1);
}
};
template <typename X>
class SoftPlus {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::softplus<X, X>(d1);
}
};
template <typename X>
class Sign {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return (d1 > static_cast<X>(0)) - (d1 < static_cast<X>(0));
}
};
template <typename X>
class TimesOneMinus {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * (static_cast<X>(1) - d1);
}
};
template <typename X>
class RationalTanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
// keep 2/3 as runtime variable, to match precision
auto dis = (static_cast<X>(2) / static_cast<X>(3)) * d1;
auto tanh = nd4j::math::nd4j_sgn<X,X>(dis) * (static_cast<X>(1) - (static_cast<X>(1) / (static_cast<X>(1) + static_cast<X>(nd4j::math::nd4j_abs<X>(dis)) + nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(2)) + static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(4)) )));
return static_cast<X>(1.7159f) * tanh;
}
};
template <typename X>
class RationalTanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto dis = (static_cast<X>(2.f) / static_cast<X>(3.f)) * d1;
auto a = static_cast<X>(1.f) + nd4j::math::nd4j_abs<X>(dis) + nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(2.f)) + static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(4));
auto tDeriv = (static_cast<X>(1.f) + nd4j::math::nd4j_sign<X,X>(dis) * (static_cast<X>(2.f) * dis + static_cast<X>(4.f) * static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(3)))) / (a * a);
return static_cast<X>(1.7159f) * (static_cast<X>(2.f) / static_cast<X>(3.f)) * tDeriv;
}
};
template <typename X>
class Tanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_tanh<X, X>(d1);
}
};
template <typename X>
class RectifiedTanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_max<X>(static_cast<X>(0), nd4j::math::nd4j_tanh<X,X>(d1));
}
};
template <typename X>
class RectifiedTanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 > static_cast<X>(0.f) ? nd4j::math::nd4j_tanhderivative<X,X>(d1) : static_cast<X>(0.f);
}
};
template <typename X>
class ATanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_atanh<X,X>(d1);
}
};
template <typename X>
class TanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_tanhderivative<X,X>(d1);
}
};
template <typename X>
class Cube {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * d1 * d1;
}
};
template <typename X>
class CubeDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(3) * d1 * d1;
}
};
template <typename X>
class ACos {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_acos<X, X>(d1);
}
};
template <typename X>
class ASinh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_asinh<X, X>(d1);
}
};
template <typename X>
class ASinhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.f) / (nd4j::math::nd4j_sqrt<X, X>(nd4j::math::nd4j_pow<X, X, X>(d1, static_cast<X>(2.f)) + static_cast<X>(1.f)));
}
};
template <typename X>
class ACosh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_acosh<X, X>(d1);
}
};
template <typename X>
class ACoshDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.f) / (nd4j::math::nd4j_sqrt<X, X>(d1 - static_cast<X>(1.f)) * nd4j::math::nd4j_sqrt<X, X>(d1 + static_cast<X>(1.f)));
}
};
template <typename X>
class Ones {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.0f);
}
};
template <typename X>
class SoftSign {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_softsign<X, X>(d1);
}
};
template <typename X>
class SoftSignDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_softsignderivative<X,X>(d1);
}
};
template <typename X, typename Z>
class MatchConditionBool {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
// this op return 1.0 if condition met, 0.0 otherwise
op_def static Z op(X d1, X *extraParams) {
X compare = extraParams[0];
X eps = extraParams[1];
auto mode = static_cast<int>(extraParams[2]);
//nd4j_printf("value: %f; comp: %f; eps: %f; mode: %i;\n", d1, compare, eps, mode);
switch (mode) {
case 0: // equals
return nd4j::math::nd4j_abs<X>(d1 - compare) <= eps ? true : false;
case 1: // not equals
return nd4j::math::nd4j_abs<X>(d1 - compare) > eps ? true : false;
case 2: // less_than
return d1 < compare ? true : false;
case 3: // greater_than
return d1 > compare ? true : false;
case 4: // less_or_equals_than
return d1 <= compare ? true : false;
case 5: // greater_or_equals_than
return d1 >= compare ? true : false;
case 6: // abs_less_than
return nd4j::math::nd4j_abs<X>(d1) < compare ? true : false;
case 7: // abs_greater_than
return nd4j::math::nd4j_abs<X>(d1) > compare ? true : false;
case 8: // is inf
return nd4j::math::nd4j_isinf(d1) ? true : false;
case 9: // is nan
return nd4j::math::nd4j_isnan(d1) ? true : false;
case 10:
return (d1 == compare) ? true : false;
case 11:
return (d1 != compare) ? true : false;
case 12: // abs_greater_or_equals_than
return nd4j::math::nd4j_abs<X>(d1) >= compare ? true : false;
case 13: // abs_less_or_equals_than
return nd4j::math::nd4j_abs<X>(d1) <= compare ? true : false;
case 14:
// isFinite
return !(nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1));
case 15:
// isInfinite
return nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1);
default:
printf("Undefined match condition: [%i]\n", mode);
}
return d1;
}
};
template <typename X, typename Z>
class MatchCondition {
public:
no_op_exec_special
no_op_exec_special_cuda
no_op_exec_special_accumulation_long
no_op_exec_special_accumulation_cuda
op_def static Z startingValue(const X *input) {
return static_cast<Z>(0);
}
op_def static Z merge(Z old, Z opOutput, X *extraParams) {
return old + opOutput;
}
op_def static Z update(Z old, Z opOutput, X *extraParams) {
return old + opOutput;
}
// this op return 1.0 if condition met, 0.0 otherwise
op_def static Z op(X d1, X *extraParams) {
X compare = extraParams[0];
X eps = extraParams[1];
auto mode = static_cast<int>(extraParams[2]);
//printf("value: %f; comp: %f; eps: %f; mode: %i;\n", (float) d1, (float) compare, (float) eps, mode);
switch (mode) {
case 0: // equals
return nd4j::math::nd4j_abs<X>(d1 - compare) <= eps ? 1 : 0;
case 1: // not equals
return nd4j::math::nd4j_abs<X>(d1 - compare) > eps ? 1 : 0;
case 2: // less_than
return d1 < compare ? 1 : 0;
case 3: // greater_than
return d1 > compare ? 1 : 0;
case 4: // less_or_equals_than
return d1 <= compare ? 1 : 0;
case 5: // greater_or_equals_than
return d1 >= compare ? 1 : 0;
case 6: // abs_less_than
return nd4j::math::nd4j_abs<X>(d1) < compare ? 1 : 0;
case 7: // abs_greater_than
return nd4j::math::nd4j_abs<X>(d1) > compare ? 1 : 0;
case 8: // is inf
return nd4j::math::nd4j_isinf(d1) ? 1 : 0;
case 9: // is nan
return nd4j::math::nd4j_isnan(d1) ? 1 : 0;
case 10:
return (d1 == compare) ? 1 : 0;
case 11:
return (d1 != compare) ? 1 : 0;
case 12: // abs_greater_or_equals_than
return nd4j::math::nd4j_abs<X>(d1) >= compare ? 1 : 0;
case 13: // abs_less_or_equals_than
return nd4j::math::nd4j_abs<X>(d1) <= compare ? 1 : 0;
case 14:
// isFinite
return !(nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1)) ? 1 : 0;
case 15:
// isInfinite
return nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1) ? 1 : 0;
default:
printf("Undefined match condition: [%i]\n", mode);
}
return d1;
}
op_def static Z postProcess(Z reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X>
class ELU {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_elu<X,X>(d1);
}
};
template <typename X>
class ELUDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_eluderivative<X,X>(d1);
}
};
template <typename X, typename Y, typename Z>
class RELU {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static Z op(X d1, Y d2, Z *params) {
auto xt = static_cast<Z>(d1);
auto xf = static_cast<Z>(d2);
return xt < xf ? xf : xt;
}
};
template <typename X, typename Y, typename Z>
class SXELogitsSmoother {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return d1 * ((X)1.f - (X) d2) + (X)(0.5f) * (X) d2;
}
};
template <typename X, typename Y, typename Z>
class RELU6 {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static Z op(X d1, Y d2, Z *params) {
auto relu = simdOps::RELU<X,Y,Z>::op(d1, d2, params);
return relu < static_cast<Z>(6) ? relu : static_cast<Z>(6);
}
};
template <typename X, typename Y, typename Z>
class LeakyRELU {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
auto val = static_cast<Z>(d1);
auto alpha = static_cast<Z>(d2);
return val < 0.0f ? alpha * val : val;
}
};
template <typename X>
class SELU {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 > static_cast<X>(0.0f) ? static_cast<X>(SELU_LAMBDA) * static_cast<X>(d1) : static_cast<X>(SELU_LAMBDA) * (static_cast<X>(SELU_ALPHA) * nd4j::math::nd4j_exp<X, X>(d1) - static_cast<X>(SELU_ALPHA));
}
};
template <typename X>
class SELUDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 > static_cast<X>(0.f) ? static_cast<X>(SELU_LAMBDA) : static_cast<X>(SELU_ALPHA) * static_cast<X>(SELU_LAMBDA) * nd4j::math::nd4j_exp<X, X>(d1);
}
};
template <typename X, typename Y, typename Z>
class LeakyRELUDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
if (d1 >= static_cast<X>(0))
return static_cast<Z>(1);
else
return static_cast<Z>(d2);
}
};
template <typename X>
class ASin {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_asin<X,X>(d1);
}
};
template <typename X>
class Sinh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_sinh<X,X>(d1);
}
};
template <typename X>
class SinhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_cosh<X, X>(d1);
}
};
template <typename X>
class Cosh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_cosh<X,X>(d1);
}
};
template <typename X>
class Tan {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_tan<X,X>(d1);
}
};
template <typename X>
class TanDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.f) / nd4j::math::nd4j_pow<X, X, X>(nd4j::math::nd4j_cos<X, X>(d1), static_cast<X>(2.0f));
}
};
template <typename X>
class ATan {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_atan<X, X>(d1);
}
};
template <typename X, typename Y, typename Z>
class Atan2 {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_atan2<X, Z>(d2, d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return op(d1, params[0]);
}
};
template <typename X>
class Identity {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1;
}
};
template <typename X>
class Stabilize {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
X k = params[0];
if (d1 * k > static_cast<X>(- MIN_CUTFOFF))
return static_cast<X>(- MIN_CUTFOFF) / k;
else if (d1 * k < static_cast<X>(MIN_CUTFOFF))
return static_cast<X>(MIN_CUTFOFF) / k;
return d1;
}
};
template <typename X, typename Y, typename Z>
class Step {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static Z op(X d1, Y d2, Z *params) {
return (d1 > static_cast<X>(d2) ? static_cast<Z>(1) : static_cast<Z>(0));
}
};
template <typename X>
class OneMinus {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1) - d1;
}
};
template <typename X>
class Sum {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static X update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static X op(X d1, X *extraParams) {
return d1;
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X>
class ReduceSameBenchmarkOp {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static X update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static X op(X d1, X *extraParams) {
auto f1 = static_cast<float>(d1);
return static_cast<X>(nd4j::math::nd4j_pow<float,float,float>(f1, 3)
+ nd4j::math::nd4j_log<float,float>(f1) * nd4j::math::nd4j_sin<float,float>(f1)
/ nd4j::math::nd4j_tanh<float,float>(static_cast<float>(M_E) * static_cast<float>(M_PI) * f1)
* nd4j::math::nd4j_sqrt<float,float>(static_cast<float>(M_PI) / f1)
- nd4j::math::nd4j_atan<float,float>(static_cast<float>(M_E) / f1));
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class ShannonEntropy {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
auto p = d1 * d1;
return static_cast<Z>(p) * nd4j::math::nd4j_log<X, Z>(p);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return -reduction;
}
};
template <typename X, typename Z>
class LogEntropy {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(d1) * nd4j::math::nd4j_log<X, Z>(d1);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
//entropy is -sum(p(x) * log(p(x))); log entropy is log of this
return nd4j::math::nd4j_log<Z, Z>(-reduction);
}
};
template <typename X, typename Z>
class Entropy {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(d1) * nd4j::math::nd4j_log<X, Z>(d1);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return static_cast<Z>(-reduction); //entropy is -sum(p(x) * log(p(x)))
}
};
template <typename X>
class ASum {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
const static functions::ReduceType reduceType = functions::ReduceType::ASUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
}
op_def static X update(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
}
op_def static X op(X d1, X *extraParams) {
return nd4j::math::nd4j_abs<X>(d1);
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return nd4j::math::nd4j_abs<X>(reduction);
}
};
template <typename X, typename Z>
class CountNonZero {
public:
no_op_exec_special_accumulation_long
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::ASUM;
op_def static Z startingValue(const X *input) {
return static_cast<Z>(0);
}
op_def static Z merge(Z old, Z opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, X *extraParams) {
return d1 == static_cast<X>(0.0f) ? static_cast<Z>(0.0f) : static_cast<Z>(1.0f);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class CountZero {
public:
no_op_exec_special_accumulation_long
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static Z startingValue(const X *input) {
return static_cast<Z>(0.0f);
}
op_def static Z merge(Z old, Z opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, X *extraParams) {
return d1 == static_cast<X>(0) ? static_cast<X>(1) : static_cast<X>(0);
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return static_cast<Z>(reduction);
}
};
template <typename X>
class Prod {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
const static functions::ReduceType reduceType = functions::ReduceType::PRODUCT;
op_def static X startingValue(const X *input) {
return static_cast<X>(1);
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return opOutput * old;
}
op_def static X update(X old, X opOutput, X *extraParams) {
return opOutput * old;
}
op_def static X op(X d1, X *extraParams) {
return d1;
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class Any {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, X *extraParams) {
return d1;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction > static_cast<X>(0) ? static_cast<Z>(1) : static_cast<Z>(0) ;
}
};
template <typename X, typename Z>
class All {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::PRODUCT;
op_def static X startingValue(const X *input) {
return static_cast<X>(1);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput * old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput * old;
}
op_def static Z op(X d1, X *extraParams) {
return d1;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction > static_cast<X>(0) ? static_cast<Z>(1) : static_cast<Z>(0);
}
};
template <typename X, typename Z>
class Mean {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return d1;
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return reduction / (Z) n;
}
};
template <typename X, typename Z>
class ReduceFloatBenchmarkOp {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
auto f1 = static_cast<float>(d1);
return static_cast<Z>(nd4j::math::nd4j_pow<float,float,float>(f1, 3)
+ nd4j::math::nd4j_log<float,float>(f1) * nd4j::math::nd4j_sin<float,float>(f1)
/ nd4j::math::nd4j_tanh<float,float>(static_cast<float>(M_E) * static_cast<float>(M_PI) * f1)
* nd4j::math::nd4j_sqrt<float,float>(static_cast<float>(M_PI) / f1)
- nd4j::math::nd4j_atan<float,float>(static_cast<float>(M_E) / f1));
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return (Z) reduction / (Z) n;
}
};
template <typename X, typename Z>
class AMean {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return nd4j::math::nd4j_abs<X>(d1);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return nd4j::math::nd4j_abs<Z>(reduction) / static_cast<Z>(n);
}
};
template <typename X>
class Max {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
const static functions::ReduceType reduceType = functions::ReduceType::MAX;
op_def static X startingValue(const X *input) {
return -nd4j::DataTypeUtils::max<X>();
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_max<X>(old, opOutput);
}
op_def static X update(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_max<X>(opOutput, old);
}
op_def static X op(X d1, X d2, X *params) {
return nd4j::math::nd4j_max<X>(d1, d2);
}
op_def static X op(X d1, X d2) {
return nd4j::math::nd4j_max<X>(d1, d2);
}
// FIXME: this signature overlaps with MetaOp
op_def static X op(X d1, X *extraParams) {
return d1;
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Y, typename Z>
class AMaxPairwise {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1, Y d2) {
auto z1 = static_cast<Z>(d1);
auto z2 = static_cast<Z>(d2);
if (nd4j::math::nd4j_abs<Z>(z1) > nd4j::math::nd4j_abs<Z>(z2))
return z1;
else
return z2;
}
};
template <typename X, typename Y, typename Z>
class AMinPairwise {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1, Y d2) {
auto z1 = static_cast<Z>(d1);
auto z2 = static_cast<Z>(d2);
if (nd4j::math::nd4j_abs<Z>(z1) < nd4j::math::nd4j_abs<Z>(z2))
return z1;
else
return z2;
}
};
template <typename X, typename Y, typename Z>
class MaxPairwise {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_max<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
}
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_max<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
}
};
template <typename X, typename Y, typename Z>
class MinPairwise {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_min<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
}
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_min<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
}
};
template <typename X>
class AMax {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
const static functions::ReduceType reduceType = functions::ReduceType::AMAX;
op_def static X startingValue(const X *input) {
return input[0];
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(old), nd4j::math::nd4j_abs<X>(opOutput));
}
op_def static X update(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(opOutput), nd4j::math::nd4j_abs<X>(old));
}
op_def static X op(X d1, X d2, X *params) {
return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2));
}
op_def static X op(X d1, X d2) {
return nd4j::math::nd4j_abs<X>(d1) > nd4j::math::nd4j_abs<X>(d2) ? d1 : d2;
}
// FIXME: this signature overlaps with MetaOp
op_def static X op(X d1, X *extraParams) {
return nd4j::math::nd4j_abs<X>(d1);
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return nd4j::math::nd4j_abs<X>(reduction);
}
};
template <typename X>
class AMin {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
const static functions::ReduceType reduceType = functions::ReduceType::AMIN;
op_def static X startingValue(const X *input) {
return input[0];
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(old), nd4j::math::nd4j_abs<X>(opOutput));
}
op_def static X update(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(opOutput), nd4j::math::nd4j_abs<X>(old));
}
op_def static X op(X d1, X d2, X *params) {
return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2));
}
op_def static X op(X d1, X d2) {
return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2));
}
// FIXME: this signature overlaps with MetaOp
op_def static X op(X d1, X *extraParams) {
return nd4j::math::nd4j_abs<X>(d1);
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return nd4j::math::nd4j_abs<X>(reduction);
}
};
template <typename X>
class Min {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
const static functions::ReduceType reduceType = functions::ReduceType::MIN;
op_def static X startingValue(const X *input) {
return nd4j::DataTypeUtils::max<X>();
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_min<X>(old, opOutput);
}
op_def static X update(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_min<X>(opOutput, old);
}
op_def static X op(X d1, X d2, X *params) {
return nd4j::math::nd4j_min<X>(d1, d2);
}
op_def static X op(X d1, X d2) {
return nd4j::math::nd4j_min<X>(d1, d2);
}
// FIXME: this signature overlaps with MetaOp
op_def static X op(X d1, X *extraParams) {
return d1;
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class Norm1 {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(nd4j::math::nd4j_abs<X>(d1));
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class Norm2 {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return nd4j::math::nd4j_sqrt<Z, Z>(reduction);
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(d1 * d1);
}
};
template <typename X, typename Z>
class SquaredNorm {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(d1 * d1);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class NormFrobenius {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
X v = nd4j::math::nd4j_abs<X>(d1);
return static_cast<Z>(v * v);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return nd4j::math::nd4j_sqrt<Z, Z>(reduction);
}
};
template <typename X, typename Z>
class NormP {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return nd4j::math::nd4j_pow<X, Z, Z>(nd4j::math::nd4j_abs<X>(d1), extraParams[0]);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return nd4j::math::nd4j_pow<Z, Z, Z>(reduction, static_cast<Z>(1.0f) / extraParams[0]);
}
};
template <typename X, typename Z>
class NormMax {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return nd4j::math::nd4j_max<Z>(nd4j::math::nd4j_abs<Z>(old),
nd4j::math::nd4j_abs<Z>(opOutput));
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(d1);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return nd4j::math::nd4j_max<Z>(nd4j::math::nd4j_abs<Z>(reduction), nd4j::math::nd4j_abs<Z>(reduction));
}
};
template <typename X, typename Z>
class Variance {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return old + opOutput;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return old + opOutput;
}
op_def static X op(X d1, Z *extraParams) {
X mean = static_cast<X>(extraParams[0]);
X ret = d1 - mean;
return ret * ret;
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
// T bias = extraParams[1];
// return (reduction - (nd4j::math::nd4j_pow<T>(bias, static_cast<T>(2.0f)) / static_cast<T>(n))) / (n - 1)
return static_cast<Z>(reduction) / static_cast<Z>(n - 1);
}
};
/**
* Standard deviation of a buffer
*/
template <typename X, typename Z>
class StandardDeviation {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return old + opOutput;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return old + opOutput;
}
op_def static Z op(X d1, Z *extraParams) {
X mean = extraParams[0];
X ret = d1 - mean;
return ret * ret;
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
Z ret = Variance<X,Z>::postProcess(reduction, n, extraParams);
Z sqrtRet = nd4j::math::nd4j_sqrt<X, Z>(ret);
return sqrtRet;
}
};
template <typename X, typename Y>
class CosineSimilarity {
public:
static const int extraParamsLen = 2;
op_def static X *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParams) {
//delete[] extraParams;
}
op_def static Y startingValue(const X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
return reduction / (nd4j::math::nd4j_sqrt<Y, Y>(extraParams[0]) * nd4j::math::nd4j_sqrt<Y, Y>(extraParams[1]));
}
op_def static Y op(X d1, X d2, Y *extraParams) {
extraParams[0] += static_cast<Y>(d1 * d1);
extraParams[1] += static_cast<Y>(d2 * d2);
return static_cast<Y>(d1 * d2);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
extraParamsTotal[0] += extraParamsLocal[0];
extraParamsTotal[1] += extraParamsLocal[1];
}
#ifdef __CUDACC__
static _CUDA_D inline Y opAtomic(X d1, X d2, Y *extraParams) {
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],static_cast<Y>(d1 * d1));
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1],static_cast<Y>(d2 * d2));
return static_cast<Y>(d1 * d2);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParams) {
return old + opOutput;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
return update(old, opOutput, extraParams);
}
};
template <typename X, typename Y>
class JaccardDistance {
public:
static const int extraParamsLen = 2;
op_def static X *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParams) {
//delete[] extraParams;
}
op_def static Y startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
// num / denom
return (static_cast<Y>(1.0f)) - (extraParams[0] / extraParams[1]);
}
op_def static Y num(X d1, X d2) {
return nd4j::math::nd4j_min<X>(d1, d2);
}
op_def static Y denom(X d1, X d2) {
return nd4j::math::nd4j_max<X>(d1, d2);
}
op_def static Y op(X d1, X d2, Y *extraParams) {
extraParams[0] += static_cast<Y>(num(d1, d2));
extraParams[1] += static_cast<Y>(denom(d1, d2));
return static_cast<Y>(0.0f);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
extraParamsTotal[0] += extraParamsLocal[0];
extraParamsTotal[1] += extraParamsLocal[1];
}
#ifdef __CUDACC__
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParams) {
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],num(d1, d2));
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], denom(d1, d2));
return static_cast<Y>(0.0f);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParams) {
return old + opOutput;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
return update(old, opOutput, extraParams);
}
};
template <typename X, typename Y>
class SimpleHammingDistance {
public:
static const int extraParamsLen = 0;
op_def static X *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParams) {
//delete[] extraParams;
}
op_def static Y startingValue(const X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
return static_cast<Y>(reduction / n);
}
op_def static Y op(X d1, X d2, Y *extraParams) {
return (d1 == d2) ? static_cast<Y>(0.0f) : static_cast<Y>(1.0f);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
}
#ifdef __CUDACC__
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParams) {
return op(d1, d2, extraParams);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParams) {
return old + opOutput;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
return update(old, opOutput, extraParams);
}
};
template <typename X, typename Y>
class CosineDistance {
public:
static const int extraParamsLen = 2;
op_def static X *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParams) {
//delete[] extraParams;
}
op_def static Y startingValue(const X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
return (static_cast<Y>(1.0f)) - (reduction / (nd4j::math::nd4j_sqrt<Y, Y>(extraParams[0]) * nd4j::math::nd4j_sqrt<Y, Y>(extraParams[1])));
}
op_def static Y op(X d1, X d2, Y *extraParams) {
extraParams[0] += static_cast<Y>(nd4j::math::nd4j_abs<X>(d1) * nd4j::math::nd4j_abs<X>(d1));
extraParams[1] += static_cast<Y>(nd4j::math::nd4j_abs<X>(d2) * nd4j::math::nd4j_abs<X>(d2));
return (d1 * d2);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
extraParamsTotal[0] += extraParamsLocal[0];
extraParamsTotal[1] += extraParamsLocal[1];
}
#ifdef __CUDACC__
static _CUDA_D inline Y opAtomic(X d1, X d2, Y *extraParams) {
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0], nd4j::math::nd4j_abs<Y>(d1) * nd4j::math::nd4j_abs<Y>(d1));
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], nd4j::math::nd4j_abs<Y>(d2) * nd4j::math::nd4j_abs<Y>(d2));
return (d1 * d2);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParams) {
return old + opOutput;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
return update(old, opOutput, extraParams);
}
};
/**
* Dot product between 2 arrays
*/
template <typename X, typename Y>
class Dot {
public:
static const int extraParamsLen = 0;
op_def static X * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParamsRef) {
//no-op
//delete[] * extraParamsRef;
}
op_def static Y startingValue(const X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) {
return reduction;
}
op_def static Y op(X d1, X d2, Y *extraParamsRef) {
return static_cast<Y>(d1 * d2);
}
#ifdef __CUDACC__
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) {
return opOutput + old;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {}
};
/**
* Op to check equality within arrays
*/
template <typename X, typename Z>
class EqualsWithEps {
public:
static const int extraParamsLen = 0;
op_def static X * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParamsRef) {
//no-op
}
op_def static Z startingValue(const X *input) {
return static_cast<Z>(0.0f);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParamsRef) {
return reduction;
}
op_def static Z op(X d1, X d2, Z *extraParamsRef) {
double eps = nd4j::math::nd4j_abs<double>(extraParamsRef[2]);
return static_cast<Z>(!nd4j::math::nd4j_eq<X>(d1, d2, eps));
}
#ifdef __CUDACC__
__device__
static inline Z opAtomic(X d1, X d2, Z *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
op_def static Z update(Z old, Z opOutput, Z *extraParamsRef) {
return opOutput + old;
}
op_def static Z merge(X old, Z opOutput, Z *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
op_def static void aggregateExtraParams(Z *extraParamsTotal, Z *extraParamsLocal) {}
};
template <typename X, typename Y>
class EuclideanDistance {
public:
static const int extraParamsLen = 0;
op_def static X * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParamsRef) {
//no-op
}
op_def static Y startingValue(const X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) {
return nd4j::math::nd4j_sqrt<Y, Y>(reduction);
}
op_def static Y op(X d1, X d2, Y *extraParamsRef) {
X ret = d1 - d2;
return static_cast<Y>(ret * ret);
}
#ifdef __CUDACC__
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) {
return opOutput + old;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {}
};
template <typename X, typename Y>
class ManhattanDistance {
public:
static const int extraParamsLen = 0;
op_def static X * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParamsRef) {
//no-op
}
op_def static Y startingValue(const X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) {
return reduction;
}
op_def static Y op(X d1, X d2, Y *extraParamsRef) {
return nd4j::math::nd4j_abs<X>(d1 - d2);
}
op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) {
return old + opOutput;
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
}
#ifdef __CUDACC__
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
#ifndef __clang__
#pragma omp declare simd uniform(extraParamsRef)
#endif
op_def static Y merge(X old, X opOutput, X *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
};
template <typename X>
class IndexAbsoluteMax {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
return nd4j::math::nd4j_abs<X>(val);
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
opOutput.value = nd4j::math::nd4j_abs<X>(opOutput.value);
old.value = nd4j::math::nd4j_abs<X>(old.value);
if (opOutput.value > old.value)
return opOutput;
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#elif defined(__GNUC__)
#endif
return old;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (nd4j::math::nd4j_abs<X>(f1.value) > nd4j::math::nd4j_abs<X>(f2.value))
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
static _CUDA_HD inline X startingValue(const X *input) {
return 0;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
};
template <typename X>
class FirstIndex {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
#ifdef __CUDACC__
if (opOutput.index < 0)
return old;
#endif
auto res = simdOps::MatchCondition<X,X>::op(opOutput.value, extraParams);
//printf("res: %f; oldIdx: %i; newIdx: %i\n", res, old.index, opOutput.index);
if (res == static_cast<X>(0))
return old;
if (old.index < 0)
return opOutput;
if (old.index > opOutput.index)
return opOutput;
return old;
}
static _CUDA_HD inline X startingValue(const X *input) {
return -nd4j::DataTypeUtils::max<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = -1;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (f1.index > f2.index)
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
};
template <typename X>
class LastIndex {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
#ifdef __CUDACC__
if (opOutput.index < 0)
return old;
#endif
auto res = simdOps::MatchCondition<X,X>::op(opOutput.value, extraParams);
if (res == static_cast<X>(0))
return old;
if (old.index < 0)
return opOutput;
if (old.index < opOutput.index)
return opOutput;
return old;
}
static _CUDA_HD inline X startingValue(const X *input) {
return -nd4j::DataTypeUtils::max<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = -1;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (f1.index < f2.index)
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
};
template <typename X>
class IndexMax {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
if (opOutput.value > old.value) {
return opOutput;
}
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#elif defined(__GNUC__)
#endif
return old;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (f1.value > f2.value)
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
static _CUDA_HD inline X startingValue(const X *input) {
return -nd4j::DataTypeUtils::max<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
};
template <typename X>
class IndexAbsoluteMin {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(
functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
static _CUDA_HD inline X startingValue(const X *input) {
return nd4j::DataTypeUtils::max<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
opOutput.value = nd4j::math::nd4j_abs<X>(opOutput.value);
old.value = nd4j::math::nd4j_abs<X>(old.value);
if (opOutput.value < old.value)
return opOutput;
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#elif defined(__GNUC__)
#endif
return old;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (nd4j::math::nd4j_abs<X>(f1.value) < nd4j::math::nd4j_abs<X>(f2.value))
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
};
template <typename X>
class IndexMin {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(
functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
static _CUDA_HD inline X startingValue(const X *input) {
return nd4j::DataTypeUtils::max<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
if (opOutput.value < old.value)
return opOutput;
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#elif defined(__GNUC__)
#endif
return old;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (f1.value < f2.value)
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
};
template <typename X, typename Z>
class SummaryStatsVariance {
public:
static _CUDA_HD inline Z getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<X> val) {
if (biasCorrected) {
Z ret = static_cast<Z>(val.varianceBiasCorrected());
if (ret < static_cast<Z>(0.0f))
return static_cast<Z>(val.variance());
return ret;
}
return static_cast<Z>(val.variance());
}
static _CUDA_HD inline functions::summarystats::SummaryStatsData<X> op(functions::summarystats::SummaryStatsData<X> d1, Z *extraParams) {
return d1;
}
};
template <typename X, typename Z>
class SummaryStatsStandardDeviation {
public:
static _CUDA_HD inline Z getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<X> val) {
if (biasCorrected) {
auto ret = static_cast<Z>(val.varianceBiasCorrected());
if (ret < static_cast<Z>(0.0f))
return nd4j::math::nd4j_sqrt<double, Z>(val.variance());
else
return nd4j::math::nd4j_sqrt<double, Z>(ret);
}
return nd4j::math::nd4j_sqrt<double, Z>(val.variance());
}
static _CUDA_HD inline functions::summarystats::SummaryStatsData<X> op(functions::summarystats::SummaryStatsData<X> d1, Z *extraParams) {
return d1;
}
};
template <typename X>
class DropOut {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
inline _CUDA_D static X op(X d1, X *params) {
X prob = params[0];
#ifdef __CUDACC__
X length = params[1];
X tid = blockIdx.x * blockDim.x + threadIdx.x;
X rnd = nd4j::math::nd4j_abs<X>(nd4j::math::nd4j_cos<X>(static_cast<X>(clock64()) * static_cast<X>(tid) + static_cast<X>(length) * static_cast<X>(tid)));
#else
X rnd = static_cast<X>(rand() / RAND_MAX);
#endif
return rnd >= prob ? static_cast<X>(0.0f) : d1;
}
};
template <typename X, typename Y, typename Z>
class DropOutInverted {
public:
no_op_exec_special
no_op_exec_special_cuda
#ifdef __CUDACC__
__device__
#endif
inline static Z op(X d1, Y d2, Z *params) {
Y prob = d2;
#ifdef __CUDACC__
X length = params[1];
X tid = blockIdx.x * blockDim.x + threadIdx.x;
X rnd = nd4j::math::nd4j_abs<X>(nd4j::math::nd4j_cos<X>(static_cast<X>(clock64()) * static_cast<X>(tid) + static_cast<X>(length) * static_cast<X>(tid)));
#else
X rnd = static_cast<X>(rand() / RAND_MAX);
#endif
return rnd >= static_cast<X>(prob) ? static_cast<Z>(0.0f) : reinterpret_cast<Z>(d1 / static_cast<X>(prob));
}
};
template <typename X, typename Y, typename Z>
class ReplaceNans {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_isnan(d1) ? static_cast<Z>(d2) : static_cast<Z>(d1) ;
}
};
// this op is used for conditional pairwise transforms only
template <typename X, typename Y, typename Z>
class CompareAndReplace{
public:
// op definition for PairWise Transform
op_def static Z op(X d1, Y d2, Z *params) {
auto zd1 = static_cast<Z>(d1);
auto zd2 = static_cast<Z>(d2);
auto compare = params[0];
auto eps = params[2];
int mode = (int) params[3];
if (mode == 0) // equals
if (nd4j::math::nd4j_abs<Z>(zd1 - compare) <= eps)
return zd2;
else
return zd1;
else if (mode == 1) // not equals eps
if (nd4j::math::nd4j_abs<Z>(zd1 - compare) > eps)
return zd2;
else
return zd1;
else if (mode == 2) // less_than eps
if (zd1 < compare)
return zd2;
else
return zd1;
else if (mode ==3) // greater_than
if (zd1 > compare)
return zd2;
else
return zd1;
else if (mode == 4) // less_or_equals_than
if (zd1 <= compare)
return zd2;
else
return zd1;
else if (mode == 5) // greater_or_equals_than
if (zd1 >= compare)
return zd2;
else
return zd1;
else if (mode == 6) // abs_less_than
if (nd4j::math::nd4j_abs<Z>(zd1) < compare)
return zd2;
else
return zd1;
else if (mode == 7) // abs_greater_than
if (nd4j::math::nd4j_abs<Z>(zd1) > compare)
return zd2;
else
return zd1;
else if (mode == 8) // is inf
if (nd4j::math::nd4j_isinf(zd1))
return zd2;
else
return zd1;
else if (mode == 9) // is nan
if (nd4j::math::nd4j_isnan(zd1))
return zd2;
else
return zd1;
else if (mode == 10)
if (zd1 == compare)
return zd2;
else
return zd1;
else if (mode == 11)
if (zd1 != compare)
return zd2;
else
return zd1;
else if (mode == 12) // abs_greater_or_equals_than
if (nd4j::math::nd4j_abs<Z>(zd1) >= compare)
return zd2;
else
return zd1;
else if (mode == 13) // abs_less_or_equals_than
if (nd4j::math::nd4j_abs<Z>(zd1) <= compare)
return zd2;
else
return zd1;
else
printf("Undefined boolean operation: [%i]\n", mode);
return zd1;
}
};
template <typename X, typename Y, typename Z>
class CompareAndSet {
public:
// op definition for PairWise Transform
op_def static Z op(X dX, Y dY, Z *params) {
auto d1 = static_cast<Z>(dX);
auto d2 = static_cast<Z>(dY);
auto compare = params[0];
auto eps = params[2];
auto mode = static_cast<int>(params[3]);
if (mode == 0) // equals
if (nd4j::math::nd4j_abs<Z>(d2 - compare) <= eps)
return d2;
else
return d1;
else if (mode == 1) // not equals
if (nd4j::math::nd4j_abs<Z>(d2 - compare) > eps)
return d2;
else
return d1;
else if (mode == 2) // less_than
if (d2 < compare)
return d2;
else
return d1;
else if (mode ==3) // greater_than
if (d2 > compare)
return d2;
else
return d1;
else if (mode == 4) // less_or_equals_than
if (d2 <= compare)
return d2;
else
return d1;
else if (mode == 5) // greater_or_equals_than
if (d2 >= compare)
return d2;
else
return d1;
else if (mode == 6) // abs_less_than
if (nd4j::math::nd4j_abs<Z>(d2) < compare)
return d2;
else
return d1;
else if (mode == 7) // abs_greater_than
if (nd4j::math::nd4j_abs<Z>(d2) > compare)
return d2;
else
return d1;
else if (mode == 8) // is inf
if (nd4j::math::nd4j_isinf(d2))
return d2;
else
return d1;
else if (mode == 9) // is nan
if (nd4j::math::nd4j_isnan(d2))
return d2;
else
return d1;
else if (mode == 10)
if (d2 == compare)
return d2;
else
return d1;
else if (mode == 11)
if (d2 != compare)
return d2;
else
return d1;
else if (mode == 12) // abs_greater_or_equals_than
if (nd4j::math::nd4j_abs<Z>(d1) >= compare)
return d2;
else
return d1;
else if (mode == 13) // abs_less_or_equals_than
if (nd4j::math::nd4j_abs<Z>(d1) <= compare)
return d2;
else
return d1;
else
printf("Undefined boolean operation: [%i]\n", mode);
return d1;
}
};
template <typename X>
class CompareAndSetTransform {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
// op definition for Transform
op_def static X op(X d1, X *params) {
auto compare = params[0];
auto set = params[1];
auto eps = params[2];
// with mode == 0 we do set if d1 equals to compare, and with mode == 1 - we go otherwise
int mode = (int) params[3];
if (mode == 0) // equals
if (nd4j::math::nd4j_abs<X>(d1 - compare) <= eps)
return set;
else
return d1;
//return nd4j::math::nd4j_abs<T>(d1 - compare) <= eps ? set : d1;
else if (mode == 1) // not equals
if (nd4j::math::nd4j_abs<X>(d1 - compare) > eps)
return set;
else
return d1;
//return nd4j::math::nd4j_abs<T>(d1 - compare) > eps ? set : d1;
else if (mode == 2) // less_than
if (d1 < compare)
return set;
else
return d1;
else if (mode == 3) // greater_than
if (d1 > compare)
return set;
else
return d1;
else if (mode == 4) // less_or_equals_than
if (d1 <= compare)
return set;
else
return d1;
else if (mode == 5) // greater_or_equals_than
if (d1 >= compare)
return set;
else
return d1;
else if (mode == 6) // abs_less_than
if (nd4j::math::nd4j_abs<X>(d1) < compare)
return set;
else
return d1;
else if (mode == 7) // abs_greater_than
if (nd4j::math::nd4j_abs<X>(d1) > compare)
return set;
else
return d1;
else if (mode == 8) // is inf
if (nd4j::math::nd4j_isinf(d1))
return set;
else
return d1;
else if (mode == 9) // is nan
if (nd4j::math::nd4j_isnan(d1))
return set;
else
return d1;
else if (mode == 10) // equals (exact)
if (d1 == compare)
return set;
else
return d1;
else if (mode == 11) // not_equals (exact)
if (d1 != compare)
return set;
else
return d1;
else if (mode == 12) // abs_greater_or_equals_than
if (nd4j::math::nd4j_abs<X>(d1) >= compare)
return set;
else
return d1;
else if (mode == 13) // abs_less_or_equals_than
if (nd4j::math::nd4j_abs<X>(d1) <= compare)
return set;
else
return d1;
else
printf("Undefined boolean operation: [%i]\n", mode);
return d1;
}
};
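/*
 * Illustration only (not part of the op set): a minimal scalar sketch of
 * the eps-based compare-and-set semantics shared by modes 0 and 1 above.
 * The helper name and the use of plain doubles are assumptions made for
 * this example.
 */
static inline double compare_and_set_scalar_demo(double d1, double compare,
                                                 double set, double eps,
                                                 int mode) {
    double diff = d1 > compare ? d1 - compare : compare - d1; /* |d1 - compare| */
    if (mode == 0)       /* equals within eps */
        return diff <= eps ? set : d1;
    else                 /* mode == 1: not equals within eps */
        return diff > eps ? set : d1;
}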
}
#endif
|
lotus85_fmt_plug.c | /*
* This software is Copyright (c) 2013 Sébastien Kaczmarek <skaczmarek@quarkslab.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* Fixed the format to crack multiple hashes + added OMP support (Dhiru
* Kholia).
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_lotus_85;
#elif FMT_REGISTERS_H
john_register_one(&fmt_lotus_85);
#else
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include "sha.h"
#include <openssl/rc2.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64 // XXX tune me!
#endif
static int omp_t = 1;
#endif
#include "formats.h"
#include "common.h"
#include "memdbg.h"
/* Plugin definition */
#define FORMAT_LABEL "lotus85"
#define FORMAT_NAME "Lotus Notes/Domino 8.5"
#define ALGORITHM_NAME "8/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 32
#define CIPHERTEXT_LENGTH (LOTUS85_MAX_BLOB_SIZE * 2)
#define BINARY_SIZE 0
#define BINARY_LENGTH 5
#define BINARY_ALIGN 1
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 4
#define MIN_KEYS_PER_CRYPT 1
// #define MAX_KEYS_PER_CRYPT 0x900 // previous value; rationale unclear
#define MAX_KEYS_PER_CRYPT 1
#define LOTUS85_MAX_BLOB_SIZE 0x64
#define LOTUS85_MIN_BLOB_SIZE 40 // XXX fictional value, but isn't this length fixed?
/* Globals */
static const char LOTUS85_UNIQUE_STRING[] = "Lotus Notes Password Pad Uniquifier";
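/* 256-entry substitution table used by the custom password hash and MACs below */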
static uint8_t ebits_to_num[256]=
{
0xbd, 0x56, 0xea, 0xf2, 0xa2, 0xf1, 0xac, 0x2a,
0xb0, 0x93, 0xd1, 0x9c, 0x1b, 0x33, 0xfd, 0xd0,
0x30, 0x04, 0xb6, 0xdc, 0x7d, 0xdf, 0x32, 0x4b,
0xf7, 0xcb, 0x45, 0x9b, 0x31, 0xbb, 0x21, 0x5a,
0x41, 0x9f, 0xe1, 0xd9, 0x4a, 0x4d, 0x9e, 0xda,
0xa0, 0x68, 0x2c, 0xc3, 0x27, 0x5f, 0x80, 0x36,
0x3e, 0xee, 0xfb, 0x95, 0x1a, 0xfe, 0xce, 0xa8,
0x34, 0xa9, 0x13, 0xf0, 0xa6, 0x3f, 0xd8, 0x0c,
0x78, 0x24, 0xaf, 0x23, 0x52, 0xc1, 0x67, 0x17,
0xf5, 0x66, 0x90, 0xe7, 0xe8, 0x07, 0xb8, 0x60,
0x48, 0xe6, 0x1e, 0x53, 0xf3, 0x92, 0xa4, 0x72,
0x8c, 0x08, 0x15, 0x6e, 0x86, 0x00, 0x84, 0xfa,
0xf4, 0x7f, 0x8a, 0x42, 0x19, 0xf6, 0xdb, 0xcd,
0x14, 0x8d, 0x50, 0x12, 0xba, 0x3c, 0x06, 0x4e,
0xec, 0xb3, 0x35, 0x11, 0xa1, 0x88, 0x8e, 0x2b,
0x94, 0x99, 0xb7, 0x71, 0x74, 0xd3, 0xe4, 0xbf,
0x3a, 0xde, 0x96, 0x0e, 0xbc, 0x0a, 0xed, 0x77,
0xfc, 0x37, 0x6b, 0x03, 0x79, 0x89, 0x62, 0xc6,
0xd7, 0xc0, 0xd2, 0x7c, 0x6a, 0x8b, 0x22, 0xa3,
0x5b, 0x05, 0x5d, 0x02, 0x75, 0xd5, 0x61, 0xe3,
0x18, 0x8f, 0x55, 0x51, 0xad, 0x1f, 0x0b, 0x5e,
0x85, 0xe5, 0xc2, 0x57, 0x63, 0xca, 0x3d, 0x6c,
0xb4, 0xc5, 0xcc, 0x70, 0xb2, 0x91, 0x59, 0x0d,
0x47, 0x20, 0xc8, 0x4f, 0x58, 0xe0, 0x01, 0xe2,
0x16, 0x38, 0xc4, 0x6f, 0x3b, 0x0f, 0x65, 0x46,
0xbe, 0x7e, 0x2d, 0x7b, 0x82, 0xf9, 0x40, 0xb5,
0x1d, 0x73, 0xf8, 0xeb, 0x26, 0xc7, 0x87, 0x97,
0x25, 0x54, 0xb1, 0x28, 0xaa, 0x98, 0x9d, 0xa5,
0x64, 0x6d, 0x7a, 0xd4, 0x10, 0x81, 0x44, 0xef,
0x49, 0xd6, 0xae, 0x2e, 0xdd, 0x76, 0x5c, 0x2f,
0xa7, 0x1c, 0xc9, 0x09, 0x69, 0x9a, 0x83, 0xcf,
0x29, 0x39, 0xb9, 0xe9, 0x4c, 0xff, 0x43, 0xab,
};
static struct custom_salt {
uint8_t lotus85_user_blob[LOTUS85_MAX_BLOB_SIZE];
uint32_t lotus85_user_blob_len;
} *cur_salt;
/*
 * 5-byte digests computed by the algorithm.
 * Since each candidate password derives a different RC2 key (and thus a
 * different deciphered user blob), the reference digest differs per
 * candidate, so we keep one pair of digests per key index.
 */
static uint8_t (*lotus85_last_binary_hash1)[BINARY_LENGTH];
static uint8_t (*lotus85_last_binary_hash2)[BINARY_LENGTH];
/* Plaintext password buffers maintained for the JtR engine */
static char (*lotus85_saved_passwords)[PLAINTEXT_LENGTH+1];
/* Decipher user.id user blob */
static void decipher_userid_blob(uint8_t *ciphered_blob, uint32_t len, uint8_t *userid_key, uint8_t *deciphered_blob)
{
RC2_KEY rc_key;
uint8_t buf[LOTUS85_MAX_BLOB_SIZE+8],rc_iv[8];
memset(buf, 0x0, sizeof(buf));
memset(rc_iv, 0, sizeof(rc_iv));
RC2_set_key(&rc_key, 8, userid_key, 64);
RC2_cbc_encrypt(ciphered_blob, buf, len, &rc_key, rc_iv, RC2_DECRYPT);
memcpy(deciphered_blob, buf, len);
}
/* Custom hash transformation function */
static void custom_password_hash_trans(uint8_t *data, uint8_t *out, uint8_t *state)
{
uint8_t buffer[48];
size_t i, j;
uint8_t c;
memset(buffer, 0, sizeof(buffer));
memcpy(buffer, state, 16);
memcpy(buffer + 16, data, 16);
for (i=0;i<16;i+=4)
{
buffer[32+i] = data[i] ^ state[i];
buffer[32+i+1] = data[i+1] ^ state[i+1];
buffer[32+i+2] = data[i+2] ^ state[i+2];
buffer[32+i+3] = data[i+3] ^ state[i+3];
}
for (j=c=0;j<18;j++)
{
for (i=0;i<sizeof(buffer);i+=6)
{
buffer[i] ^= ebits_to_num[(c-i+48) & 0xFF];
buffer[i+1] ^= ebits_to_num[(buffer[i]-i+47) & 0xFF];
buffer[i+2] ^= ebits_to_num[(buffer[i+1]-i+46) & 0xFF];
buffer[i+3] ^= ebits_to_num[(buffer[i+2]-i+45) & 0xFF];
buffer[i+4] ^= ebits_to_num[(buffer[i+3]-i+44) & 0xFF];
buffer[i+5] ^= ebits_to_num[(buffer[i+4]-i+43) & 0xFF];
c = buffer[i+5];
}
}
memcpy(state, buffer, 16);
c = out[15];
for (i=0;i<16;i+=4)
{
out[i] ^= ebits_to_num[data[i] ^ c];
out[i+1] ^= ebits_to_num[data[i+1] ^ out[i]];
out[i+2] ^= ebits_to_num[data[i+2] ^ out[i+1]];
out[i+3] ^= ebits_to_num[data[i+3] ^ out[i+2]];
c = out[i+3];
}
}
/* Custom hash function */
static void custom_password_hash(const char *password, uint8_t *out)
{
uint8_t block1[16], state[16], block2[16];
size_t len, rlen, block_pos = 0;
len = strlen(password);
memset(state, 0, sizeof(state));
memset(block2, 0, sizeof(block2));
while((block_pos + 15) < len)
{
memcpy(block1, password+block_pos, sizeof(block1));
custom_password_hash_trans(block1, state, block2);
block_pos += 16;
}
if (block_pos != len)
{
rlen = len - block_pos;
memcpy(block1, password+block_pos, rlen);
memset(block1+rlen, 16-rlen, 16-rlen);
custom_password_hash_trans(block1, state, block2);
}
else
{
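/* Length is an exact multiple of 16: append one full padding block filled
   with the value 16 (PKCS#7-style); memset's second argument is the fill
   value here, not a size typo. */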
memset(block1, sizeof(block1), sizeof(block1));
custom_password_hash_trans(block1, state, block2);
}
custom_password_hash_trans(state, state, block2);
memcpy(out, block2, sizeof(block2));
}
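/*
 * Padding sketch (illustration only): the scheme above is PKCS#7-like.
 * A 13-byte password is hashed as the block
 *   p0 .. p12 03 03 03            (3 == 16 - 13)
 * while a 16-byte password gets one extra block of sixteen 0x10 bytes.
 */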
/* Hash the constant uniquifier string followed by the password with SHA-1 */
static void password_hash(const char *password, uint8_t *hash)
{
SHA_CTX s_ctx;
uint8_t digest[SHA_DIGEST_LENGTH];
SHA1_Init(&s_ctx);
SHA1_Update(&s_ctx, LOTUS85_UNIQUE_STRING, strlen(LOTUS85_UNIQUE_STRING));
SHA1_Update(&s_ctx, password, strlen(password));
SHA1_Final(digest, &s_ctx);
memcpy(hash, digest, sizeof(digest));
}
/* Hash/checksum function used for key derivation from plaintext password */
static void compute_key_mac(uint8_t *key, size_t len, uint8_t *mac, size_t mac_len)
{
size_t i, j, mlen=mac_len-1;
uint8_t k;
for (i=0;i<16;i++)
{
k = ebits_to_num[mac[0] ^ mac[1]];
for (j=0;j<mlen;j++)
{
mac[j] = mac[j+1];
}
mac[mlen] = key[i] ^ k;
}
}
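/*
 * Derivation sketch (illustration only): mac[] acts as an 8-byte shift
 * register. Each of the 16 rounds computes k = S(mac[0] XOR mac[1]) with
 * S the ebits_to_num table, shifts the register left one byte, and
 * appends key[i] XOR k; the final register contents become the RC2 key.
 */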
/* Hash/checksum function used for digest storage */
static void compute_msg_mac(uint8_t *msg, size_t len, uint8_t *msg_mac)
{
size_t i, j;
uint8_t c;
for (i=j=0;i<len;i++)
{
if (j!=4)
{
msg_mac[j] = msg[i] ^ ebits_to_num[msg_mac[j] ^ msg_mac[j+1]];
j++;
}
else
{
msg_mac[j] = msg[i] ^ ebits_to_num[msg_mac[j] ^ msg_mac[0]];
j = 0;
}
}
c = msg_mac[0];
for (i=0;i<4;i++)
{
msg_mac[i] = msg_mac[i+1];
}
msg_mac[i] = c;
}
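/* Note (illustration only): the loop above folds each message byte into a
   5-byte running MAC through the ebits_to_num table, and the final loop
   rotates the MAC left by one byte. */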
/*
* Derive password to retrieve the RC2 secret key
* used when deciphering user blob stored in user.id file
*/
static void get_user_id_secret_key(const char *password, uint8_t *secret_key)
{
uint8_t key[16+20], mac[8];
memset(key, 0, sizeof(key));
memset(mac, 0, sizeof(mac));
custom_password_hash(password, key);
password_hash(password, key+16);
compute_key_mac(key, sizeof(key), mac, sizeof(mac));
memcpy(secret_key, mac, sizeof(mac));
}
/* Plugin initialization */
static void lotus85_init(struct fmt_main *self)
{
#if defined (_OPENMP)
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
lotus85_saved_passwords = mem_calloc(self->params.max_keys_per_crypt,
PLAINTEXT_LENGTH + 1);
lotus85_last_binary_hash1 = mem_calloc(self->params.max_keys_per_crypt,
BINARY_LENGTH);
lotus85_last_binary_hash2 = mem_calloc(self->params.max_keys_per_crypt,
BINARY_LENGTH);
}
static void done(void)
{
MEM_FREE(lotus85_last_binary_hash2);
MEM_FREE(lotus85_last_binary_hash1);
MEM_FREE(lotus85_saved_passwords);
}
/* Check if given ciphertext (hash) format is valid */
static int lotus85_valid(char *ciphertext,struct fmt_main *self)
{
int len, extra;
len = strnlen(ciphertext, CIPHERTEXT_LENGTH + 1);
if (len % 2)
return 0;
if ((len >> 1) > LOTUS85_MAX_BLOB_SIZE)
return 0;
if ((len >> 1) < LOTUS85_MIN_BLOB_SIZE)
return 0;
if (hexlenu(ciphertext, &extra) != len || extra)
return 0;
return 1;
}
static void *get_salt(char *ciphertext)
{
int i,len;
static struct custom_salt cs;
memset(&cs, 0, sizeof(cs));
len = strlen(ciphertext) >> 1;
for (i = 0; i < len; i++)
cs.lotus85_user_blob[i] = (atoi16[ARCH_INDEX(ciphertext[i << 1])] << 4) + atoi16[ARCH_INDEX(ciphertext[(i << 1) + 1])];
cs.lotus85_user_blob_len = len;
return (void*)&cs;
}
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
/* Set password at given index */
static void lotus85_set_key(char *key,int index)
{
strnzcpy(lotus85_saved_passwords[index],key,sizeof(lotus85_saved_passwords[index]));
}
/* Return password at given index as string */
static char *lotus85_get_key(int index)
{
return lotus85_saved_passwords[index];
}
/* Main callback to compute lotus digest */
static int lotus85_crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
/* Compute digest for all given plaintext passwords */
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++)
{
unsigned char user_key[8], deciphered_userid[LOTUS85_MAX_BLOB_SIZE];
memset(lotus85_last_binary_hash1[index], 0, BINARY_LENGTH);
memset(lotus85_last_binary_hash2[index], 0, BINARY_LENGTH);
memset(user_key, 0, sizeof(user_key));
memset(deciphered_userid, 0, sizeof(deciphered_userid));
/* Derive password and retrieve RC2 key */
get_user_id_secret_key(lotus85_saved_passwords[index], user_key);
/* Decipher the user blob stored in the user.id file */
decipher_userid_blob(cur_salt->lotus85_user_blob, cur_salt->lotus85_user_blob_len, user_key, deciphered_userid);
/* Store first deciphered digest */
memcpy(lotus85_last_binary_hash1[index], deciphered_userid + cur_salt->lotus85_user_blob_len - BINARY_LENGTH, BINARY_LENGTH);
/* Compute digest of deciphered message */
compute_msg_mac(deciphered_userid, cur_salt->lotus85_user_blob_len - BINARY_LENGTH, lotus85_last_binary_hash2[index]);
}
return count;
}
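/*
 * Verification principle (illustration only): the deciphered blob is
 * payload || digest, with the digest in the trailing BINARY_LENGTH bytes.
 * hash1 stores that embedded digest and hash2 stores compute_msg_mac()
 * over the payload; a candidate password is correct exactly when the two
 * agree, which is what lotus85_cmp_all()/lotus85_cmp_one() test below.
 */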
/* Check if any of the last computed hashes match */
static int lotus85_cmp_all(void *binary,int count)
{
int i;
for (i = 0; i < count; i++)
{
if (!memcmp(lotus85_last_binary_hash1[i],lotus85_last_binary_hash2[i],BINARY_LENGTH))
return 1;
}
return 0;
}
/* Check if the last computed hash at the given index matches */
static int lotus85_cmp_one(void *binary,int index)
{
return !memcmp(lotus85_last_binary_hash1[index],lotus85_last_binary_hash2[index],BINARY_LENGTH);
}
/* There is no ASCII ciphertext to compare against, so always return true */
static int lotus85_cmp_exact(char *source,int index)
{
return 1;
}
static struct fmt_tests lotus85_tests[] =
{
{"0040B2B17C344C236953F955B28E4865014034D1F664489D7F42B35FB6928A94DCFFEF7750CE029F94C83A582A80B4662D49B3FA45816143", "notesisterrible"},
{"CBCFC612FAE3154316223787C7CD29AD39BEDF4288FCDE310B32FD809C75F5FDC521667D5F6E7A047766F0E60952F7891593FFAF45AD0C15", "openwall"},
{NULL}
};
/* JtR lotus 8.5 structure registration */
struct fmt_main fmt_lotus_85 =
{
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
{ NULL },
lotus85_tests
}, {
lotus85_init,
done,
fmt_default_reset,
fmt_default_prepare,
lotus85_valid,
fmt_default_split,
fmt_default_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
lotus85_set_key, /* Set plaintext password */
lotus85_get_key, /* Get plaintext password */
fmt_default_clear_keys,
lotus85_crypt_all, /* Main hash function */
{
fmt_default_get_hash
},
lotus85_cmp_all, /* Compare all hashes (binary) */
lotus85_cmp_one, /* Compare one hash (binary) */
lotus85_cmp_exact
}
};
#endif /* plugin stanza */
|
pzgstrs_lsum.c | /*! \file
Copyright (c) 2003, The Regents of the University of California, through
Lawrence Berkeley National Laboratory (subject to receipt of any required
approvals from U.S. Dept. of Energy)
All rights reserved.
The source code is distributed under BSD license, see the file License.txt
at the top-level directory.
*/
/*! @file
* \brief Perform local block modifications: lsum[i] -= L_i,k * X[k]
*
* <pre>
* -- Distributed SuperLU routine (version 6.1) --
* Lawrence Berkeley National Lab, Univ. of California Berkeley.
* March 15, 2003
*
* Modified:
* February 7, 2001 use MPI_Isend/MPI_Irecv
* October 2, 2001 use MPI_Isend/MPI_Irecv with MPI_Test
* February 8, 2019 version 6.1.1
* </pre>
*/
#include "superlu_zdefs.h"
#include "superlu_defs.h"
#ifndef CACHELINE
#define CACHELINE 64 /* bytes: Xeon Phi KNL, Cori Haswell, Edison */
#endif
#define ISEND_IRECV
/*
* Function prototypes
*/
#ifdef _CRAY
fortran void CTRSM(_fcd, _fcd, _fcd, _fcd, int*, int*, doublecomplex*,
doublecomplex*, int*, doublecomplex*, int*);
fortran void CGEMM(_fcd, _fcd, int*, int*, int*, doublecomplex*, doublecomplex*,
int*, doublecomplex*, int*, doublecomplex*, doublecomplex*, int*);
_fcd ftcs1;
_fcd ftcs2;
_fcd ftcs3;
#endif
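/*
 * Dense sketch (illustration only; hypothetical helper using plain double
 * instead of doublecomplex, no supernodal storage): the per-block update
 * performed by the routines below is
 *     rtemp   = L(i,k) * X(k)      (GEMM)
 *     lsum(i) = lsum(i) - rtemp    (scatter through the block's row indices)
 */
static void lsum_block_update_demo(int nbrow, int knsupc, int nrhs,
                                   const double *Lik, /* nbrow x knsupc, column-major */
                                   const double *Xk,  /* knsupc x nrhs */
                                   double *lsum_i)    /* nbrow x nrhs */
{
    int i, j, l;
    for (j = 0; j < nrhs; ++j)
        for (i = 0; i < nbrow; ++i) {
            double t = 0.0;
            for (l = 0; l < knsupc; ++l)
                t += Lik[i + l*nbrow] * Xk[l + j*knsupc];
            lsum_i[i + j*nbrow] -= t;
        }
}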
/************************************************************************/
/*! \brief
*
* <pre>
* Purpose
* =======
* Perform local block modifications: lsum[i] -= L_i,k * X[k].
* </pre>
*/
void zlsum_fmod
/************************************************************************/
(
doublecomplex *lsum, /* Sum of local modifications. */
doublecomplex *x, /* X array (local) */
doublecomplex *xk, /* X[k]. */
doublecomplex *rtemp, /* Result of full matrix-vector multiply. */
int nrhs, /* Number of right-hand sides. */
int knsupc, /* Size of supernode k. */
int_t k, /* The k-th component of X. */
int_t *fmod, /* Modification count for L-solve. */
int_t nlb, /* Number of L blocks. */
int_t lptr, /* Starting position in lsub[*]. */
int_t luptr, /* Starting position in lusup[*]. */
int_t *xsup,
gridinfo_t *grid,
LocalLU_t *Llu,
MPI_Request send_req[], /* input/output */
SuperLUStat_t *stat
)
{
doublecomplex alpha = {1.0, 0.0}, beta = {0.0, 0.0};
doublecomplex *lusup, *lusup1;
doublecomplex *dest;
int iam, iknsupc, myrow, nbrow, nsupr, nsupr1, p, pi;
int_t i, ii, ik, il, ikcol, irow, j, lb, lk, lib, rel;
int_t *lsub, *lsub1, nlb1, lptr1, luptr1;
int_t *ilsum = Llu->ilsum; /* Starting position of each supernode in lsum. */
int_t *frecv = Llu->frecv;
int_t **fsendx_plist = Llu->fsendx_plist;
MPI_Status status;
int test_flag;
#if ( PROFlevel>=1 )
double t1, t2;
float msg_vol = 0, msg_cnt = 0;
#endif
#if ( PROFlevel>=1 )
TIC(t1);
#endif
iam = grid->iam;
myrow = MYROW( iam, grid );
lk = LBj( k, grid ); /* Local block number, column-wise. */
lsub = Llu->Lrowind_bc_ptr[lk];
lusup = Llu->Lnzval_bc_ptr[lk];
nsupr = lsub[1];
for (lb = 0; lb < nlb; ++lb) {
ik = lsub[lptr]; /* Global block number, row-wise. */
nbrow = lsub[lptr+1];
#ifdef _CRAY
CGEMM( ftcs2, ftcs2, &nbrow, &nrhs, &knsupc,
&alpha, &lusup[luptr], &nsupr, xk,
&knsupc, &beta, rtemp, &nbrow );
#elif defined (USE_VENDOR_BLAS)
zgemm_( "N", "N", &nbrow, &nrhs, &knsupc,
&alpha, &lusup[luptr], &nsupr, xk,
&knsupc, &beta, rtemp, &nbrow, 1, 1 );
#else
zgemm_( "N", "N", &nbrow, &nrhs, &knsupc,
&alpha, &lusup[luptr], &nsupr, xk,
&knsupc, &beta, rtemp, &nbrow );
#endif
stat->ops[SOLVE] += 8 * nbrow * nrhs * knsupc + 2 * nbrow * nrhs;
lk = LBi( ik, grid ); /* Local block number, row-wise. */
iknsupc = SuperSize( ik );
il = LSUM_BLK( lk );
dest = &lsum[il];
lptr += LB_DESCRIPTOR;
rel = xsup[ik]; /* Global row index of block ik. */
for (i = 0; i < nbrow; ++i) {
irow = lsub[lptr++] - rel; /* Relative row. */
RHS_ITERATE(j)
z_sub(&dest[irow + j*iknsupc],
&dest[irow + j*iknsupc],
&rtemp[i + j*nbrow]);
}
luptr += nbrow;
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat->utime[SOL_GEMM] += t2;
#endif
if ( (--fmod[lk])==0 ) { /* Local accumulation done. */
ikcol = PCOL( ik, grid );
p = PNUM( myrow, ikcol, grid );
if ( iam != p ) {
#ifdef ISEND_IRECV
MPI_Isend( &lsum[il - LSUM_H], iknsupc * nrhs + LSUM_H,
SuperLU_MPI_DOUBLE_COMPLEX, p, LSUM, grid->comm,
&send_req[Llu->SolveMsgSent++] );
#else
#ifdef BSEND
MPI_Bsend( &lsum[il - LSUM_H], iknsupc * nrhs + LSUM_H,
SuperLU_MPI_DOUBLE_COMPLEX, p, LSUM, grid->comm );
#else
MPI_Send( &lsum[il - LSUM_H], iknsupc * nrhs + LSUM_H,
SuperLU_MPI_DOUBLE_COMPLEX, p, LSUM, grid->comm );
#endif
#endif
#if ( DEBUGlevel>=2 )
printf("(%2d) Sent LSUM[%2.0f], size %2d, to P %2d\n",
iam, lsum[il-LSUM_H], iknsupc*nrhs+LSUM_H, p);
#endif
} else { /* Diagonal process: X[i] += lsum[i]. */
ii = X_BLK( lk );
RHS_ITERATE(j)
for (i = 0; i < iknsupc; ++i)
z_add(&x[i + ii + j*iknsupc],
&x[i + ii + j*iknsupc],
&lsum[i + il + j*iknsupc]);
if ( frecv[lk]==0 ) { /* Becomes a leaf node. */
fmod[lk] = -1; /* Do not solve X[k] in the future. */
lk = LBj( ik, grid );/* Local block number, column-wise. */
lsub1 = Llu->Lrowind_bc_ptr[lk];
lusup1 = Llu->Lnzval_bc_ptr[lk];
nsupr1 = lsub1[1];
#if ( PROFlevel>=1 )
TIC(t1);
#endif
#ifdef _CRAY
CTRSM(ftcs1, ftcs1, ftcs2, ftcs3, &iknsupc, &nrhs, &alpha,
lusup1, &nsupr1, &x[ii], &iknsupc);
#elif defined (USE_VENDOR_BLAS)
ztrsm_("L", "L", "N", "U", &iknsupc, &nrhs, &alpha,
lusup1, &nsupr1, &x[ii], &iknsupc, 1, 1, 1, 1);
#else
ztrsm_("L", "L", "N", "U", &iknsupc, &nrhs, &alpha,
lusup1, &nsupr1, &x[ii], &iknsupc);
#endif
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat->utime[SOL_TRSM] += t2;
#endif
stat->ops[SOLVE] += 4 * iknsupc * (iknsupc - 1) * nrhs
+ 10 * knsupc * nrhs; /* complex division */
#if ( DEBUGlevel>=2 )
printf("(%2d) Solve X[%2d]\n", iam, ik);
#endif
/*
* Send Xk to process column Pc[k].
*/
for (p = 0; p < grid->nprow; ++p) {
if ( fsendx_plist[lk][p] != EMPTY ) {
pi = PNUM( p, ikcol, grid );
#ifdef ISEND_IRECV
MPI_Isend( &x[ii - XK_H], iknsupc * nrhs + XK_H,
SuperLU_MPI_DOUBLE_COMPLEX, pi, Xk, grid->comm,
&send_req[Llu->SolveMsgSent++] );
#else
#ifdef BSEND
MPI_Bsend( &x[ii - XK_H], iknsupc * nrhs + XK_H,
SuperLU_MPI_DOUBLE_COMPLEX, pi, Xk, grid->comm );
#else
MPI_Send( &x[ii - XK_H], iknsupc * nrhs + XK_H,
SuperLU_MPI_DOUBLE_COMPLEX, pi, Xk, grid->comm );
#endif
#endif
#if ( DEBUGlevel>=2 )
printf("(%2d) Sent X[%2.0f] to P %2d\n",
iam, x[ii-XK_H], pi);
#endif
}
}
/*
* Perform local block modifications.
*/
nlb1 = lsub1[0] - 1;
lptr1 = BC_HEADER + LB_DESCRIPTOR + iknsupc;
luptr1 = iknsupc; /* Skip diagonal block L(I,I). */
zlsum_fmod(lsum, x, &x[ii], rtemp, nrhs, iknsupc, ik,
fmod, nlb1, lptr1, luptr1, xsup,
grid, Llu, send_req, stat);
} /* if frecv[lk] == 0 */
} /* if iam == p */
} /* if fmod[lk] == 0 */
} /* for lb ... */
} /* zLSUM_FMOD */
/************************************************************************/
void zlsum_bmod
/************************************************************************/
(
doublecomplex *lsum, /* Sum of local modifications. */
doublecomplex *x, /* X array (local). */
doublecomplex *xk, /* X[k]. */
int nrhs, /* Number of right-hand sides. */
int_t k, /* The k-th component of X. */
int_t *bmod, /* Modification count for L-solve. */
int_t *Urbs, /* Number of row blocks in each block column of U.*/
Ucb_indptr_t **Ucb_indptr,/* Vertical linked list pointing to Uindex[].*/
int_t **Ucb_valptr, /* Vertical linked list pointing to Unzval[]. */
int_t *xsup,
gridinfo_t *grid,
LocalLU_t *Llu,
MPI_Request send_req[], /* input/output */
SuperLUStat_t *stat
)
{
/*
* Purpose
* =======
* Perform local block modifications: lsum[i] -= U_i,k * X[k].
*/
doublecomplex alpha = {1.0, 0.0}, beta = {0.0, 0.0};
int iam, iknsupc, knsupc, myrow, nsupr, p, pi;
int_t fnz, gik, gikcol, i, ii, ik, ikfrow, iklrow, il, irow,
j, jj, lk, lk1, nub, ub, uptr;
int_t *usub;
doublecomplex *uval, *dest, *y;
doublecomplex temp;
int_t *lsub;
doublecomplex *lusup;
int_t *ilsum = Llu->ilsum; /* Starting position of each supernode in lsum. */
int_t *brecv = Llu->brecv;
int_t **bsendx_plist = Llu->bsendx_plist;
MPI_Status status;
int test_flag;
iam = grid->iam;
myrow = MYROW( iam, grid );
knsupc = SuperSize( k );
lk = LBj( k, grid ); /* Local block number, column-wise. */
nub = Urbs[lk]; /* Number of U blocks in block column lk */
for (ub = 0; ub < nub; ++ub) {
ik = Ucb_indptr[lk][ub].lbnum; /* Local block number, row-wise. */
usub = Llu->Ufstnz_br_ptr[ik];
uval = Llu->Unzval_br_ptr[ik];
i = Ucb_indptr[lk][ub].indpos; /* Start of the block in usub[]. */
i += UB_DESCRIPTOR;
il = LSUM_BLK( ik );
gik = ik * grid->nprow + myrow;/* Global block number, row-wise. */
iknsupc = SuperSize( gik );
ikfrow = FstBlockC( gik );
iklrow = FstBlockC( gik+1 );
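/* ikfrow/iklrow bracket the global row range of block row gik; a column
   segment of U is nonzero iff its first-nonzero row fnz is below iklrow,
   and it is then dense from row fnz through iklrow-1. */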
RHS_ITERATE(j) {
dest = &lsum[il + j*iknsupc];
y = &xk[j*knsupc];
uptr = Ucb_valptr[lk][ub]; /* Start of the block in uval[]. */
for (jj = 0; jj < knsupc; ++jj) {
fnz = usub[i + jj];
if ( fnz < iklrow ) { /* Nonzero segment. */
/* AXPY */
for (irow = fnz; irow < iklrow; ++irow) {
zz_mult(&temp, &uval[uptr], &y[jj]);
z_sub(&dest[irow - ikfrow], &dest[irow - ikfrow],
&temp);
++uptr;
}
stat->ops[SOLVE] += 8 * (iklrow - fnz);
}
} /* for jj ... */
}
if ( (--bmod[ik]) == 0 ) { /* Local accumulation done. */
gikcol = PCOL( gik, grid );
p = PNUM( myrow, gikcol, grid );
if ( iam != p ) {
#ifdef ISEND_IRECV
MPI_Isend( &lsum[il - LSUM_H], iknsupc * nrhs + LSUM_H,
SuperLU_MPI_DOUBLE_COMPLEX, p, LSUM, grid->comm,
&send_req[Llu->SolveMsgSent++] );
#else
#ifdef BSEND
MPI_Bsend( &lsum[il - LSUM_H], iknsupc * nrhs + LSUM_H,
SuperLU_MPI_DOUBLE_COMPLEX, p, LSUM, grid->comm );
#else
MPI_Send( &lsum[il - LSUM_H], iknsupc * nrhs + LSUM_H,
SuperLU_MPI_DOUBLE_COMPLEX, p, LSUM, grid->comm );
#endif
#endif
#if ( DEBUGlevel>=2 )
printf("(%2d) Sent LSUM[%2.0f], size %2d, to P %2d\n",
iam, lsum[il-LSUM_H], iknsupc*nrhs+LSUM_H, p);
#endif
} else { /* Diagonal process: X[i] += lsum[i]. */
ii = X_BLK( ik );
dest = &x[ii];
RHS_ITERATE(j)
for (i = 0; i < iknsupc; ++i)
z_add(&dest[i + j*iknsupc], &dest[i + j*iknsupc],
&lsum[i + il + j*iknsupc]);
if ( !brecv[ik] ) { /* Becomes a leaf node. */
bmod[ik] = -1; /* Do not solve X[k] in the future. */
lk1 = LBj( gik, grid ); /* Local block number. */
lsub = Llu->Lrowind_bc_ptr[lk1];
lusup = Llu->Lnzval_bc_ptr[lk1];
nsupr = lsub[1];
#ifdef _CRAY
CTRSM(ftcs1, ftcs3, ftcs2, ftcs2, &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc);
#elif defined (USE_VENDOR_BLAS)
ztrsm_("L", "U", "N", "N", &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc, 1, 1, 1, 1);
#else
ztrsm_("L", "U", "N", "N", &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc);
#endif
stat->ops[SOLVE] += 4 * iknsupc * (iknsupc + 1) * nrhs
+ 10 * iknsupc * nrhs; /* complex division */
#if ( DEBUGlevel>=2 )
printf("(%2d) Solve X[%2d]\n", iam, gik);
#endif
/*
* Send Xk to process column Pc[k].
*/
for (p = 0; p < grid->nprow; ++p) {
if ( bsendx_plist[lk1][p] != EMPTY ) {
pi = PNUM( p, gikcol, grid );
#ifdef ISEND_IRECV
MPI_Isend( &x[ii - XK_H], iknsupc * nrhs + XK_H,
SuperLU_MPI_DOUBLE_COMPLEX, pi, Xk, grid->comm,
&send_req[Llu->SolveMsgSent++] );
#else
#ifdef BSEND
MPI_Bsend( &x[ii - XK_H], iknsupc * nrhs + XK_H,
SuperLU_MPI_DOUBLE_COMPLEX, pi, Xk, grid->comm );
#else
MPI_Send( &x[ii - XK_H], iknsupc * nrhs + XK_H,
SuperLU_MPI_DOUBLE_COMPLEX, pi, Xk, grid->comm );
#endif
#endif
#if ( DEBUGlevel>=2 )
printf("(%2d) Sent X[%2.0f] to P %2d\n",
iam, x[ii-XK_H], pi);
#endif
}
}
/*
* Perform local block modifications.
*/
if ( Urbs[lk1] )
zlsum_bmod(lsum, x, &x[ii], nrhs, gik, bmod, Urbs,
Ucb_indptr, Ucb_valptr, xsup, grid, Llu,
send_req, stat);
} /* if brecv[ik] == 0 */
}
} /* if bmod[ik] == 0 */
} /* for ub ... */
} /* zLSUM_BMOD */
/************************************************************************/
/*! \brief
*
* <pre>
* Purpose
* =======
* Perform local block modifications: lsum[i] -= L_i,k * X[k].
* </pre>
*/
void zlsum_fmod_inv
/************************************************************************/
(
doublecomplex *lsum, /* Sum of local modifications. */
doublecomplex *x, /* X array (local) */
doublecomplex *xk, /* X[k]. */
doublecomplex *rtemp, /* Result of full matrix-vector multiply. */
int nrhs, /* Number of right-hand sides. */
int_t k, /* The k-th component of X. */
int_t *fmod, /* Modification count for L-solve. */
int_t *xsup,
gridinfo_t *grid,
LocalLU_t *Llu,
SuperLUStat_t **stat,
int_t *leaf_send,
int_t *nleaf_send,
int_t sizelsum,
int_t sizertemp,
int_t recurlevel,
int_t maxsuper,
int thread_id,
int num_thread
)
{
doublecomplex alpha = {1.0, 0.0}, beta = {0.0, 0.0},malpha={-1.0, 0.0};
doublecomplex *lusup, *lusup1;
doublecomplex *dest;
doublecomplex *Linv;/* Inverse of diagonal block */
int iam, iknsupc, myrow, krow, nbrow, nbrow1, nbrow_ref, nsupr, nsupr1, p, pi, idx_r,m;
int_t i, ii,jj, ik, il, ikcol, irow, j, lb, lk, rel, lib,lready;
int_t *lsub, *lsub1, nlb1, lptr1, luptr1,*lloc;
int_t *ilsum = Llu->ilsum; /* Starting position of each supernode in lsum. */
int_t *frecv = Llu->frecv;
int_t **fsendx_plist = Llu->fsendx_plist;
int_t luptr_tmp,luptr_tmp1,lptr1_tmp,maxrecvsz, idx_i, idx_v,idx_n, idx_l, fmod_tmp, lbstart,lbend,nn,Nchunk,nlb_loc,remainder;
int thread_id1;
flops_t ops_loc=0.0;
MPI_Status status;
int test_flag;
yes_no_t done;
BcTree *LBtree_ptr = Llu->LBtree_ptr;
RdTree *LRtree_ptr = Llu->LRtree_ptr;
int_t* idx_lsum,idx_lsum1;
doublecomplex *rtemp_loc;
int_t ldalsum;
int_t nleaf_send_tmp;
int_t lptr; /* Starting position in lsub[*]. */
int_t luptr; /* Starting position in lusup[*]. */
int_t iword = sizeof(int_t);
int_t dword = sizeof (double);
int_t aln_d,aln_i;
aln_d = ceil(CACHELINE/(double)dword);
aln_i = ceil(CACHELINE/(double)iword);
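/* aln_d/aln_i stride per-block counters (e.g. fmod[lk*aln_i]) so that each
   counter occupies its own cache line and concurrent atomic updates from
   different threads do not false-share. */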
int knsupc; /* Size of supernode k. */
int_t nlb; /* Number of L blocks. */
knsupc = SuperSize( k );
lk = LBj( k, grid ); /* Local block number, column-wise. */
lsub = Llu->Lrowind_bc_ptr[lk];
nlb = lsub[0] - 1;
ldalsum=Llu->ldalsum;
rtemp_loc = &rtemp[sizertemp* thread_id];
// #if ( PROFlevel>=1 )
double t1, t2, t3, t4;
float msg_vol = 0, msg_cnt = 0;
// #endif
if(nlb>0){
iam = grid->iam;
myrow = MYROW( iam, grid );
lusup = Llu->Lnzval_bc_ptr[lk];
lloc = Llu->Lindval_loc_bc_ptr[lk];
nsupr = lsub[1];
// printf("nlb: %5d lk: %5d\n",nlb,lk);
// fflush(stdout);
krow = PROW( k, grid );
if(myrow==krow){
idx_n = 1;
idx_i = nlb+2;
idx_v = 2*nlb+3;
luptr_tmp = lloc[idx_v];
m = nsupr-knsupc;
}else{
idx_n = 0;
idx_i = nlb;
idx_v = 2*nlb;
luptr_tmp = lloc[idx_v];
m = nsupr;
}
assert(m>0);
if(m>8*maxsuper){
// if(0){
// Nchunk=floor(num_thread/2.0)+1;
Nchunk=SUPERLU_MIN(num_thread,nlb);
// Nchunk=1;
nlb_loc = floor(((double)nlb)/Nchunk);
remainder = nlb % Nchunk;
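/* Partition sketch (illustration only): the nlb blocks are split into
   Nchunk contiguous ranges. The first `remainder` chunks get nlb_loc+1
   blocks each and the rest get nlb_loc, so chunk nn covers
       [nn*(nlb_loc+1), (nn+1)*(nlb_loc+1))                  if nn < remainder
       [remainder + nn*nlb_loc, remainder + (nn+1)*nlb_loc)  otherwise. */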
#ifdef _OPENMP
#pragma omp taskloop private (lptr1,luptr1,nlb1,thread_id1,lsub1,lusup1,nsupr1,Linv,nn,lbstart,lbend,luptr_tmp1,nbrow,lb,lptr1_tmp,rtemp_loc,nbrow_ref,lptr,nbrow1,ik,rel,lk,iknsupc,il,i,irow,fmod_tmp,ikcol,p,ii,jj,t1,t2,j,nleaf_send_tmp) untied nogroup
#endif
for (nn=0;nn<Nchunk;++nn){
#ifdef _OPENMP
thread_id1 = omp_get_thread_num ();
#else
thread_id1 = 0;
#endif
rtemp_loc = &rtemp[sizertemp* thread_id1];
if(nn<remainder){
lbstart = nn*(nlb_loc+1);
lbend = (nn+1)*(nlb_loc+1);
}else{
lbstart = remainder+nn*nlb_loc;
lbend = remainder + (nn+1)*nlb_loc;
}
if(lbstart<lbend){
#if ( PROFlevel>=1 )
TIC(t1);
#endif
luptr_tmp1 = lloc[lbstart+idx_v];
nbrow=0;
for (lb = lbstart; lb < lbend; ++lb){
lptr1_tmp = lloc[lb+idx_i];
nbrow += lsub[lptr1_tmp+1];
}
#ifdef _CRAY
CGEMM( ftcs2, ftcs2, &nbrow, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp1], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &nbrow );
#elif defined (USE_VENDOR_BLAS)
zgemm_( "N", "N", &nbrow, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp1], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &nbrow, 1, 1 );
#else
zgemm_( "N", "N", &nbrow, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp1], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &nbrow );
#endif
nbrow_ref=0;
for (lb = lbstart; lb < lbend; ++lb){
lptr1_tmp = lloc[lb+idx_i];
lptr= lptr1_tmp+2;
nbrow1 = lsub[lptr1_tmp+1];
ik = lsub[lptr1_tmp]; /* Global block number, row-wise. */
rel = xsup[ik]; /* Global row index of block ik. */
lk = LBi( ik, grid ); /* Local block number, row-wise. */
iknsupc = SuperSize( ik );
il = LSUM_BLK( lk );
RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd
#endif
for (i = 0; i < nbrow1; ++i) {
irow = lsub[lptr+i] - rel; /* Relative row. */
z_sub(&lsum[il+irow + j*iknsupc+sizelsum*thread_id1],
&lsum[il+irow + j*iknsupc+sizelsum*thread_id1],
&rtemp_loc[nbrow_ref+i + j*nbrow]);
}
nbrow_ref+=nbrow1;
}
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id1]->utime[SOL_GEMM] += t2;
#endif
for (lb=lbstart;lb<lbend;lb++){
lk = lloc[lb+idx_n];
#ifdef _OPENMP
#pragma omp atomic capture
#endif
fmod_tmp=--fmod[lk*aln_i];
if ( fmod_tmp==0 ) { /* Local accumulation done. */
lptr1_tmp = lloc[lb+idx_i];
ik = lsub[lptr1_tmp]; /* Global block number, row-wise. */
lk = LBi( ik, grid ); /* Local block number, row-wise. */
iknsupc = SuperSize( ik );
il = LSUM_BLK( lk );
ikcol = PCOL( ik, grid );
p = PNUM( myrow, ikcol, grid );
if ( iam != p ) {
for (ii=1;ii<num_thread;ii++)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
z_add(&lsum[il + jj ],
&lsum[il + jj ],
&lsum[il + jj + ii*sizelsum]);
#ifdef _OPENMP
#pragma omp atomic capture
#endif
nleaf_send_tmp = ++nleaf_send[0];
leaf_send[(nleaf_send_tmp-1)*aln_i] = -lk-1;
// RdTree_forwardMessageSimple(LRtree_ptr[lk],&lsum[il - LSUM_H ],'z');
} else { /* Diagonal process: X[i] += lsum[i]. */
#if ( PROFlevel>=1 )
TIC(t1);
#endif
for (ii=1;ii<num_thread;ii++)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
z_add(&lsum[il + jj ],
&lsum[il + jj ],
&lsum[il + jj + ii*sizelsum]);
ii = X_BLK( lk );
RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd
#endif
for (i = 0; i < iknsupc; ++i)
z_add(&x[i + ii + j*iknsupc],
&x[i + ii + j*iknsupc],
&lsum[i + il + j*iknsupc] );
// fmod[lk] = -1; /* Do not solve X[k] in the future. */
lk = LBj( ik, grid );/* Local block number, column-wise. */
lsub1 = Llu->Lrowind_bc_ptr[lk];
lusup1 = Llu->Lnzval_bc_ptr[lk];
nsupr1 = lsub1[1];
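/* The diagonal solve X[k] = L(k,k)^{-1} X[k] is applied either by
   multiplying with the precomputed inverse (Llu->inv == 1, GEMM with
   Linv) or by a triangular solve (TRSM). */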
if(Llu->inv == 1){
Linv = Llu->Linv_bc_ptr[lk];
#ifdef _CRAY
CGEMM( ftcs2, ftcs2, &iknsupc, &nrhs, &iknsupc,
&alpha, Linv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#elif defined (USE_VENDOR_BLAS)
zgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Linv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc, 1, 1 );
#else
zgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Linv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#endif
#ifdef _OPENMP
#pragma omp simd
#endif
for (i=0 ; i<iknsupc*nrhs ; i++){
z_copy(&x[ii+i],&rtemp_loc[i]);
}
}else{
#ifdef _CRAY
CTRSM(ftcs1, ftcs1, ftcs2, ftcs3, &iknsupc, &nrhs, &alpha,
lusup1, &nsupr1, &x[ii], &iknsupc);
#elif defined (USE_VENDOR_BLAS)
ztrsm_("L", "L", "N", "U", &iknsupc, &nrhs, &alpha,
lusup1, &nsupr1, &x[ii], &iknsupc, 1, 1, 1, 1);
#else
ztrsm_("L", "L", "N", "U", &iknsupc, &nrhs, &alpha,
lusup1, &nsupr1, &x[ii], &iknsupc);
#endif
}
// for (i=0 ; i<iknsupc*nrhs ; i++){
// printf("x_lsum: %f %f\n",x[ii+i].r,x[ii+i].i);
// fflush(stdout);
// }
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id1]->utime[SOL_TRSM] += t2;
#endif
stat[thread_id1]->ops[SOLVE] += 4 * iknsupc * (iknsupc - 1) * nrhs
+ 10 * knsupc * nrhs; /* complex division */
#if ( DEBUGlevel>=2 )
printf("(%2d) Solve X[%2d]\n", iam, ik);
#endif
/*
* Send Xk to process column Pc[k].
*/
if(LBtree_ptr[lk]!=NULL){
#ifdef _OPENMP
#pragma omp atomic capture
#endif
nleaf_send_tmp = ++nleaf_send[0];
leaf_send[(nleaf_send_tmp-1)*aln_i] = lk;
}
/*
* Perform local block modifications.
*/
// #ifdef _OPENMP
// #pragma omp task firstprivate (Llu,sizelsum,iknsupc,ii,ik,lsub1,x,rtemp,fmod,lsum,stat,nrhs,grid,xsup,recurlevel) private(lptr1,luptr1,nlb1,thread_id1) untied priority(1)
// #endif
{
zlsum_fmod_inv(lsum, x, &x[ii], rtemp, nrhs, ik,
fmod, xsup,
grid, Llu, stat, leaf_send, nleaf_send ,sizelsum,sizertemp,1+recurlevel,maxsuper,thread_id1,num_thread);
}
// } /* if frecv[lk] == 0 */
} /* if iam == p */
} /* if fmod[lk] == 0 */
}
}
}
}else{
#if ( PROFlevel>=1 )
TIC(t1);
#endif
#ifdef _CRAY
CGEMM( ftcs2, ftcs2, &m, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &m );
#elif defined (USE_VENDOR_BLAS)
zgemm_( "N", "N", &m, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &m, 1, 1 );
#else
zgemm_( "N", "N", &m, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &m );
#endif
nbrow=0;
for (lb = 0; lb < nlb; ++lb){
lptr1_tmp = lloc[lb+idx_i];
nbrow += lsub[lptr1_tmp+1];
}
nbrow_ref=0;
for (lb = 0; lb < nlb; ++lb){
lptr1_tmp = lloc[lb+idx_i];
lptr= lptr1_tmp+2;
nbrow1 = lsub[lptr1_tmp+1];
ik = lsub[lptr1_tmp]; /* Global block number, row-wise. */
rel = xsup[ik]; /* Global row index of block ik. */
lk = LBi( ik, grid ); /* Local block number, row-wise. */
iknsupc = SuperSize( ik );
il = LSUM_BLK( lk );
RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd
#endif
for (i = 0; i < nbrow1; ++i) {
irow = lsub[lptr+i] - rel; /* Relative row. */
z_sub(&lsum[il+irow + j*iknsupc+sizelsum*thread_id],
&lsum[il+irow + j*iknsupc+sizelsum*thread_id],
&rtemp_loc[nbrow_ref+i + j*nbrow]);
}
nbrow_ref+=nbrow1;
}
// TOC(t3, t1);
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id]->utime[SOL_GEMM] += t2;
#endif
for (lb=0;lb<nlb;lb++){
lk = lloc[lb+idx_n];
#ifdef _OPENMP
#pragma omp atomic capture
#endif
fmod_tmp=--fmod[lk*aln_i];
if ( fmod_tmp==0 ) { /* Local accumulation done. */
lptr1_tmp = lloc[lb+idx_i];
ik = lsub[lptr1_tmp]; /* Global block number, row-wise. */
lk = LBi( ik, grid ); /* Local block number, row-wise. */
iknsupc = SuperSize( ik );
il = LSUM_BLK( lk );
ikcol = PCOL( ik, grid );
p = PNUM( myrow, ikcol, grid );
if ( iam != p ) {
for (ii=1;ii<num_thread;ii++)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
z_add(&lsum[il + jj ],
&lsum[il + jj ],
&lsum[il + jj + ii*sizelsum]);
#ifdef _OPENMP
#pragma omp atomic capture
#endif
nleaf_send_tmp = ++nleaf_send[0];
leaf_send[(nleaf_send_tmp-1)*aln_i] = -lk-1;
} else { /* Diagonal process: X[i] += lsum[i]. */
#if ( PROFlevel>=1 )
TIC(t1);
#endif
for (ii=1;ii<num_thread;ii++)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
z_add(&lsum[il + jj ],
&lsum[il + jj ],
&lsum[il + jj + ii*sizelsum]);
ii = X_BLK( lk );
RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd
#endif
for (i = 0; i < iknsupc; ++i)
z_add(&x[i + ii + j*iknsupc],
&x[i + ii + j*iknsupc],
&lsum[i + il + j*iknsupc] );
lk = LBj( ik, grid );/* Local block number, column-wise. */
lsub1 = Llu->Lrowind_bc_ptr[lk];
lusup1 = Llu->Lnzval_bc_ptr[lk];
nsupr1 = lsub1[1];
if(Llu->inv == 1){
Linv = Llu->Linv_bc_ptr[lk];
#ifdef _CRAY
CGEMM( ftcs2, ftcs2, &iknsupc, &nrhs, &iknsupc,
&alpha, Linv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#elif defined (USE_VENDOR_BLAS)
zgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Linv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc, 1, 1 );
#else
zgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Linv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#endif
#ifdef _OPENMP
#pragma omp simd
#endif
for (i=0 ; i<iknsupc*nrhs ; i++){
z_copy(&x[ii+i],&rtemp_loc[i]);
}
}else{
#ifdef _CRAY
CTRSM(ftcs1, ftcs1, ftcs2, ftcs3, &iknsupc, &nrhs, &alpha,
lusup1, &nsupr1, &x[ii], &iknsupc);
#elif defined (USE_VENDOR_BLAS)
ztrsm_("L", "L", "N", "U", &iknsupc, &nrhs, &alpha,
lusup1, &nsupr1, &x[ii], &iknsupc, 1, 1, 1, 1);
#else
ztrsm_("L", "L", "N", "U", &iknsupc, &nrhs, &alpha,
lusup1, &nsupr1, &x[ii], &iknsupc);
#endif
}
// for (i=0 ; i<iknsupc*nrhs ; i++){
// printf("x_lsum: %f %f\n",x[ii+i].r,x[ii+i].i);
// fflush(stdout);
// }
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id]->utime[SOL_TRSM] += t2;
#endif
stat[thread_id]->ops[SOLVE] += 4 * iknsupc * (iknsupc - 1) * nrhs
+ 10 * knsupc * nrhs; /* complex division */
#if ( DEBUGlevel>=2 )
printf("(%2d) Solve X[%2d]\n", iam, ik);
#endif
/*
* Send Xk to process column Pc[k].
*/
if(LBtree_ptr[lk]!=NULL){
#ifdef _OPENMP
#pragma omp atomic capture
#endif
nleaf_send_tmp = ++nleaf_send[0];
// printf("nleaf_send_tmp %5d lk %5d\n",nleaf_send_tmp);
leaf_send[(nleaf_send_tmp-1)*aln_i] = lk;
// BcTree_forwardMessageSimple(LBtree_ptr[lk],&x[ii - XK_H],'z');
}
/*
* Perform local block modifications.
*/
// #ifdef _OPENMP
// #pragma omp task firstprivate (Llu,sizelsum,iknsupc,ii,ik,lsub1,x,rtemp,fmod,lsum,stat,nrhs,grid,xsup,recurlevel) private(lptr1,luptr1,nlb1) untied priority(1)
// #endif
{
zlsum_fmod_inv(lsum, x, &x[ii], rtemp, nrhs, ik,
fmod, xsup,
grid, Llu, stat, leaf_send, nleaf_send ,sizelsum,sizertemp,1+recurlevel,maxsuper,thread_id,num_thread);
}
// } /* if frecv[lk] == 0 */
} /* if iam == p */
} /* if fmod[lk] == 0 */
}
// }
}
stat[thread_id]->ops[SOLVE] += 8 * m * nrhs * knsupc;
} /* if nlb>0*/
} /* zLSUM_FMOD_INV */
/************************************************************************/
/*! \brief
*
* <pre>
* Purpose
* =======
* Perform local block modifications: lsum[i] -= L_i,k * X[k].
* </pre>
*/
void zlsum_fmod_inv_master
/************************************************************************/
(
doublecomplex *lsum, /* Sum of local modifications. */
doublecomplex *x, /* X array (local) */
doublecomplex *xk, /* X[k]. */
doublecomplex *rtemp, /* Result of full matrix-vector multiply. */
int nrhs, /* Number of right-hand sides. */
int knsupc, /* Size of supernode k. */
int_t k, /* The k-th component of X. */
int_t *fmod, /* Modification count for L-solve. */
int_t nlb, /* Number of L blocks. */
int_t *xsup,
gridinfo_t *grid,
LocalLU_t *Llu,
SuperLUStat_t **stat,
int_t sizelsum,
int_t sizertemp,
int_t recurlevel,
int_t maxsuper,
int thread_id,
int num_thread
)
{
doublecomplex alpha = {1.0, 0.0}, beta = {0.0, 0.0},malpha={-1.0, 0.0};
doublecomplex *lusup, *lusup1;
doublecomplex *dest;
doublecomplex *Linv;/* Inverse of diagonal block */
int iam, iknsupc, myrow, krow, nbrow, nbrow1, nbrow_ref, nsupr, nsupr1, p, pi, idx_r;
int_t i, ii,jj, ik, il, ikcol, irow, j, lb, lk, rel, lib,lready;
int_t *lsub, *lsub1, nlb1, lptr1, luptr1,*lloc;
int_t *ilsum = Llu->ilsum; /* Starting position of each supernode in lsum. */
int_t *frecv = Llu->frecv;
int_t **fsendx_plist = Llu->fsendx_plist;
int_t luptr_tmp,luptr_tmp1,lptr1_tmp,maxrecvsz, idx_i, idx_v,idx_n, idx_l, fmod_tmp, lbstart,lbend,nn,Nchunk,nlb_loc,remainder;
int thread_id1;
int m;
flops_t ops_loc=0.0;
MPI_Status status;
int test_flag;
yes_no_t done;
BcTree *LBtree_ptr = Llu->LBtree_ptr;
RdTree *LRtree_ptr = Llu->LRtree_ptr;
int_t* idx_lsum,idx_lsum1;
doublecomplex *rtemp_loc;
int_t ldalsum;
int_t nleaf_send_tmp;
int_t lptr; /* Starting position in lsub[*]. */
int_t luptr; /* Starting position in lusup[*]. */
int_t iword = sizeof(int_t);
int_t dword = sizeof (double);
int_t aln_d,aln_i;
aln_d = ceil(CACHELINE/(double)dword);
aln_i = ceil(CACHELINE/(double)iword);
ldalsum=Llu->ldalsum;
rtemp_loc = &rtemp[sizertemp* thread_id];
// #if ( PROFlevel>=1 )
double t1, t2, t3, t4;
float msg_vol = 0, msg_cnt = 0;
// #endif
if(nlb>0){
iam = grid->iam;
myrow = MYROW( iam, grid );
lk = LBj( k, grid ); /* Local block number, column-wise. */
// printf("ya1 %5d k %5d lk %5d\n",thread_id,k,lk);
// fflush(stdout);
lsub = Llu->Lrowind_bc_ptr[lk];
// printf("ya2 %5d k %5d lk %5d\n",thread_id,k,lk);
// fflush(stdout);
lusup = Llu->Lnzval_bc_ptr[lk];
lloc = Llu->Lindval_loc_bc_ptr[lk];
// idx_lsum = Llu->Lrowind_bc_2_lsum[lk];
nsupr = lsub[1];
// printf("nlb: %5d lk: %5d\n",nlb,lk);
// fflush(stdout);
krow = PROW( k, grid );
if(myrow==krow){
idx_n = 1;
idx_i = nlb+2;
idx_v = 2*nlb+3;
luptr_tmp = lloc[idx_v];
m = nsupr-knsupc;
}else{
idx_n = 0;
idx_i = nlb;
idx_v = 2*nlb;
luptr_tmp = lloc[idx_v];
m = nsupr;
}
assert(m>0);
if(m>4*maxsuper || nrhs>10){
// if(m<1){
// TIC(t1);
Nchunk=num_thread;
nlb_loc = floor(((double)nlb)/Nchunk);
remainder = nlb % Nchunk;
#ifdef _OPENMP
#pragma omp taskloop private (lptr1,luptr1,nlb1,thread_id1,lsub1,lusup1,nsupr1,Linv,nn,lbstart,lbend,luptr_tmp1,nbrow,lb,lptr1_tmp,rtemp_loc,nbrow_ref,lptr,nbrow1,ik,rel,lk,iknsupc,il,i,irow,fmod_tmp,ikcol,p,ii,jj,t1,t2,j) untied
#endif
for (nn=0;nn<Nchunk;++nn){
#ifdef _OPENMP
thread_id1 = omp_get_thread_num ();
#else
thread_id1 = 0;
#endif
rtemp_loc = &rtemp[sizertemp* thread_id1];
if(nn<remainder){
lbstart = nn*(nlb_loc+1);
lbend = (nn+1)*(nlb_loc+1);
}else{
lbstart = remainder+nn*nlb_loc;
lbend = remainder + (nn+1)*nlb_loc;
}
if(lbstart<lbend){
#if ( PROFlevel>=1 )
TIC(t1);
#endif
luptr_tmp1 = lloc[lbstart+idx_v];
nbrow=0;
for (lb = lbstart; lb < lbend; ++lb){
lptr1_tmp = lloc[lb+idx_i];
nbrow += lsub[lptr1_tmp+1];
}
#ifdef _CRAY
CGEMM( ftcs2, ftcs2, &nbrow, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp1], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &nbrow );
#elif defined (USE_VENDOR_BLAS)
zgemm_( "N", "N", &nbrow, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp1], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &nbrow, 1, 1 );
#else
zgemm_( "N", "N", &nbrow, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp1], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &nbrow );
#endif
nbrow_ref=0;
for (lb = lbstart; lb < lbend; ++lb){
lptr1_tmp = lloc[lb+idx_i];
lptr= lptr1_tmp+2;
nbrow1 = lsub[lptr1_tmp+1];
ik = lsub[lptr1_tmp]; /* Global block number, row-wise. */
rel = xsup[ik]; /* Global row index of block ik. */
lk = LBi( ik, grid ); /* Local block number, row-wise. */
iknsupc = SuperSize( ik );
il = LSUM_BLK( lk );
RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd lastprivate(irow)
#endif
for (i = 0; i < nbrow1; ++i) {
irow = lsub[lptr+i] - rel; /* Relative row. */
z_sub(&lsum[il+irow + j*iknsupc],
&lsum[il+irow + j*iknsupc],
&rtemp_loc[nbrow_ref+i + j*nbrow]);
}
nbrow_ref+=nbrow1;
}
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id1]->utime[SOL_GEMM] += t2;
#endif
}
}
}else{
#if ( PROFlevel>=1 )
TIC(t1);
#endif
#ifdef _CRAY
CGEMM( ftcs2, ftcs2, &m, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &m );
#elif defined (USE_VENDOR_BLAS)
zgemm_( "N", "N", &m, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &m, 1, 1 );
#else
zgemm_( "N", "N", &m, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &m );
#endif
nbrow=0;
for (lb = 0; lb < nlb; ++lb){
lptr1_tmp = lloc[lb+idx_i];
nbrow += lsub[lptr1_tmp+1];
}
nbrow_ref=0;
for (lb = 0; lb < nlb; ++lb){
lptr1_tmp = lloc[lb+idx_i];
lptr= lptr1_tmp+2;
nbrow1 = lsub[lptr1_tmp+1];
ik = lsub[lptr1_tmp]; /* Global block number, row-wise. */
rel = xsup[ik]; /* Global row index of block ik. */
lk = LBi( ik, grid ); /* Local block number, row-wise. */
iknsupc = SuperSize( ik );
il = LSUM_BLK( lk );
RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd lastprivate(irow)
#endif
for (i = 0; i < nbrow1; ++i) {
irow = lsub[lptr+i] - rel; /* Relative row. */
z_sub(&lsum[il+irow + j*iknsupc+sizelsum*thread_id],
&lsum[il+irow + j*iknsupc+sizelsum*thread_id],
&rtemp_loc[nbrow_ref+i + j*nbrow]);
}
nbrow_ref+=nbrow1;
}
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id]->utime[SOL_GEMM] += t2;
#endif
}
// TOC(t3, t1);
rtemp_loc = &rtemp[sizertemp* thread_id];
for (lb=0;lb<nlb;lb++){
lk = lloc[lb+idx_n];
// #ifdef _OPENMP
// #pragma omp atomic capture
// #endif
fmod_tmp=--fmod[lk*aln_i];
if ( fmod_tmp==0 ) { /* Local accumulation done. */
// --fmod[lk];
lptr1_tmp = lloc[lb+idx_i];
// luptr_tmp = lloc[lb+idx_v];
ik = lsub[lptr1_tmp]; /* Global block number, row-wise. */
lk = LBi( ik, grid ); /* Local block number, row-wise. */
iknsupc = SuperSize( ik );
il = LSUM_BLK( lk );
// nbrow = lsub[lptr1_tmp+1];
ikcol = PCOL( ik, grid );
p = PNUM( myrow, ikcol, grid );
if ( iam != p ) {
// if(frecv[lk]==0){
// fmod[lk] = -1;
for (ii=1;ii<num_thread;ii++)
// if(ii!=thread_id)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
z_add(&lsum[il + jj ],
&lsum[il + jj ],
&lsum[il + jj + ii*sizelsum]);
RdTree_forwardMessageSimple(LRtree_ptr[lk],&lsum[il - LSUM_H ],RdTree_GetMsgSize(LRtree_ptr[lk],'z')*nrhs+LSUM_H,'z');
// }
} else { /* Diagonal process: X[i] += lsum[i]. */
// if ( frecv[lk]==0 ) { /* Becomes a leaf node. */
#if ( PROFlevel>=1 )
TIC(t1);
#endif
for (ii=1;ii<num_thread;ii++)
// if(ii!=thread_id)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
z_add(&lsum[il + jj ],
&lsum[il + jj ],
&lsum[il + jj + ii*sizelsum]);
ii = X_BLK( lk );
// for (jj=0;jj<num_thread;jj++)
RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd
#endif
for (i = 0; i < iknsupc; ++i)
z_add(&x[i + ii + j*iknsupc],
&x[i + ii + j*iknsupc],
&lsum[i + il + j*iknsupc] );
// fmod[lk] = -1; /* Do not solve X[k] in the future. */
lk = LBj( ik, grid );/* Local block number, column-wise. */
lsub1 = Llu->Lrowind_bc_ptr[lk];
lusup1 = Llu->Lnzval_bc_ptr[lk];
nsupr1 = lsub1[1];
if(Llu->inv == 1){
Linv = Llu->Linv_bc_ptr[lk];
#ifdef _CRAY
CGEMM( ftcs2, ftcs2, &iknsupc, &nrhs, &iknsupc,
&alpha, Linv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#elif defined (USE_VENDOR_BLAS)
zgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Linv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc, 1, 1 );
#else
zgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Linv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#endif
#ifdef _OPENMP
#pragma omp simd
#endif
for (i=0 ; i<iknsupc*nrhs ; i++){
z_copy(&x[ii+i],&rtemp_loc[i]);
}
}else{
#ifdef _CRAY
CTRSM(ftcs1, ftcs1, ftcs2, ftcs3, &iknsupc, &nrhs, &alpha,
lusup1, &nsupr1, &x[ii], &iknsupc);
#elif defined (USE_VENDOR_BLAS)
ztrsm_("L", "L", "N", "U", &iknsupc, &nrhs, &alpha,
lusup1, &nsupr1, &x[ii], &iknsupc, 1, 1, 1, 1);
#else
ztrsm_("L", "L", "N", "U", &iknsupc, &nrhs, &alpha,
lusup1, &nsupr1, &x[ii], &iknsupc);
#endif
}
// for (i=0 ; i<iknsupc*nrhs ; i++){
// printf("x_usum: %f %f\n",x[ii+i].r,x[ii+i].i);
// fflush(stdout);
// }
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id]->utime[SOL_TRSM] += t2;
#endif
stat[thread_id]->ops[SOLVE] += 4 * iknsupc * (iknsupc - 1) * nrhs
+ 10 * knsupc * nrhs; /* complex division */
#if ( DEBUGlevel>=2 )
printf("(%2d) Solve X[%2d]\n", iam, ik);
#endif
/*
* Send Xk to process column Pc[k].
*/
if(LBtree_ptr[lk]!=NULL)
BcTree_forwardMessageSimple(LBtree_ptr[lk],&x[ii - XK_H],BcTree_GetMsgSize(LBtree_ptr[lk],'z')*nrhs+XK_H,'z');
/*
* Perform local block modifications.
*/
// #ifdef _OPENMP
// #pragma omp task firstprivate (Llu,sizelsum,iknsupc,ii,ik,lsub1,x,rtemp,fmod,lsum,stat,nrhs,grid,xsup,recurlevel) private(lptr1,luptr1,nlb1,thread_id1) untied priority(1)
// #endif
{
nlb1 = lsub1[0] - 1;
zlsum_fmod_inv_master(lsum, x, &x[ii], rtemp, nrhs, iknsupc, ik,
fmod, nlb1, xsup,
grid, Llu, stat,sizelsum,sizertemp,1+recurlevel,maxsuper,thread_id,num_thread);
}
// } /* if frecv[lk] == 0 */
} /* if iam == p */
} /* if fmod[lk] == 0 */
}
// }
stat[thread_id]->ops[SOLVE] += 8 * m * nrhs * knsupc;
} /* if nlb>0*/
} /* zLSUM_FMOD_INV_MASTER */
/************************************************************************/
void zlsum_bmod_inv
/************************************************************************/
(
doublecomplex *lsum, /* Sum of local modifications. */
doublecomplex *x, /* X array (local). */
doublecomplex *xk, /* X[k]. */
doublecomplex *rtemp, /* Result of full matrix-vector multiply. */
int nrhs, /* Number of right-hand sides. */
int_t k, /* The k-th component of X. */
int_t *bmod, /* Modification count for L-solve. */
int_t *Urbs, /* Number of row blocks in each block column of U.*/
Ucb_indptr_t **Ucb_indptr,/* Vertical linked list pointing to Uindex[].*/
int_t **Ucb_valptr, /* Vertical linked list pointing to Unzval[]. */
int_t *xsup,
gridinfo_t *grid,
LocalLU_t *Llu,
SuperLUStat_t **stat,
int_t* root_send,
int_t* nroot_send,
int_t sizelsum,
int_t sizertemp,
int thread_id,
int num_thread
)
{
/*
* Purpose
* =======
* Perform local block modifications: lsum[i] -= U_i,k * X[k].
*/
doublecomplex alpha = {1.0, 0.0}, beta = {0.0, 0.0};
int iam, iknsupc, knsupc, myrow, nsupr, p, pi;
int_t fnz, gik, gikcol, i, ii, ik, ikfrow, iklrow, il, irow,
j, jj, lk, lk1, nub, ub, uptr;
int_t *usub;
doublecomplex *uval, *dest, *y;
int_t *lsub;
doublecomplex *lusup;
int_t *ilsum = Llu->ilsum; /* Starting position of each supernode in lsum. */
int_t *brecv = Llu->brecv;
int_t **bsendx_plist = Llu->bsendx_plist;
BcTree *UBtree_ptr = Llu->UBtree_ptr;
RdTree *URtree_ptr = Llu->URtree_ptr;
MPI_Status status;
int test_flag;
int_t bmod_tmp;
int thread_id1;
doublecomplex *rtemp_loc;
int_t nroot_send_tmp;
doublecomplex *Uinv;/* Inverse of diagonal block */
doublecomplex temp;
double t1, t2;
float msg_vol = 0, msg_cnt = 0;
int_t Nchunk, nub_loc,remainder,nn,lbstart,lbend;
int_t iword = sizeof(int_t);
int_t dword = sizeof (double);
int_t aln_d,aln_i;
aln_d = ceil(CACHELINE/(double)dword);
aln_i = ceil(CACHELINE/(double)iword);
iam = grid->iam;
myrow = MYROW( iam, grid );
knsupc = SuperSize( k );
lk = LBj( k, grid ); /* Local block number, column-wise. */
nub = Urbs[lk]; /* Number of U blocks in block column lk */
if(Llu->Unnz[lk]>knsupc*64 || nub>16){
// if(nub>num_thread){
// if(nub>16){
// // // // if(Urbs2[lk]>num_thread){
// if(Urbs2[lk]>0){
Nchunk=SUPERLU_MIN(num_thread,nub);
nub_loc = floor(((double)nub)/Nchunk);
remainder = nub % Nchunk;
// printf("Unnz: %5d nub: %5d knsupc: %5d\n",Llu->Unnz[lk],nub,knsupc);
#ifdef _OPENMP
#pragma omp taskloop firstprivate (stat) private (thread_id1,Uinv,nn,lbstart,lbend,ub,temp,rtemp_loc,ik,lk1,gik,gikcol,usub,uval,lsub,lusup,iknsupc,il,i,irow,bmod_tmp,p,ii,jj,t1,t2,j,ikfrow,iklrow,dest,y,uptr,fnz,nsupr) untied nogroup
#endif
for (nn=0;nn<Nchunk;++nn){
#ifdef _OPENMP
thread_id1 = omp_get_thread_num ();
#else
thread_id1 = 0;
#endif
rtemp_loc = &rtemp[sizertemp* thread_id1];
if(nn<remainder){
lbstart = nn*(nub_loc+1);
lbend = (nn+1)*(nub_loc+1);
}else{
lbstart = remainder+nn*nub_loc;
lbend = remainder + (nn+1)*nub_loc;
}
for (ub = lbstart; ub < lbend; ++ub){
ik = Ucb_indptr[lk][ub].lbnum; /* Local block number, row-wise. */
usub = Llu->Ufstnz_br_ptr[ik];
uval = Llu->Unzval_br_ptr[ik];
i = Ucb_indptr[lk][ub].indpos; /* Start of the block in usub[]. */
i += UB_DESCRIPTOR;
il = LSUM_BLK( ik );
gik = ik * grid->nprow + myrow;/* Global block number, row-wise. */
iknsupc = SuperSize( gik );
ikfrow = FstBlockC( gik );
iklrow = FstBlockC( gik+1 );
#if ( PROFlevel>=1 )
TIC(t1);
#endif
RHS_ITERATE(j) {
dest = &lsum[il + j*iknsupc+sizelsum*thread_id1];
y = &xk[j*knsupc];
uptr = Ucb_valptr[lk][ub]; /* Start of the block in uval[]. */
for (jj = 0; jj < knsupc; ++jj) {
fnz = usub[i + jj];
if ( fnz < iklrow ) { /* Nonzero segment. */
/* AXPY */
#ifdef _OPENMP
#pragma omp simd
#endif
for (irow = fnz; irow < iklrow; ++irow)
{
zz_mult(&temp, &uval[uptr], &y[jj]);
z_sub(&dest[irow - ikfrow], &dest[irow - ikfrow],
&temp);
++uptr;
}
stat[thread_id1]->ops[SOLVE] += 8 * (iklrow - fnz);
}
} /* for jj ... */
}
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id1]->utime[SOL_GEMM] += t2;
#endif
#ifdef _OPENMP
#pragma omp atomic capture
#endif
bmod_tmp=--bmod[ik*aln_i];
if ( bmod_tmp == 0 ) { /* Local accumulation done. */
gikcol = PCOL( gik, grid );
p = PNUM( myrow, gikcol, grid );
if ( iam != p ) {
for (ii=1;ii<num_thread;ii++)
// if(ii!=thread_id1)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
z_add(&lsum[il + jj ],
&lsum[il + jj ],
&lsum[il + jj + ii*sizelsum]);
#ifdef _OPENMP
#pragma omp atomic capture
#endif
nroot_send_tmp = ++nroot_send[0];
root_send[(nroot_send_tmp-1)*aln_i] = -ik-1;
// RdTree_forwardMessageSimple(URtree_ptr[ik],&lsum[il - LSUM_H ],'z');
#if ( DEBUGlevel>=2 )
printf("(%2d) Sent LSUM[%2.0f], size %2d, to P %2d\n",
iam, lsum[il-LSUM_H], iknsupc*nrhs+LSUM_H, p);
#endif
} else { /* Diagonal process: X[i] += lsum[i]. */
#if ( PROFlevel>=1 )
TIC(t1);
#endif
for (ii=1;ii<num_thread;ii++)
// if(ii!=thread_id1)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
z_add(&lsum[il + jj ],
&lsum[il + jj ],
&lsum[il + jj + ii*sizelsum]);
ii = X_BLK( ik );
dest = &x[ii];
RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd
#endif
for (i = 0; i < iknsupc; ++i)
z_add(&dest[i + j*iknsupc],
&dest[i + j*iknsupc],
&lsum[i + il + j*iknsupc]);
// if ( !brecv[ik] ) { /* Becomes a leaf node. */
// bmod[ik] = -1; /* Do not solve X[k] in the future. */
lk1 = LBj( gik, grid ); /* Local block number. */
lsub = Llu->Lrowind_bc_ptr[lk1];
lusup = Llu->Lnzval_bc_ptr[lk1];
nsupr = lsub[1];
if(Llu->inv == 1){
Uinv = Llu->Uinv_bc_ptr[lk1];
#ifdef _CRAY
CGEMM( ftcs2, ftcs2, &iknsupc, &nrhs, &iknsupc,
&alpha, Uinv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#elif defined (USE_VENDOR_BLAS)
zgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Uinv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc, 1, 1 );
#else
zgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Uinv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#endif
#ifdef _OPENMP
#pragma omp simd
#endif
for (i=0 ; i<iknsupc*nrhs ; i++){
z_copy(&x[ii+i],&rtemp_loc[i]);
}
}else{
#ifdef _CRAY
CTRSM(ftcs1, ftcs3, ftcs2, ftcs2, &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc);
#elif defined (USE_VENDOR_BLAS)
ztrsm_("L", "U", "N", "N", &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc, 1, 1, 1, 1);
#else
ztrsm_("L", "U", "N", "N", &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc);
#endif
}
// for (i=0 ; i<iknsupc*nrhs ; i++){
// printf("x_usum: %f %f\n",x[ii+i].r,x[ii+i].i);
// fflush(stdout);
// }
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id1]->utime[SOL_TRSM] += t2;
#endif
stat[thread_id1]->ops[SOLVE] += 4 * iknsupc * (iknsupc + 1) * nrhs
+ 10 * knsupc * nrhs; /* complex division */
#if ( DEBUGlevel>=2 )
printf("(%2d) Solve X[%2d]\n", iam, gik);
#endif
/*
* Send Xk to process column Pc[k].
*/
// for (i=0 ; i<iknsupc*nrhs ; i++){
// printf("xre: %f\n",x[ii+i]);
// fflush(stdout);
// }
if(UBtree_ptr[lk1]!=NULL){
#ifdef _OPENMP
#pragma omp atomic capture
#endif
nroot_send_tmp = ++nroot_send[0];
root_send[(nroot_send_tmp-1)*aln_i] = lk1;
// BcTree_forwardMessageSimple(UBtree_ptr[lk1],&x[ii - XK_H],'z');
}
/*
* Perform local block modifications.
*/
if ( Urbs[lk1] ){
// #ifdef _OPENMP
// #pragma omp task firstprivate (Ucb_indptr,Ucb_valptr,Llu,sizelsum,ii,gik,x,rtemp,bmod,Urbs,lsum,stat,nrhs,grid,xsup) untied
// #endif
{
zlsum_bmod_inv(lsum, x, &x[ii], rtemp, nrhs, gik, bmod, Urbs,
Ucb_indptr, Ucb_valptr, xsup, grid, Llu,
stat, root_send, nroot_send, sizelsum,sizertemp,thread_id1,num_thread);
}
}
// } /* if brecv[ik] == 0 */
}
} /* if bmod[ik] == 0 */
}
}
} else {
rtemp_loc = &rtemp[sizertemp* thread_id];
for (ub = 0; ub < nub; ++ub) {
ik = Ucb_indptr[lk][ub].lbnum; /* Local block number, row-wise. */
usub = Llu->Ufstnz_br_ptr[ik];
uval = Llu->Unzval_br_ptr[ik];
i = Ucb_indptr[lk][ub].indpos; /* Start of the block in usub[]. */
i += UB_DESCRIPTOR;
il = LSUM_BLK( ik );
gik = ik * grid->nprow + myrow;/* Global block number, row-wise. */
iknsupc = SuperSize( gik );
ikfrow = FstBlockC( gik );
iklrow = FstBlockC( gik+1 );
#if ( PROFlevel>=1 )
TIC(t1);
#endif
RHS_ITERATE(j) {
dest = &lsum[il + j*iknsupc+sizelsum*thread_id];
y = &xk[j*knsupc];
uptr = Ucb_valptr[lk][ub]; /* Start of the block in uval[]. */
for (jj = 0; jj < knsupc; ++jj) {
fnz = usub[i + jj];
if ( fnz < iklrow ) { /* Nonzero segment. */
/* AXPY */
#ifdef _OPENMP
#pragma omp simd
#endif
for (irow = fnz; irow < iklrow; ++irow)
{
zz_mult(&temp, &uval[uptr], &y[jj]);
z_sub(&dest[irow - ikfrow], &dest[irow - ikfrow],
&temp);
++uptr;
}
stat[thread_id]->ops[SOLVE] += 8 * (iklrow - fnz);
}
} /* for jj ... */
}
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id]->utime[SOL_GEMM] += t2;
#endif
#ifdef _OPENMP
#pragma omp atomic capture
#endif
bmod_tmp=--bmod[ik*aln_i];
if ( bmod_tmp == 0 ) { /* Local accumulation done. */
gikcol = PCOL( gik, grid );
p = PNUM( myrow, gikcol, grid );
if ( iam != p ) {
for (ii=1;ii<num_thread;ii++)
// if(ii!=thread_id)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
z_add(&lsum[il + jj ],
&lsum[il + jj ],
&lsum[il + jj + ii*sizelsum]);
#ifdef _OPENMP
#pragma omp atomic capture
#endif
nroot_send_tmp = ++nroot_send[0];
root_send[(nroot_send_tmp-1)*aln_i] = -ik-1;
// RdTree_forwardMessageSimple(URtree_ptr[ik],&lsum[il - LSUM_H ],'z');
#if ( DEBUGlevel>=2 )
printf("(%2d) Sent LSUM[%2.0f], size %2d, to P %2d\n",
iam, lsum[il-LSUM_H], iknsupc*nrhs+LSUM_H, p);
#endif
} else { /* Diagonal process: X[i] += lsum[i]. */
#if ( PROFlevel>=1 )
TIC(t1);
#endif
for (ii=1;ii<num_thread;ii++)
// if(ii!=thread_id)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
z_add(&lsum[il + jj ],
&lsum[il + jj ],
&lsum[il + jj + ii*sizelsum]);
ii = X_BLK( ik );
dest = &x[ii];
RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd
#endif
for (i = 0; i < iknsupc; ++i)
z_add(&dest[i + j*iknsupc],
&dest[i + j*iknsupc],
&lsum[i + il + j*iknsupc]);
// if ( !brecv[ik] ) { /* Becomes a leaf node. */
// bmod[ik] = -1; /* Do not solve X[k] in the future. */
lk1 = LBj( gik, grid ); /* Local block number. */
lsub = Llu->Lrowind_bc_ptr[lk1];
lusup = Llu->Lnzval_bc_ptr[lk1];
nsupr = lsub[1];
if(Llu->inv == 1){
Uinv = Llu->Uinv_bc_ptr[lk1];
#ifdef _CRAY
CGEMM( ftcs2, ftcs2, &iknsupc, &nrhs, &iknsupc,
&alpha, Uinv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#elif defined (USE_VENDOR_BLAS)
zgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Uinv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc, 1, 1 );
#else
zgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Uinv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#endif
#ifdef _OPENMP
#pragma omp simd
#endif
for (i=0 ; i<iknsupc*nrhs ; i++){
z_copy(&x[ii+i],&rtemp_loc[i]);
}
}else{
#ifdef _CRAY
CTRSM(ftcs1, ftcs3, ftcs2, ftcs2, &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc);
#elif defined (USE_VENDOR_BLAS)
ztrsm_("L", "U", "N", "N", &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc, 1, 1, 1, 1);
#else
ztrsm_("L", "U", "N", "N", &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc);
#endif
}
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id]->utime[SOL_TRSM] += t2;
#endif
stat[thread_id]->ops[SOLVE] += 4 * iknsupc * (iknsupc + 1) * nrhs
+ 10 * knsupc * nrhs; /* complex division */
#if ( DEBUGlevel>=2 )
printf("(%2d) Solve X[%2d]\n", iam, gik);
#endif
/*
* Send Xk to process column Pc[k].
*/
// for (i=0 ; i<iknsupc*nrhs ; i++){
// printf("xre: %f\n",x[ii+i]);
// fflush(stdout);
// }
if(UBtree_ptr[lk1]!=NULL){
#ifdef _OPENMP
#pragma omp atomic capture
#endif
nroot_send_tmp = ++nroot_send[0];
root_send[(nroot_send_tmp-1)*aln_i] = lk1;
// BcTree_forwardMessageSimple(UBtree_ptr[lk1],&x[ii - XK_H],'z');
}
/*
* Perform local block modifications.
*/
if ( Urbs[lk1] )
// if(Urbs[lk1]>16){
// #ifdef _OPENMP
// #pragma omp task firstprivate (Ucb_indptr,Ucb_valptr,Llu,sizelsum,ii,gik,x,rtemp,bmod,Urbs,lsum,stat,nrhs,grid,xsup) untied
// #endif
// zlsum_bmod_inv(lsum, x, &x[ii], rtemp, nrhs, gik, bmod, Urbs,
// Ucb_indptr, Ucb_valptr, xsup, grid, Llu,
// stat, root_send, nroot_send, sizelsum,sizertemp);
//}else{
zlsum_bmod_inv(lsum, x, &x[ii], rtemp, nrhs, gik, bmod, Urbs,
Ucb_indptr, Ucb_valptr, xsup, grid, Llu,
stat, root_send, nroot_send, sizelsum,sizertemp,thread_id,num_thread);
//}
// } /* if brecv[ik] == 0 */
}
} /* if bmod[ik] == 0 */
} /* for ub ... */
}
} /* zlSUM_BMOD_inv */
/************************************************************************/
void zlsum_bmod_inv_master
/************************************************************************/
(
doublecomplex *lsum, /* Sum of local modifications. */
doublecomplex *x, /* X array (local). */
doublecomplex *xk, /* X[k]. */
doublecomplex *rtemp, /* Result of full matrix-vector multiply. */
int nrhs, /* Number of right-hand sides. */
int_t k, /* The k-th component of X. */
int_t *bmod, /* Modification count for L-solve. */
int_t *Urbs, /* Number of row blocks in each block column of U.*/
Ucb_indptr_t **Ucb_indptr,/* Vertical linked list pointing to Uindex[].*/
int_t **Ucb_valptr, /* Vertical linked list pointing to Unzval[]. */
int_t *xsup,
gridinfo_t *grid,
LocalLU_t *Llu,
SuperLUStat_t **stat,
int_t sizelsum,
int_t sizertemp,
int thread_id,
int num_thread
)
{
/*
* Purpose
* =======
* Perform local block modifications: lsum[i] -= U_i,k * X[k].
*/
doublecomplex alpha = {1.0, 0.0}, beta = {0.0, 0.0};
int iam, iknsupc, knsupc, myrow, nsupr, p, pi;
int_t fnz, gik, gikcol, i, ii, ik, ikfrow, iklrow, il, irow,
j, jj, lk, lk1, nub, ub, uptr;
int_t *usub;
doublecomplex *uval, *dest, *y;
int_t *lsub;
doublecomplex *lusup;
int_t *ilsum = Llu->ilsum; /* Starting position of each supernode in lsum. */
int_t *brecv = Llu->brecv;
int_t **bsendx_plist = Llu->bsendx_plist;
BcTree *UBtree_ptr = Llu->UBtree_ptr;
RdTree *URtree_ptr = Llu->URtree_ptr;
MPI_Status status;
int test_flag;
int_t bmod_tmp;
int thread_id1;
doublecomplex *rtemp_loc;
doublecomplex temp;
doublecomplex *Uinv;/* Inverse of diagonal block */
double t1, t2;
float msg_vol = 0, msg_cnt = 0;
int_t Nchunk, nub_loc,remainder,nn,lbstart,lbend;
int_t iword = sizeof(int_t);
int_t dword = sizeof (double);
int_t aln_d,aln_i;
aln_d = ceil(CACHELINE/(double)dword);
aln_i = ceil(CACHELINE/(double)iword);
rtemp_loc = &rtemp[sizertemp* thread_id];
iam = grid->iam;
myrow = MYROW( iam, grid );
knsupc = SuperSize( k );
lk = LBj( k, grid ); /* Local block number, column-wise. */
nub = Urbs[lk]; /* Number of U blocks in block column lk */
// printf("Urbs2[lk] %5d lk %5d nub %5d\n",Urbs2[lk],lk,nub);
// fflush(stdout);
if(nub>num_thread){
// if(nub>0){
Nchunk=num_thread;
nub_loc = floor(((double)nub)/Nchunk);
remainder = nub % Nchunk;
//#ifdef _OPENMP
//#pragma omp taskloop firstprivate (stat) private (thread_id1,nn,lbstart,lbend,ub,temp,rtemp_loc,ik,gik,usub,uval,iknsupc,il,i,irow,jj,t1,t2,j,ikfrow,iklrow,dest,y,uptr,fnz) untied
//#endif
for (nn=0;nn<Nchunk;++nn){
#ifdef _OPENMP
thread_id1 = omp_get_thread_num ();
#else
thread_id1 = 0;
#endif
rtemp_loc = &rtemp[sizertemp* thread_id1];
#if ( PROFlevel>=1 )
TIC(t1);
#endif
if(nn<remainder){
lbstart = nn*(nub_loc+1);
lbend = (nn+1)*(nub_loc+1);
}else{
lbstart = remainder+nn*nub_loc;
lbend = remainder + (nn+1)*nub_loc;
}
for (ub = lbstart; ub < lbend; ++ub){
ik = Ucb_indptr[lk][ub].lbnum; /* Local block number, row-wise. */
usub = Llu->Ufstnz_br_ptr[ik];
uval = Llu->Unzval_br_ptr[ik];
i = Ucb_indptr[lk][ub].indpos; /* Start of the block in usub[]. */
i += UB_DESCRIPTOR;
il = LSUM_BLK( ik );
gik = ik * grid->nprow + myrow;/* Global block number, row-wise. */
iknsupc = SuperSize( gik );
ikfrow = FstBlockC( gik );
iklrow = FstBlockC( gik+1 );
RHS_ITERATE(j) {
dest = &lsum[il + j*iknsupc+sizelsum*thread_id1];
y = &xk[j*knsupc];
uptr = Ucb_valptr[lk][ub]; /* Start of the block in uval[]. */
for (jj = 0; jj < knsupc; ++jj) {
fnz = usub[i + jj];
if ( fnz < iklrow ) { /* Nonzero segment. */
/* AXPY */
#ifdef _OPENMP
#pragma omp simd
#endif
for (irow = fnz; irow < iklrow; ++irow)
{
zz_mult(&temp, &uval[uptr], &y[jj]);
z_sub(&dest[irow - ikfrow], &dest[irow - ikfrow],
&temp);
++uptr;
}
stat[thread_id1]->ops[SOLVE] += 8 * (iklrow - fnz);
}
} /* for jj ... */
}
}
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id1]->utime[SOL_GEMM] += t2;
#endif
}
}else{
rtemp_loc = &rtemp[sizertemp* thread_id];
#if ( PROFlevel>=1 )
TIC(t1);
#endif
for (ub = 0; ub < nub; ++ub) {
ik = Ucb_indptr[lk][ub].lbnum; /* Local block number, row-wise. */
usub = Llu->Ufstnz_br_ptr[ik];
uval = Llu->Unzval_br_ptr[ik];
i = Ucb_indptr[lk][ub].indpos; /* Start of the block in usub[]. */
i += UB_DESCRIPTOR;
il = LSUM_BLK( ik );
gik = ik * grid->nprow + myrow;/* Global block number, row-wise. */
iknsupc = SuperSize( gik );
ikfrow = FstBlockC( gik );
iklrow = FstBlockC( gik+1 );
RHS_ITERATE(j) {
dest = &lsum[il + j*iknsupc+sizelsum*thread_id];
y = &xk[j*knsupc];
uptr = Ucb_valptr[lk][ub]; /* Start of the block in uval[]. */
for (jj = 0; jj < knsupc; ++jj) {
fnz = usub[i + jj];
if ( fnz < iklrow ) { /* Nonzero segment. */
/* AXPY */
#ifdef _OPENMP
#pragma omp simd
#endif
for (irow = fnz; irow < iklrow; ++irow)
{
zz_mult(&temp, &uval[uptr], &y[jj]);
z_sub(&dest[irow - ikfrow], &dest[irow - ikfrow],
&temp);
++uptr;
}
stat[thread_id]->ops[SOLVE] += 8 * (iklrow - fnz);
}
} /* for jj ... */
}
}
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id]->utime[SOL_GEMM] += t2;
#endif
}
rtemp_loc = &rtemp[sizertemp* thread_id];
for (ub = 0; ub < nub; ++ub){
ik = Ucb_indptr[lk][ub].lbnum; /* Local block number, row-wise. */
il = LSUM_BLK( ik );
gik = ik * grid->nprow + myrow;/* Global block number, row-wise. */
iknsupc = SuperSize( gik );
// #ifdef _OPENMP
// #pragma omp atomic capture
// #endif
bmod_tmp=--bmod[ik*aln_i];
if ( bmod_tmp == 0 ) { /* Local accumulation done. */
gikcol = PCOL( gik, grid );
p = PNUM( myrow, gikcol, grid );
if ( iam != p ) {
for (ii=1;ii<num_thread;ii++)
// if(ii!=thread_id)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
z_add(&lsum[il + jj ],
&lsum[il + jj ],
&lsum[il + jj + ii*sizelsum]);
RdTree_forwardMessageSimple(URtree_ptr[ik],&lsum[il - LSUM_H ],RdTree_GetMsgSize(URtree_ptr[ik],'z')*nrhs+LSUM_H,'z');
#if ( DEBUGlevel>=2 )
printf("(%2d) Sent LSUM[%2.0f], size %2d, to P %2d\n",
iam, lsum[il-LSUM_H], iknsupc*nrhs+LSUM_H, p);
#endif
} else { /* Diagonal process: X[i] += lsum[i]. */
#if ( PROFlevel>=1 )
TIC(t1);
#endif
for (ii=1;ii<num_thread;ii++)
// if(ii!=thread_id)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
z_add(&lsum[il + jj ],
&lsum[il + jj ],
&lsum[il + jj + ii*sizelsum]);
ii = X_BLK( ik );
dest = &x[ii];
RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd
#endif
for (i = 0; i < iknsupc; ++i)
z_add(&dest[i + j*iknsupc],
&dest[i + j*iknsupc],
&lsum[i + il + j*iknsupc]);
// if ( !brecv[ik] ) { /* Becomes a leaf node. */
// bmod[ik] = -1; /* Do not solve X[k] in the future. */
lk1 = LBj( gik, grid ); /* Local block number. */
lsub = Llu->Lrowind_bc_ptr[lk1];
lusup = Llu->Lnzval_bc_ptr[lk1];
nsupr = lsub[1];
if(Llu->inv == 1){
Uinv = Llu->Uinv_bc_ptr[lk1];
#ifdef _CRAY
CGEMM( ftcs2, ftcs2, &iknsupc, &nrhs, &iknsupc,
&alpha, Uinv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#elif defined (USE_VENDOR_BLAS)
zgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Uinv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc, 1, 1 );
#else
zgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Uinv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#endif
#ifdef _OPENMP
#pragma omp simd
#endif
for (i=0 ; i<iknsupc*nrhs ; i++){
z_copy(&x[ii+i],&rtemp_loc[i]);
}
}else{
#ifdef _CRAY
CTRSM(ftcs1, ftcs3, ftcs2, ftcs2, &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc);
#elif defined (USE_VENDOR_BLAS)
ztrsm_("L", "U", "N", "N", &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc, 1, 1, 1, 1);
#else
ztrsm_("L", "U", "N", "N", &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc);
#endif
}
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id]->utime[SOL_TRSM] += t2;
#endif
stat[thread_id]->ops[SOLVE] += 4 * iknsupc * (iknsupc + 1) * nrhs
+ 10 * knsupc * nrhs; /* complex division */
#if ( DEBUGlevel>=2 )
printf("(%2d) Solve X[%2d]\n", iam, gik);
#endif
/*
* Send Xk to process column Pc[k].
*/
// for (i=0 ; i<iknsupc*nrhs ; i++){
// printf("xre: %f\n",x[ii+i]);
// fflush(stdout);
// }
if(UBtree_ptr[lk1]!=NULL){
BcTree_forwardMessageSimple(UBtree_ptr[lk1],&x[ii - XK_H],BcTree_GetMsgSize(UBtree_ptr[lk1],'z')*nrhs+XK_H,'z');
}
/*
* Perform local block modifications.
*/
if ( Urbs[lk1] ){
// #ifdef _OPENMP
// #pragma omp task firstprivate (Ucb_indptr,Ucb_valptr,Llu,sizelsum,ii,gik,x,rtemp,bmod,Urbs,lsum,stat,nrhs,grid,xsup) untied
// #endif
{
zlsum_bmod_inv_master(lsum, x, &x[ii], rtemp, nrhs, gik, bmod, Urbs,
Ucb_indptr, Ucb_valptr, xsup, grid, Llu,
stat, sizelsum,sizertemp,thread_id,num_thread);
}
}
// } /* if brecv[ik] == 0 */
}
} /* if bmod[ik] == 0 */
}
} /* zlsum_bmod_inv_master */
|
LBMClass.h | #ifndef LIDDRIVENCAVITYLBM_LBMCLASS_H
#define LIDDRIVENCAVITYLBM_LBMCLASS_H
#include <iostream>
#include <vector>
#include <math.h>
#include <omp.h>
using namespace std;
extern const double Re;
extern const unsigned int N;
extern const double Utop;
extern const double Ubot;
extern const double Vlef;
extern const double Vrig;
extern const int SAVETXT;
extern const double THRESH;
extern const int THREADS;
extern const int MBounds;
extern const int MCollide;
extern const int BC;
extern const unsigned int Q;
extern const double MAXITER;
extern const double NU;
extern const double TAU;
extern const double MAGIC;
extern const double RHO0;
extern const unsigned int N2;
extern const unsigned int NQ;
extern const double w[9];
extern const int c[9][2];
extern const int opp[9];
extern const int half[4];
extern const int prntInt;
extern const unsigned int Nm1;
extern const unsigned int Nm2;
extern const int GM[9][9];
extern const double GMinv[9][9];
extern double start,stop;
class LBMClass{
public:
LBMClass(): _f1(NQ,0.0), _f2(NQ,0.0), _fstar(NQ, 0.0),
_u1(N2, 0.0) ,_u2(N2, 0.0), _rho(N2, RHO0), _x(N, 0.0), _error(100, 0.0),_vmag(N2,0.0),_stress(N2, 0.0), _vort(N2, 0.0),_df(),_df0(), _filename1(),_filename2(),
_OMEGA(0.0), _OMEGAm(0.0), _CS(0.0), _MACH(0.0), _rhobar(1.0),_omega_e(),_omega_eps(), _omega_q(),_omega_nu(), _GS{}
{
// Initialize x
if (MBounds == 0)
linspace(_x,(0.5 / N), (N - 0.5) / N,N);
else
linspace(_x, 0.0,1.0 ,N);
_CS = 1.0 / sqrt(3.0);
_MACH = Utop / _CS;
_OMEGA = 1.0 / TAU;
_OMEGAm = 1.0 / (0.5 + ((MAGIC) / ((1.0 / (_OMEGA)) - 0.5)));
_omega_e = 1.1; // Bulk viscosity
_omega_eps = 1.1; // free parameter
_omega_q = 1.1; // free parameter
_omega_nu = _OMEGA; // Shear viscosity
_GS[0] = 0.0;
_GS[1] = _omega_e;
_GS[2] = _omega_eps;
_GS[3] = 0.0;
_GS[4] = _omega_q;
_GS[5] = 0.0;
_GS[6] = _omega_q;
_GS[7] = _omega_nu;
_GS[8] = _omega_nu;
sprintf(_filename1,"Solution_n=%d_Re=%.0f_BCM=%d_CM=%d_U=%0.2f.dat",N,Re,MBounds,MCollide, Utop);
sprintf(_filename2,"Error_n=%d_Re=%.0f_BCM=%d_CM=%d_U=%0.2f.txt",N,Re,MBounds,MCollide,Utop);
// Initialize f
int ind1,ind2;
#pragma omp parallel for private(ind1,ind2) collapse(2)
for (unsigned int i = 0; i < N; i++) {
for (unsigned int j = 0; j < N; j++) {
ind1 = LU2(i, j);
for (unsigned int k = 0; k < Q; k++) {
ind2 = LU3(i, j, k);
_f1[ind2] = calcfeq(k,ind1,1);
_f2[ind2] = _f1[ind2];
}
}
}
// Print parameters
printf("Re =\t%.0f\n", Re);
printf("U =\t%.3e\n", Utop);
printf("M =\t%.3e\n", Utop * sqrt(3));
printf("N =\t%d\n", N);
printf("tau =\t%.3e\n", TAU);
printf("nu =\t%.3e\n", NU);
}
// Collide Methods
inline void collideSRT() {
int ind1{}, ind2{};
double Fsource{};
#pragma omp parallel for private(ind1, ind2,Fsource) collapse(2)
for (unsigned int i = 0; i < N; i++) {
for (unsigned int j = 0; j < N; j++) {
ind1 = LU2(i, j);
for (unsigned int k = 0; k < Q; k++) {
ind2 = LU3(i, j, k);
_fstar[ind2] = (1.0 - _OMEGA) * _f1[ind2] + _OMEGA * calcfeq(k, ind1,1);
if (BC == 1){
Fsource = (1.0 - 0.5 * _OMEGA) * w[k] *
(3.0 * (c[k][0] - _u1[ind1]) + 9.0 * (c[k][0] * _u1[ind1] + c[k][1] * _u2[ind1]) * c[k][0]) * _forceX +
(3.0 * (c[k][1] - _u2[ind1]) + 9.0 * (c[k][0] * _u1[ind1] + c[k][1] * _u2[ind1]) * c[k][1]) * _forceY;
_fstar[ind2] += Fsource;
}
}
}
}
if (BC == 0 || BC == 1)
virtualnode();
}
inline void collideTRT(){
double fplus{},fminus{},feqplus{},feqminus{},feq[9]{};
int ind1{},l{},notl{};
#pragma omp parallel for private(fplus,fminus,feqplus,feqminus,feq,l,notl,ind1) collapse(2)
for (unsigned int i = 0; i < N; i++){
for (unsigned int j = 0; j < N; j++){
ind1 = LU2(i, j);
for (unsigned int k = 0; k < Q; k++)
feq[k] = calcfeq(k,ind1,1);
// Rest population
fplus = _f1[LU3(i,j,0)];
feqplus = feq[0];
_fstar[LU3(i, j, 0)] = _f1[LU3(i, j, 0)] - _OMEGA * (fplus - feqplus);
for (unsigned int k = 0; k < 4; k++) {
l = half[k];
notl = opp[l];
fplus = 0.5 * (_f1[LU3(i,j,l)] + _f1[LU3(i,j,notl)]);
fminus = 0.5 * (_f1[LU3(i,j,l)] - _f1[LU3(i,j,notl)]);
feqplus = 0.5 * (feq[l] + feq[notl]);
feqminus = 0.5 * (feq[l] - feq[notl]);
fplus = _OMEGA * (fplus - feqplus);
fminus = _OMEGAm * (fminus - feqminus);
_fstar[LU3(i, j, l)] = _f1[LU3(i, j, l)] - fplus - fminus;
_fstar[LU3(i, j, notl)] = _f1[LU3(i, j, notl)] - fplus + fminus;
}
}
}
if (BC == 0 || BC == 1)
virtualnode();
}
inline void collideMRT(){
#pragma omp parallel
{
int ind1;
vector<double> _meq(Q,0.0),_mstar(Q,0.0); // moments
double _m{}; // moments
#pragma omp for collapse(2)
for (unsigned int i = 0; i < N; i++) {
for (unsigned int j = 0; j < N; j++) {
ind1 = LU2(i,j);
calcmeq(_meq, _u1[ind1], _u2[ind1], _rho[ind1]);
for (unsigned int k = 0; k < Q; k++){
_m = GM[k][0] * _f1[LU3(i,j,0)] + GM[k][1] * _f1[LU3(i,j,1)] + GM[k][2] * _f1[LU3(i,j,2)] +GM[k][3] * _f1[LU3(i,j,3)] + GM[k][4] * _f1[LU3(i,j,4)]
+ GM[k][5] * _f1[LU3(i,j,5)] + GM[k][6] * _f1[LU3(i,j,6)] + GM[k][7] * _f1[LU3(i,j,7)] + GM[k][8] * _f1[LU3(i,j,8)];
_mstar[k] = _m - _GS[k] * (_m - _meq[k]);
}
if (BC == 1) {
_mstar[1] += (1.0 - 0.5 * _GS[1]) * (6.0 * (_forceX * _u1[ind1] + _forceY * _u2[ind1]));
_mstar[2] += (1.0 - 0.5 * _GS[2]) * (-6.0 * (_forceX * _u1[ind1] + _forceY * _u2[ind1]));
_mstar[3] += (1.0 - 0.5 * _GS[3]) * (_forceX);
_mstar[4] += (1.0 - 0.5 * _GS[4]) * (-_forceX);
_mstar[5] += (1.0 - 0.5 * _GS[5]) * (_forceY);
_mstar[6] += (1.0 - 0.5 * _GS[6]) * (-_forceY);
_mstar[7] += (1.0 - 0.5 * _GS[7]) * (2.0 * (_forceX * _u1[ind1] - _forceY * _u2[ind1]));
_mstar[8] += (1.0 - 0.5 * _GS[8]) * (_forceY * _u1[ind1] + _forceX * _u2[ind1]);
}
for (unsigned int k = 0; k < Q; k++){
_fstar[LU3(i, j, k)] = GMinv[k][0] * _mstar[0] + GMinv[k][1] * _mstar[1] + GMinv[k][2] * _mstar[2] + GMinv[k][3] * _mstar[3]
+ GMinv[k][4] * _mstar[4] + GMinv[k][5] * _mstar[5] + GMinv[k][6] * _mstar[6] + GMinv[k][7] * _mstar[7] + GMinv[k][8] * _mstar[8];
}
}
}
}
if (BC == 0 || BC == 1)
virtualnode();
}
// Stream Methods
inline void streamPush() {
#pragma omp parallel
{
int inew, jnew;
#pragma omp for collapse(2)
for (unsigned int i = 0; i < N; i++) {
for (unsigned int j = 0; j < N; j++) {
for (unsigned int k = 0; k < Q; k++) {
inew = i + c[k][0];
jnew = j + c[k][1];
if (inew < N && inew >= 0 && jnew < N && jnew >= 0)
_f2[LU3(inew, jnew, k)] = _fstar[LU3(i, j, k)];
}
}
}
}
}
inline void streamPull() {
#pragma omp parallel
{
int iold, jold;
#pragma omp for collapse(2)
for (unsigned int i = 0; i < N; i++) {
for (unsigned int j = 0; j < N; j++) {
for (unsigned int k = 0; k < Q; k++) {
iold = i - c[k][0];
jold = j - c[k][1];
if (iold < N && iold >= 0 && jold < N && jold >= 0)
_f2[LU3(i, j, k)] = _fstar[LU3(iold, jold, k)];
}
}
}
}
}
// Bounday Condition Methods
inline void NEBB(){
switch (BC){
case 2 : { // Lid-driven cavity flow
const double sixth = 1.0 / 6.0, twothirds = 2.0 / 3.0, twelfth = 1.0 / 12.0;
double rho{_rhobar};
for (unsigned int i = 1; i < N - 1; i++) {
// Left wall, general case
rho = _f2[LU3(0, i, 0)] + _f2[LU3(0, i, 2)] + _f2[LU3(0, i, 4)] + 2.0 * (_f2[LU3(0, i, 3)] + _f2[LU3(0, i, 6)] + _f2[LU3(0, i, 7)]);
_f2[LU3(0, i, 1)] = _f2[LU3(0, i, 3)];
_f2[LU3(0, i, 5)] = _f2[LU3(0, i, 7)] - 0.5 * (_f2[LU3(0, i, 2)] - _f2[LU3(0, i, 4)]) + 0.5 * rho * Vlef;
_f2[LU3(0, i, 8)] = _f2[LU3(0, i, 6)] + 0.5 * (_f2[LU3(0, i, 2)] - _f2[LU3(0, i, 4)]) - 0.5 * rho * Vlef;
// Right wall, general case
rho = _f2[LU3(Nm1, i, 0)] + _f2[LU3(Nm1, i, 2)] + _f2[LU3(Nm1, i, 4)] + 2.0 * (_f2[LU3(Nm1, i, 1)] + _f2[LU3(Nm1, i, 5)] + _f2[LU3(Nm1, i, 8)]);
_f2[LU3(Nm1, i, 3)] = _f2[LU3(Nm1, i, 1)];
_f2[LU3(Nm1, i, 7)] = _f2[LU3(Nm1, i, 5)] + 0.5 * (_f2[LU3(Nm1, i, 2)] - _f2[LU3(Nm1, i, 4)]) - 0.5 * rho * Vrig;
_f2[LU3(Nm1, i, 6)] = _f2[LU3(Nm1, i, 8)] - 0.5 * (_f2[LU3(Nm1, i, 2)] - _f2[LU3(Nm1, i, 4)]) + 0.5 * rho * Vrig;
// Top wall, general case
rho = _f2[LU3(i, Nm1, 0)] + _f2[LU3(i, Nm1, 1)] + _f2[LU3(i, Nm1, 3)] + 2.0 * (_f2[LU3(i, Nm1, 2)] + _f2[LU3(i, Nm1, 6)] + _f2[LU3(i, Nm1, 5)]);
_f2[LU3(i, Nm1, 4)] = _f2[LU3(i, Nm1, 2)];
_f2[LU3(i, Nm1, 7)] = _f2[LU3(i, Nm1, 5)] + 0.5 * (_f2[LU3(i, Nm1, 1)] - _f2[LU3(i, Nm1, 3)]) - 0.5 * rho * Utop;
_f2[LU3(i, Nm1, 8)] = _f2[LU3(i, Nm1, 6)] - 0.5 * (_f2[LU3(i, Nm1, 1)] - _f2[LU3(i, Nm1, 3)]) + 0.5 * rho * Utop;
// Bottom wall, general case
rho = _f2[LU3(i, 0, 0)] + _f2[LU3(i, 0, 1)] + _f2[LU3(i, 0, 3)] + 2.0 * (_f2[LU3(i, 0, 4)] + _f2[LU3(i, 0, 7)] + _f2[LU3(i, 0, 8)]);
_f2[LU3(i, 0, 2)] = _f2[LU3(i, 0, 4)];
_f2[LU3(i, 0, 5)] = _f2[LU3(i, 0, 7)] - 0.5 * (_f2[LU3(i, 0, 1)] - _f2[LU3(i, 0, 3)]) + 0.5 * rho * Ubot;
_f2[LU3(i, 0, 6)] = _f2[LU3(i, 0, 8)] + 0.5 * (_f2[LU3(i, 0, 1)] - _f2[LU3(i, 0, 3)]) - 0.5 * rho * Ubot;
}
// Corners
rho = 1.0;
// Bottom Left (0,0) knowns: 1,5,2, unknowns: 0,6,8
_f2[LU3(0, 0, 1)] = _f2[LU3(0, 0, 3)] + twothirds * rho * Ubot;
_f2[LU3(0, 0, 2)] = _f2[LU3(0, 0, 4)] + twothirds * rho * Vlef;
_f2[LU3(0, 0, 5)] = _f2[LU3(0, 0, 7)] + sixth * rho * (Ubot + Vlef);
_f2[LU3(0, 0, 6)] = twelfth * rho * (Vlef - Ubot);
_f2[LU3(0, 0, 8)] = -_f2[LU3(0, 0, 6)];
_f2[LU3(0, 0, 0)] = rho - (_f2[LU3(0, 0, 1)] + _f2[LU3(0, 0, 2)] + _f2[LU3(0, 0, 3)] + _f2[LU3(0, 0, 4)] + _f2[LU3(0, 0, 5)] + _f2[LU3(0, 0, 6)] + _f2[LU3(0, 0, 7)] + _f2[LU3(0, 0, 8)]);
// Bottom Right (Nm1,0) knowns: 2,3,6, unknowns: 0, 5, 7
// rho = 0.5 * (_rho[LU2(Nm2,0)] + _rho[LU2(Nm1,1)]);
_f2[LU3(Nm1, 0, 2)] = _f2[LU3(Nm1, 0, 4)] + twothirds * rho * Vrig;
_f2[LU3(Nm1, 0, 3)] = _f2[LU3(Nm1, 0, 1)] - twothirds * rho * Ubot;
_f2[LU3(Nm1, 0, 6)] = _f2[LU3(Nm1, 0, 8)] + sixth * rho * (-Ubot + Vrig);
_f2[LU3(Nm1, 0, 5)] = twelfth * rho * (Vrig + Ubot);
_f2[LU3(Nm1, 0, 7)] = -_f2[LU3(Nm1, 0, 5)];
_f2[LU3(Nm1, 0, 0)] = rho - (_f2[LU3(Nm1, 0, 1)] + _f2[LU3(Nm1, 0, 2)] + _f2[LU3(Nm1, 0, 3)] + _f2[LU3(Nm1, 0, 4)] + _f2[LU3(Nm1, 0, 5)] + _f2[LU3(Nm1, 0, 6)] + _f2[LU3(Nm1, 0, 7)] + _f2[LU3(Nm1, 0, 8)]);
// Top Left (0,Nm1) knowns: 1,4,8, unknowns: 0, 5, 7
// rho = 0.5 * (_rho[LU2(0, Nm2)] + _rho[LU2(1, Nm1)]);
_f2[LU3(0, Nm1, 1)] = _f2[LU3(0, Nm1, 3)] + twothirds * rho * Utop;
_f2[LU3(0, Nm1, 4)] = _f2[LU3(0, Nm1, 2)] - twothirds * rho * Vlef;
_f2[LU3(0, Nm1, 8)] = _f2[LU3(0, Nm1, 6)] + sixth * rho * (Utop - Vlef);
_f2[LU3(0, Nm1, 5)] = twelfth * rho * (Vlef + Utop);
_f2[LU3(0, Nm1, 7)] = -_f2[LU3(0, Nm1, 5)];
_f2[LU3(0, Nm1, 0)] = rho - (_f2[LU3(0, Nm1, 1)] + _f2[LU3(0, Nm1, 2)] + _f2[LU3(0, Nm1, 3)] + _f2[LU3(0, Nm1, 4)] + _f2[LU3(0, Nm1, 5)] + _f2[LU3(0, Nm1, 6)] + _f2[LU3(0, Nm1, 7)] + _f2[LU3(0, Nm1, 8)]);
// Top Right (Nm1,Nm1) knowns: 3,7,4, unknowns: 0, 6, 8
// rho = 0.5 * (_rho[LU2(Nm2, Nm1)] + _rho[LU2(Nm1, Nm2)]);
_f2[LU3(Nm1, Nm1, 4)] = _f2[LU3(Nm1, Nm1, 2)] - twothirds * rho * Vrig;
_f2[LU3(Nm1, Nm1, 3)] = _f2[LU3(Nm1, Nm1, 1)] - twothirds * rho * Utop;
_f2[LU3(Nm1, Nm1, 7)] = _f2[LU3(Nm1, Nm1, 5)] - sixth * rho * (Utop + Vrig);
_f2[LU3(Nm1, Nm1, 6)] = twelfth * rho * (Vrig - Utop);
_f2[LU3(Nm1, Nm1, 8)] = -_f2[LU3(Nm1, Nm1, 6)];
_f2[LU3(Nm1, Nm1, 0)] = rho - (_f2[LU3(Nm1, Nm1, 1)] + _f2[LU3(Nm1, Nm1, 2)] + _f2[LU3(Nm1, Nm1, 3)] + _f2[LU3(Nm1, Nm1, 4)] + _f2[LU3(Nm1, Nm1, 5)] + _f2[LU3(Nm1, Nm1, 6)] + _f2[LU3(Nm1, Nm1, 7)] + _f2[LU3(Nm1, Nm1, 8)]);
break;
}
case 1: { // Poiseuille Flow
for (unsigned int i = 0; i < N; i++) {
// Top wall, general case
_f2[LU3(i, Nm1, 4)] = _f2[LU3(i, Nm1, 2)];
_f2[LU3(i, Nm1, 7)] = _f2[LU3(i, Nm1, 5)] + 0.5 * (_f2[LU3(i, Nm1, 1)] - _f2[LU3(i, Nm1, 3)]);
_f2[LU3(i, Nm1, 8)] = _f2[LU3(i, Nm1, 6)] - 0.5 * (_f2[LU3(i, Nm1, 1)] - _f2[LU3(i, Nm1, 3)]);
// Bottom wall, general case
_f2[LU3(i, 0, 2)] = _f2[LU3(i, 0, 4)];
_f2[LU3(i, 0, 5)] = _f2[LU3(i, 0, 7)] - 0.5 * (_f2[LU3(i, 0, 1)] - _f2[LU3(i, 0, 3)]);
_f2[LU3(i, 0, 6)] = _f2[LU3(i, 0, 8)] + 0.5 * (_f2[LU3(i, 0, 1)] - _f2[LU3(i, 0, 3)]);
}
break;
}
case 0: { // Couette Flow
double rho{};
for (unsigned int i = 0; i < N; i++) {
// Top wall, general case
rho = _f2[LU3(i, Nm1, 0)] + _f2[LU3(i, Nm1, 1)] + _f2[LU3(i, Nm1, 3)] + 2.0 * (_f2[LU3(i, Nm1, 2)] + _f2[LU3(i, Nm1, 6)] + _f2[LU3(i, Nm1, 5)]);
_f2[LU3(i, Nm1, 4)] = _f2[LU3(i, Nm1, 2)];
_f2[LU3(i, Nm1, 7)] = _f2[LU3(i, Nm1, 5)] + 0.5 * (_f2[LU3(i, Nm1, 1)] - _f2[LU3(i, Nm1, 3)]) - 0.5 * rho * Utop;
_f2[LU3(i, Nm1, 8)] = _f2[LU3(i, Nm1, 6)] - 0.5 * (_f2[LU3(i, Nm1, 1)] - _f2[LU3(i, Nm1, 3)]) + 0.5 * rho * Utop;
// Bottom wall, general case
rho = _f2[LU3(i, 0, 0)] + _f2[LU3(i, 0, 1)] + _f2[LU3(i, 0, 3)] + 2.0 * (_f2[LU3(i, 0, 4)] + _f2[LU3(i, 0, 7)] + _f2[LU3(i, 0, 8)]);
_f2[LU3(i, 0, 2)] = _f2[LU3(i, 0, 4)];
_f2[LU3(i, 0, 5)] = _f2[LU3(i, 0, 7)] - 0.5 * (_f2[LU3(i, 0, 1)] - _f2[LU3(i, 0, 3)]) + 0.5 * rho * Ubot;
_f2[LU3(i, 0, 6)] = _f2[LU3(i, 0, 8)] + 0.5 * (_f2[LU3(i, 0, 1)] - _f2[LU3(i, 0, 3)]) - 0.5 * rho * Ubot;
}
break;
}
default: {
std::printf("Error: Invalid Boundary condition case number\n");
exit(1);
}
}
}
// Non-equilibrium extrapolation
inline void NEE() {
switch (BC){
case 2: { // Lid-driven cavity flow
#pragma omp parallel
{
double rhof{},u1f{},u2f{};
double rhowall{1.0};
#pragma omp for
for (unsigned int i = 1; i < N-1; i++){
// Bottom wall, (i, 0)
rhowall = _f2[LU3(i,0,0)] +_f2[LU3(i,0,1)] +_f2[LU3(i,0,3)] + 2.0 * (_f2[LU3(i,0,4)] +_f2[LU3(i,0,7)] +_f2[LU3(i,0,8)]);
rhof = _f2[LU3(i,1,0)] + _f2[LU3(i,1,1)] + _f2[LU3(i,1,2)] + _f2[LU3(i,1,3)] + _f2[LU3(i,1,4)] + _f2[LU3(i,1,5)]+ _f2[LU3(i,1,6)]+ _f2[LU3(i,1,7)]+ _f2[LU3(i,1,8)];
u1f = ((_f2[LU3(i,1,1)] + _f2[LU3(i,1,5)] + _f2[LU3(i,1,8)]) - (_f2[LU3(i,1,3)] + _f2[LU3(i,1,6)] + _f2[LU3(i,1,7)])) / rhof;
u2f = ((_f2[LU3(i,1,2)] + _f2[LU3(i,1,5)] + _f2[LU3(i,1,6)]) - (_f2[LU3(i,1,4)] + _f2[LU3(i,1,7)] + _f2[LU3(i,1,8)])) / rhof;
for (unsigned int k = 0; k < Q; k++)
_f2[LU3(i,0,k)] = calcfeq(k,Ubot,0.0,rhowall,1) + (_f2[LU3(i,1,k)] - calcfeq(k, u1f, u2f, rhof,1));
// Top Wall, (i,Nm1)
rhowall = _f2[LU3(i,Nm1,0)] +_f2[LU3(i,Nm1,1)] +_f2[LU3(i,Nm1,3)] + 2.0 * (_f2[LU3(i,Nm1,2)] +_f2[LU3(i,Nm1,6)] +_f2[LU3(i,Nm1,5)]);
rhof = _f2[LU3(i,Nm2,0)] + _f2[LU3(i,Nm2,1)] + _f2[LU3(i,Nm2,2)] + _f2[LU3(i,Nm2,3)] + _f2[LU3(i,Nm2,4)] + _f2[LU3(i,Nm2,5)]+ _f2[LU3(i,Nm2,6)]+ _f2[LU3(i,Nm2,7)]+ _f2[LU3(i,Nm2,8)];
u1f = ((_f2[LU3(i,Nm2,1)] + _f2[LU3(i,Nm2,5)] + _f2[LU3(i,Nm2,8)]) - (_f2[LU3(i,Nm2,3)] + _f2[LU3(i,Nm2,6)] + _f2[LU3(i,Nm2,7)])) / rhof;
u2f = ((_f2[LU3(i,Nm2,2)] + _f2[LU3(i,Nm2,5)] + _f2[LU3(i,Nm2,6)]) - (_f2[LU3(i,Nm2,4)] + _f2[LU3(i,Nm2,7)] + _f2[LU3(i,Nm2,8)])) / rhof;
for (unsigned int k = 0; k < Q; k++)
_f2[LU3(i,Nm1,k)] = calcfeq(k,Utop,0.0,rhowall,1) + (_f2[LU3(i,Nm2,k)] - calcfeq(k, u1f, u2f, rhof,1));
// Left wall, (0,i)
rhowall = _f2[LU3(0,i,0)] +_f2[LU3(0,i,2)] +_f2[LU3(0,i,4)] + 2.0 * (_f2[LU3(0,i,3)] +_f2[LU3(0,i,6)] +_f2[LU3(0,i,7)]);
rhof = _f2[LU3(1,i,0)] + _f2[LU3(1,i,1)] + _f2[LU3(1,i,2)] + _f2[LU3(1,i,3)] + _f2[LU3(1,i,4)] + _f2[LU3(1,i,5)]+ _f2[LU3(1,i,6)]+ _f2[LU3(1,i,7)]+ _f2[LU3(1,i,8)];
u1f = ((_f2[LU3(1,i,1)] + _f2[LU3(1,i,5)] + _f2[LU3(1,i,8)]) - (_f2[LU3(1,i,3)] + _f2[LU3(1,i,6)] + _f2[LU3(1,i,7)])) / rhof;
u2f = ((_f2[LU3(1,i,2)] + _f2[LU3(1,i,5)] + _f2[LU3(1,i,6)]) - (_f2[LU3(1,i,4)] + _f2[LU3(1,i,7)] + _f2[LU3(1,i,8)])) / rhof;
for (unsigned int k = 0; k < Q; k++)
_f2[LU3(0,i,k)] = calcfeq(k,0.0,Vlef,rhowall,1) + (_f2[LU3(1,i,k)] - calcfeq(k, u1f, u2f, rhof,1));
// Right Wall (Nm1, i)
rhowall = _f2[LU3(Nm1,i,0)] +_f2[LU3(Nm1,i,2)] +_f2[LU3(Nm1,i,4)] + 2.0 * (_f2[LU3(Nm1,i,1)] +_f2[LU3(Nm1,i,5)] +_f2[LU3(Nm1,i,8)]);
rhof = _f2[LU3(Nm2,i,0)] + _f2[LU3(Nm2,i,1)] + _f2[LU3(Nm2,i,2)] + _f2[LU3(Nm2,i,3)] + _f2[LU3(Nm2,i,4)] + _f2[LU3(Nm2,i,5)]+ _f2[LU3(Nm2,i,6)]+ _f2[LU3(Nm2,i,7)]+ _f2[LU3(Nm2,i,8)];
u1f = ((_f2[LU3(Nm2,i,1)] + _f2[LU3(Nm2,i,5)] + _f2[LU3(Nm2,i,8)]) - (_f2[LU3(Nm2,i,3)] + _f2[LU3(Nm2,i,6)] + _f2[LU3(Nm2,i,7)])) / rhof;
u2f = ((_f2[LU3(Nm2,i,2)] + _f2[LU3(Nm2,i,5)] + _f2[LU3(Nm2,i,6)]) - (_f2[LU3(Nm2,i,4)] + _f2[LU3(Nm2,i,7)] + _f2[LU3(Nm2,i,8)])) / rhof;
for (unsigned int k = 0; k < Q; k++)
_f2[LU3(Nm1,i,k)] = calcfeq(k,0.0,Vrig,rhowall,1) + (_f2[LU3(Nm2,i,k)] - calcfeq(k, u1f, u2f, rhof,1));
}
// Corners
// Bottom left (0,0)
rhof = _f2[LU3(1,1,0)] + _f2[LU3(1,1,1)] + _f2[LU3(1,1,2)] + _f2[LU3(1,1,3)] + _f2[LU3(1,1,4)] + _f2[LU3(1,1,5)]+ _f2[LU3(1,1,6)]+ _f2[LU3(1,1,7)]+ _f2[LU3(1,1,8)];
u1f = ((_f2[LU3(1,1,1)] + _f2[LU3(1,1,5)] + _f2[LU3(1,1,8)]) - (_f2[LU3(1,1,3)] + _f2[LU3(1,1,6)] + _f2[LU3(1,1,7)])) / rhof;
u2f = ((_f2[LU3(1,1,2)] + _f2[LU3(1,1,5)] + _f2[LU3(1,1,6)]) - (_f2[LU3(1,1,4)] + _f2[LU3(1,1,7)] + _f2[LU3(1,1,8)])) / rhof;
rhowall = 1.0;
#pragma omp for
for (unsigned int k = 0; k < Q; k++)
_f2[LU3(0,0,k)] = calcfeq(k,Ubot,Vlef,rhowall,1) + (_f2[LU3(1,1,k)] - calcfeq(k, u1f, u2f, rhof,1));
// Bottom Right (Nm1,0)
rhof = _f2[LU3(Nm2,1,0)] + _f2[LU3(Nm2,1,1)] + _f2[LU3(Nm2,1,2)] + _f2[LU3(Nm2,1,3)] + _f2[LU3(Nm2,1,4)] + _f2[LU3(Nm2,1,5)]+ _f2[LU3(Nm2,1,6)]+ _f2[LU3(Nm2,1,7)]+ _f2[LU3(Nm2,1,8)];
u1f = ((_f2[LU3(Nm2,1,1)] + _f2[LU3(Nm2,1,5)] + _f2[LU3(Nm2,1,8)]) - (_f2[LU3(Nm2,1,3)] + _f2[LU3(Nm2,1,6)] + _f2[LU3(Nm2,1,7)])) / rhof;
u2f = ((_f2[LU3(Nm2,1,2)] + _f2[LU3(Nm2,1,5)] + _f2[LU3(Nm2,1,6)]) - (_f2[LU3(Nm2,1,4)] + _f2[LU3(Nm2,1,7)] + _f2[LU3(Nm2,1,8)])) / rhof;
#pragma omp for
for (unsigned int k = 0; k < Q; k++)
_f2[LU3(Nm1,0,k)] = calcfeq(k,Ubot,Vrig,rhowall,1) + (_f2[LU3(Nm2,1,k)] - calcfeq(k, u1f, u2f, rhof,1));
// Top Right (Nm1,Nm1)
rhof = _f2[LU3(Nm2,Nm2,0)] + _f2[LU3(Nm2,Nm2,1)] + _f2[LU3(Nm2,Nm2,2)] + _f2[LU3(Nm2,Nm2,3)] + _f2[LU3(Nm2,Nm2,4)] + _f2[LU3(Nm2,Nm2,5)]+ _f2[LU3(Nm2,Nm2,6)]+ _f2[LU3(Nm2,Nm2,7)]+ _f2[LU3(Nm2,Nm2,8)];
u1f = ((_f2[LU3(Nm2,Nm2,1)] + _f2[LU3(Nm2,Nm2,5)] + _f2[LU3(Nm2,Nm2,8)]) - (_f2[LU3(Nm2,Nm2,3)] + _f2[LU3(Nm2,Nm2,6)] + _f2[LU3(Nm2,Nm2,7)])) / rhof;
u2f = ((_f2[LU3(Nm2,Nm2,2)] + _f2[LU3(Nm2,Nm2,5)] + _f2[LU3(Nm2,Nm2,6)]) - (_f2[LU3(Nm2,Nm2,4)] + _f2[LU3(Nm2,Nm2,7)] + _f2[LU3(Nm2,Nm2,8)])) / rhof;
#pragma omp for
for (unsigned int k = 0; k < Q; k++)
_f2[LU3(Nm1,Nm1,k)] = calcfeq(k,Utop,Vrig,rhowall,1) + (_f2[LU3(Nm2,Nm2,k)] - calcfeq(k, u1f, u2f, rhof,1));
// Top Left (0,Nm1)
rhof = _f2[LU3(1,Nm2,0)] + _f2[LU3(1,Nm2,1)] + _f2[LU3(1,Nm2,2)] + _f2[LU3(1,Nm2,3)] + _f2[LU3(1,Nm2,4)] + _f2[LU3(1,Nm2,5)]+ _f2[LU3(1,Nm2,6)]+ _f2[LU3(1,Nm2,7)]+ _f2[LU3(1,Nm2,8)];
u1f = ((_f2[LU3(1,Nm2,1)] + _f2[LU3(1,Nm2,5)] + _f2[LU3(1,Nm2,8)]) - (_f2[LU3(1,Nm2,3)] + _f2[LU3(1,Nm2,6)] + _f2[LU3(1,Nm2,7)])) / rhof;
u2f = ((_f2[LU3(1,Nm2,2)] + _f2[LU3(1,Nm2,5)] + _f2[LU3(1,Nm2,6)]) - (_f2[LU3(1,Nm2,4)] + _f2[LU3(1,Nm2,7)] + _f2[LU3(1,Nm2,8)])) / rhof;
#pragma omp for
for (unsigned int k = 0; k < Q; k++)
_f2[LU3(0,Nm1,k)] = calcfeq(k,Utop,Vlef,rhowall,1) + (_f2[LU3(1,Nm2,k)] - calcfeq(k, u1f, u2f, rhof,1));
};
break;
}
case 1: { // Poiseuille Flow
#pragma omp parallel
{
double rhof{},u1f{},u2f{};
double rhowall{1.0};
#pragma omp for
for (unsigned int i = 1; i < N-1; i++){
// Bottom wall, (i, 0)
rhowall = _f2[LU3(i,0,0)] +_f2[LU3(i,0,1)] +_f2[LU3(i,0,3)] + 2.0 * (_f2[LU3(i,0,4)] +_f2[LU3(i,0,7)] +_f2[LU3(i,0,8)]);
rhof = _f2[LU3(i,1,0)] + _f2[LU3(i,1,1)] + _f2[LU3(i,1,2)] + _f2[LU3(i,1,3)] + _f2[LU3(i,1,4)] + _f2[LU3(i,1,5)]+ _f2[LU3(i,1,6)]+ _f2[LU3(i,1,7)]+ _f2[LU3(i,1,8)];
u1f = ((_f2[LU3(i,1,1)] + _f2[LU3(i,1,5)] + _f2[LU3(i,1,8)]) - (_f2[LU3(i,1,3)] + _f2[LU3(i,1,6)] + _f2[LU3(i,1,7)])) / rhof;
u2f = ((_f2[LU3(i,1,2)] + _f2[LU3(i,1,5)] + _f2[LU3(i,1,6)]) - (_f2[LU3(i,1,4)] + _f2[LU3(i,1,7)] + _f2[LU3(i,1,8)])) / rhof;
for (unsigned int k = 0; k < Q; k++)
_f2[LU3(i,0,k)] = calcfeq(k,0.0,0.0,rhowall,1) + (_f2[LU3(i,1,k)] - calcfeq(k, u1f, u2f, rhof,1));
// Top Wall, (i,Nm1)
rhowall = _f2[LU3(i,Nm1,0)] +_f2[LU3(i,Nm1,1)] +_f2[LU3(i,Nm1,3)] + 2.0 * (_f2[LU3(i,Nm1,2)] +_f2[LU3(i,Nm1,6)] +_f2[LU3(i,Nm1,5)]);
rhof = _f2[LU3(i,Nm2,0)] + _f2[LU3(i,Nm2,1)] + _f2[LU3(i,Nm2,2)] + _f2[LU3(i,Nm2,3)] + _f2[LU3(i,Nm2,4)] + _f2[LU3(i,Nm2,5)]+ _f2[LU3(i,Nm2,6)]+ _f2[LU3(i,Nm2,7)]+ _f2[LU3(i,Nm2,8)];
u1f = ((_f2[LU3(i,Nm2,1)] + _f2[LU3(i,Nm2,5)] + _f2[LU3(i,Nm2,8)]) - (_f2[LU3(i,Nm2,3)] + _f2[LU3(i,Nm2,6)] + _f2[LU3(i,Nm2,7)])) / rhof;
u2f = ((_f2[LU3(i,Nm2,2)] + _f2[LU3(i,Nm2,5)] + _f2[LU3(i,Nm2,6)]) - (_f2[LU3(i,Nm2,4)] + _f2[LU3(i,Nm2,7)] + _f2[LU3(i,Nm2,8)])) / rhof;
for (unsigned int k = 0; k < Q; k++)
_f2[LU3(i,Nm1,k)] = calcfeq(k,0.0,0.0,rhowall,1) + (_f2[LU3(i,Nm2,k)] - calcfeq(k, u1f, u2f, rhof,1));
}
}
break;
}
case 0: { // Couette Flow
#pragma omp parallel
{
double rhof{},u1f{},u2f{};
double rhowall{1.0};
#pragma omp for
for (unsigned int i = 1; i < N-1; i++){
// Bottom wall, (i, 0)
rhowall = _f2[LU3(i,0,0)] +_f2[LU3(i,0,1)] +_f2[LU3(i,0,3)] + 2.0 * (_f2[LU3(i,0,4)] +_f2[LU3(i,0,7)] +_f2[LU3(i,0,8)]);
rhof = _f2[LU3(i,1,0)] + _f2[LU3(i,1,1)] + _f2[LU3(i,1,2)] + _f2[LU3(i,1,3)] + _f2[LU3(i,1,4)] + _f2[LU3(i,1,5)]+ _f2[LU3(i,1,6)]+ _f2[LU3(i,1,7)]+ _f2[LU3(i,1,8)];
u1f = ((_f2[LU3(i,1,1)] + _f2[LU3(i,1,5)] + _f2[LU3(i,1,8)]) - (_f2[LU3(i,1,3)] + _f2[LU3(i,1,6)] + _f2[LU3(i,1,7)])) / rhof;
u2f = ((_f2[LU3(i,1,2)] + _f2[LU3(i,1,5)] + _f2[LU3(i,1,6)]) - (_f2[LU3(i,1,4)] + _f2[LU3(i,1,7)] + _f2[LU3(i,1,8)])) / rhof;
for (unsigned int k = 0; k < Q; k++)
_f2[LU3(i,0,k)] = calcfeq(k,Ubot,0.0,rhowall,1) + (_f2[LU3(i,1,k)] - calcfeq(k, u1f, u2f, rhof,1));
// Top Wall, (i,Nm1)
rhowall = _f2[LU3(i,Nm1,0)] +_f2[LU3(i,Nm1,1)] +_f2[LU3(i,Nm1,3)] + 2.0 * (_f2[LU3(i,Nm1,2)] +_f2[LU3(i,Nm1,6)] +_f2[LU3(i,Nm1,5)]);
rhof = _f2[LU3(i,Nm2,0)] + _f2[LU3(i,Nm2,1)] + _f2[LU3(i,Nm2,2)] + _f2[LU3(i,Nm2,3)] + _f2[LU3(i,Nm2,4)] + _f2[LU3(i,Nm2,5)]+ _f2[LU3(i,Nm2,6)]+ _f2[LU3(i,Nm2,7)]+ _f2[LU3(i,Nm2,8)];
u1f = ((_f2[LU3(i,Nm2,1)] + _f2[LU3(i,Nm2,5)] + _f2[LU3(i,Nm2,8)]) - (_f2[LU3(i,Nm2,3)] + _f2[LU3(i,Nm2,6)] + _f2[LU3(i,Nm2,7)])) / rhof;
u2f = ((_f2[LU3(i,Nm2,2)] + _f2[LU3(i,Nm2,5)] + _f2[LU3(i,Nm2,6)]) - (_f2[LU3(i,Nm2,4)] + _f2[LU3(i,Nm2,7)] + _f2[LU3(i,Nm2,8)])) / rhof;
for (unsigned int k = 0; k < Q; k++)
_f2[LU3(i,Nm1,k)] = calcfeq(k,Utop,0.0,rhowall,1) + (_f2[LU3(i,Nm2,k)] - calcfeq(k, u1f, u2f, rhof,1));
}
}
break;
}
default: {
std::printf("Error: Invalid Boundary condition case number\n");
exit(1);
}
}
};
inline void HWBB(){
double rhowall = 1.0;
switch (BC){
case 2: { // Lid-driven cavity flow
#pragma omp parallel for
for (unsigned int i = 1; i < N-1; i++){
// Bottom wall
_f2[LU3(i, 0, 6)] = _fstar[LU3(i, 0, 8)] - 2.0 * w[8] * rhowall * c[8][0] * Ubot * 3.0;
_f2[LU3(i, 0, 2)] = _fstar[LU3(i, 0, 4)];
_f2[LU3(i, 0, 5)] = _fstar[LU3(i, 0, 7)] - 2.0 * w[7] * rhowall * c[7][0] * Ubot * 3.0;
// Top wall
_f2[LU3(i, Nm1, 8)] = _fstar[LU3(i, Nm1, 6)] - 2.0 * w[6] * rhowall * c[6][0] * Utop * 3.0;
_f2[LU3(i, Nm1, 4)] = _fstar[LU3(i, Nm1, 2)];
_f2[LU3(i, Nm1, 7)] = _fstar[LU3(i, Nm1, 5)] - 2.0 * w[5] * rhowall * c[5][0] * Utop * 3.0;
// Left wall
_f2[LU3(0, i, 5)] = _fstar[LU3(0, i, 7)] - 2.0 * w[7] * rhowall * c[7][1] * Vlef * 3.0;
_f2[LU3(0, i, 1)] = _fstar[LU3(0, i, 3)];
_f2[LU3(0, i, 8)] = _fstar[LU3(0, i, 6)] - 2.0 * w[6] * rhowall * c[6][1] * Vlef * 3.0;
// Right wall
_f2[LU3(Nm1, i, 7)] = _fstar[LU3(Nm1, i, 5)] - 2.0 * w[5] * rhowall * c[5][1] * Vrig * 3.0;
_f2[LU3(Nm1, i, 3)] = _fstar[LU3(Nm1, i, 1)];
_f2[LU3(Nm1, i, 6)] = _fstar[LU3(Nm1, i, 8)] - 2.0 * w[8] * rhowall * c[8][1] * Vrig * 3.0;
}
// Corners
// Top left
_f2[LU3(0, Nm1, 1)] = _fstar[LU3(0, Nm1, 3)];
_f2[LU3(0, Nm1, 8)] = _fstar[LU3(0, Nm1, 6)] - 2.0 * w[6] * rhowall * (c[6][0] * Utop + c[6][1] * Vlef) * 3.0;
_f2[LU3(0, Nm1, 4)] = _fstar[LU3(0, Nm1, 2)];
// Bottom Left
_f2[LU3(0, 0, 1)] = _fstar[LU3(0, 0, 3)];
_f2[LU3(0, 0, 5)] = _fstar[LU3(0, 0, 7)] - 2.0 * w[7] * rhowall * (c[7][0] * Ubot + c[7][1] * Vlef) * 3.0;
_f2[LU3(0, 0, 2)] = _fstar[LU3(0, 0, 4)];
// Top Right
_f2[LU3(Nm1, Nm1, 3)] = _fstar[LU3(Nm1, Nm1, 1)];
_f2[LU3(Nm1, Nm1, 7)] = _fstar[LU3(Nm1, Nm1, 5)] - 2.0 * w[5] * rhowall * (c[5][0] * Utop + c[5][1] * Vrig) * 3.0;
_f2[LU3(Nm1, Nm1, 4)] = _fstar[LU3(Nm1, Nm1, 2)];
// Bottom Right
_f2[LU3(Nm1, 0, 3)] = _fstar[LU3(Nm1, 0, 1)];
_f2[LU3(Nm1, 0, 6)] = _fstar[LU3(Nm1, 0, 8)] - 2.0 * w[8] * rhowall * (c[8][0] * Ubot + c[8][1] * Vrig) * 3.0;
_f2[LU3(Nm1, 0, 2)] = _fstar[LU3(Nm1, 0, 4)];
break;
}
case 1: { // Poiseuille Flow
#pragma omp parallel for
for (unsigned int i = 0; i < N; i++){
// Bottom wall
_f2[LU3(i, 0, 6)] = _fstar[LU3(i, 0, 8)];
_f2[LU3(i, 0, 2)] = _fstar[LU3(i, 0, 4)];
_f2[LU3(i, 0, 5)] = _fstar[LU3(i, 0, 7)];
// Top wall
_f2[LU3(i, Nm1, 8)] = _fstar[LU3(i, Nm1, 6)];
_f2[LU3(i, Nm1, 4)] = _fstar[LU3(i, Nm1, 2)];
_f2[LU3(i, Nm1, 7)] = _fstar[LU3(i, Nm1, 5)];
}
break;
}
case 0: { // Couette flow
#pragma omp parallel for
for (unsigned int i = 0; i < N; i++){
// Bottom wall
_f2[LU3(i, 0, 6)] = _fstar[LU3(i, 0, 8)] - 2.0 * w[8] * _rhobar * c[8][0] * Ubot * 3.0;
_f2[LU3(i, 0, 2)] = _fstar[LU3(i, 0, 4)] - 2.0 * w[4] * _rhobar * c[4][0] * Ubot * 3.0;
_f2[LU3(i, 0, 5)] = _fstar[LU3(i, 0, 7)] - 2.0 * w[7] * _rhobar * c[7][0] * Ubot * 3.0;
// Top wall
_f2[LU3(i, Nm1, 8)] = _fstar[LU3(i, Nm1, 6)] - 2.0 * w[6] * _rhobar * c[6][0] * Utop * 3.0;
_f2[LU3(i, Nm1, 4)] = _fstar[LU3(i, Nm1, 2)] - 2.0 * w[2] * _rhobar * c[2][0] * Utop * 3.0;
_f2[LU3(i, Nm1, 7)] = _fstar[LU3(i, Nm1, 5)] - 2.0 * w[5] * _rhobar * c[5][0] * Utop * 3.0;
}
break;
}
default: {
std::printf("Error: Invalid Boundary condition case number\n");
exit(1);
}
}
}
inline void swap(){
_f1.swap(_f2);
}
inline bool convergence(const unsigned int t){
if (t == 0)
_df0 = 1.0 / rmsError();
if (t % 1000 == 0) {
_df = rmsError() * _df0;
if (t / 1000 == _error.size())
_error.resize(2 * t / 1000);
_error[t / 1000] = _df;
}
if (t % prntInt == 0) {
cout << "\nIteration " << t << ":" << endl;
printf("df/df0:\t%.3e\n", _df);
stop = omp_get_wtime();
printf("Time:\t%.3e s\n", stop-start);
printf("rho:\t%.3e\n", _rhobar);
start = omp_get_wtime();
}
return (_df < THRESH);
}
inline void macroVars(){
double temp;
int ind1;
_rhobar = 0.0;
#pragma omp parallel for private(temp,ind1) collapse(2) reduction(+:_rhobar)
for (unsigned int i = 0; i < N; i++){
for (unsigned int j = 0; j < N; j++){
ind1 = LU2(i,j);
temp = _f2[LU3(i,j,0)] + _f2[LU3(i,j,1)] + _f2[LU3(i,j,2)] + _f2[LU3(i,j,3)] + _f2[LU3(i,j,4)] + _f2[LU3(i,j,5)] + _f2[LU3(i,j,6)] + _f2[LU3(i,j,7)] + _f2[LU3(i,j,8)];
_rho[ind1] = temp;
_rhobar += (temp / N2);
_u1[ind1] = ((_f2[LU3(i,j,1)] + _f2[LU3(i,j,5)] + _f2[LU3(i,j,8)]) - (_f2[LU3(i,j,3)] + _f2[LU3(i,j,6)] + _f2[LU3(i,j,7)])) / temp;
_u2[ind1] = ((_f2[LU3(i,j,2)] + _f2[LU3(i,j,5)] + _f2[LU3(i,j,6)]) - (_f2[LU3(i,j,4)] + _f2[LU3(i,j,7)] + _f2[LU3(i,j,8)])) / temp;
if (BC == 1) {
_u1[ind1] += 0.5 * _forceX / temp;
_u2[ind1] += 0.5 * _forceY / temp;
}
}
}
}
inline void output(){
calcvmag();
calcstress();
calcVort();
FILE *f = fopen(_filename1,"w");
int ind1;
if (f == nullptr) {
printf("Error opening file!\n");
exit(1);
}
fprintf(f, "TITLE=\"%s\" VARIABLES=\"x\", \"y\", \"u\", \"v\", \"vmag\", \"omegaxy\", \"vortz\" ZONE T=\"%s\" I=%d J=%d F=POINT\n", _filename1, _filename1,N,N);
for (unsigned int i = 0; i < N; i++) {
for (unsigned int j = 0; j < N ; j++) {
ind1 = LU2(i,j);
fprintf(f, "%.10f, %.10f, %.10f, %.10f, %.10f, %.10f, %.10f\n", _x[i], _x[j], _u1[ind1] / _Umax,_u2[ind1] / _Umax, _vmag[ind1], _stress[ind1], _vort[ind1]);
}
}
fclose(f);
FILE *f2 = fopen(_filename2,"w");
if (f2 == nullptr) {
printf("Error opening file!\n");
exit(1);
}
for (unsigned int i = 0; i < _error.size(); i++) {
if (_error[i] == 0.0)
break;
else
fprintf(f2, "%d\t%.10e\n", 1000*i,_error[i]);
}
fclose(f2);
}
private:
vector<double> _f1;
vector<double> _f2;
vector<double> _fstar;
vector<double> _u1;
vector<double> _u2;
vector<double> _rho;
vector<double> _x;
vector<double> _error;
vector<double> _vmag;
vector<double> _stress;
vector<double> _vort;
double _df, _df0, _OMEGA, _OMEGAm, _CS, _MACH, _rhobar;
char _filename1[80];
char _filename2[80];
double _omega_e,_omega_eps,_omega_q,_omega_nu, _GS[9];
double _Umax = max(max(Utop,Ubot),max(Vlef,Vrig));
const double _forceX = 8.0 * NU * max(max(Utop, Ubot),max(Vlef, Vrig)) / N2, _forceY = 0.0;
// Left-right periodicity
inline void virtualnode(){
#pragma omp parallel for
for (unsigned int j = 1; j < Nm1; j++) {
_fstar[LU3(0, j, 1)] = _fstar[LU3(Nm2, j, 1)];
_fstar[LU3(0, j, 5)] = _fstar[LU3(Nm2, j, 5)];
_fstar[LU3(0, j, 8)] = _fstar[LU3(Nm2, j, 8)];
_fstar[LU3(Nm1, j, 3)] = _fstar[LU3(1, j, 3)];
_fstar[LU3(Nm1, j, 6)] = _fstar[LU3(1, j, 6)];
_fstar[LU3(Nm1, j, 7)] = _fstar[LU3(1, j, 7)];
}
// Top Left
_fstar[LU3(0, Nm1, 1)] = _fstar[LU3(Nm2, Nm1, 1)];
_fstar[LU3(0, Nm1, 4)] = _fstar[LU3(Nm2, Nm1, 4)];
_fstar[LU3(0, Nm1, 8)] = _fstar[LU3(Nm2, Nm1, 8)];
// Top Right
_fstar[LU3(Nm1, Nm1, 3)] = _fstar[LU3(1, Nm1, 3)];
_fstar[LU3(Nm1, Nm1, 7)] = _fstar[LU3(1, Nm1, 7)];
_fstar[LU3(Nm1, Nm1, 4)] = _fstar[LU3(1, Nm1, 4)];
// Bottom Left
_fstar[LU3(0, 0, 1)] = _fstar[LU3(Nm2, 0, 1)];
_fstar[LU3(0, 0, 2)] = _fstar[LU3(Nm2, 0, 2)];
_fstar[LU3(0, 0, 5)] = _fstar[LU3(Nm2, 0, 5)];
// Bottom Right
_fstar[LU3(Nm1, 0, 3)] = _fstar[LU3(1, 0, 3)];
_fstar[LU3(Nm1, 0, 2)] = _fstar[LU3(1, 0, 2)];
_fstar[LU3(Nm1, 0, 6)] = _fstar[LU3(1, 0, 6)];
}
// Intitializes x vector
inline void linspace(vector<double> &x, const double _start, const double _end, const int _num){
for (unsigned int i = 0; i < _num; i++)
x[i] = _start + i * (_end - _start) / (_num - 1.0);
}
inline double calcfeq(const int k, const int ind, const int check){
const double u1ij=_u1[ind], u2ij = _u2[ind];
double cdotu{},feq{},u2{},rho0=_rho[ind];
u2 = P2(u1ij) + P2(u2ij);
cdotu = c[k][0] * u1ij + c[k][1] * u2ij;
feq = w[k] * (_rho[ind] + rho0 * (3.0 * cdotu + (4.5 * P2(cdotu) - 1.5 * u2)));
if (check == 1)
checkfeq(feq);
return feq;
}
inline double calcfeq(const int k, const double u1ij, const double u2ij, const double rhoij, const int check){
double cdotu{},feq{},u2{}, rho0=rhoij;
u2 = P2(u1ij) + P2(u2ij);
cdotu = c[k][0] * u1ij + c[k][1] * u2ij;
feq = w[k] * (rhoij + rho0 * (3.0 * cdotu + (4.5 * P2(cdotu) - 1.5 * u2)));
if (check == 1)
checkfeq(feq);
return feq;
}
// Checks for negative feq
inline void checkfeq(const double value){
if (value < 0) {
printf("Error: negative feq. Therefore, unstable.\n");
exit(1);
}
}
inline double rmsError(){
double difference{};
#pragma omp parallel for reduction(+:difference)
for (unsigned int i = 0; i < NQ; i++){
difference += P2(_f2[i] - _f1[i]);
}
return sqrt(difference / NQ);
}
inline void calcmeq(vector<double> &meq, const double u1, const double u2, const double rho) {
const double u12 = P2(u1);
const double u22 = P2(u2);
const double rho0 = rho;
meq[0] = rho; // rho
meq[1] = -2 * rho + 3 * rho0 * (u12 + u22); // e
meq[2] = rho - 3 * rho0 * (u12 + u22); // eps
meq[3] = rho0 * u1; // jx
meq[4] = -meq[3]; // qx
meq[5] = rho0 * u2; // jy
meq[6] = -meq[5]; // qy
meq[7] = rho0 * (u12 - u22); // pxx
meq[8] = rho0 * u1 * u2; // pxy
}
inline int LU2(const int i, const int j) {
return N*j + i;
}
inline int LU3(const int i, const int j, const int k) {
return N2*k + N*j + i;
}
inline double P2(const double value){
return (value * value);
}
inline void calcvmag(){
#pragma omp parallel for
for (unsigned int i = 0; i < N2; i++)
_vmag[i] = sqrt(P2(_u1[i]) + P2(_u2[i]));
}
inline void calcstress(){
#pragma omp parallel
{
int ind1;
#pragma omp for collapse(2)
for (unsigned int i = 0; i < N; i++){
for (unsigned int j = 0; j < N; j++){
ind1 = LU2(i,j);
for (unsigned int k = 0; k < Q; k++){
_stress[ind1] -= (1.0 - 0.5 * _OMEGA) * c[k][0] * c[k][1] * (_f2[LU3(i,j,k)] - calcfeq(k,ind1,1));
}
if (BC == 1)
_stress[ind1] -= 0.5 * (1.0 - 0.5 * _OMEGA) * (_forceX * _u2[ind1] + _forceY * _u1[ind1]);
}
}
}
}
inline void calcVort(){
double dvdx{},dudy{};
const double h2 = P2(_x[1] - _x[0]);
int ind1;
#pragma omp parallel for private(ind1,dvdx,dudy), collapse(2)
// Internal region
for (unsigned int i = 1; i < Nm1; i++) {
for (unsigned int j = 1; j < Nm1; j++) {
ind1 = LU2(i,j);
dvdx = (_u2[LU2(i-1,j)] - 2.0 * _u2[ind1] + _u2[LU2(i+1,j)]) / (h2 * _Umax);
dudy = (_u1[LU2(i,j-1)] - 2.0 * _u1[ind1] + _u1[LU2(i,j+1)]) / (h2 * _Umax);
_vort[ind1] = dvdx - dudy;
}
}
#pragma omp parallel for private(dvdx,dudy)
// boundary
for (unsigned int i = 1; i < N-1; i++) {
// Top
dvdx = (_u2[LU2(i-1,Nm1)] - 2.0 * _u2[LU2(i-1,Nm1)] + _u2[LU2(i+1,Nm1)]) / (h2 * _Umax);
dudy = (_u1[LU2(i,Nm1)] - 2.0 * _u1[LU2(i,Nm2)] + _u1[LU2(i,N - 3)]) / (h2 * _Umax);
_vort[LU2(i,Nm1)] = dvdx - dudy;
// Bottom
dvdx = (_u2[LU2(i-1,0)] - 2.0 * _u2[LU2(i-1,0)] + _u2[LU2(i+1,0)]) / (h2 * _Umax);
dudy = (_u1[LU2(i,2)] - 2.0 * _u1[LU2(i,1)] + _u1[LU2(i,0)]) / (h2 * _Umax);
_vort[LU2(i,0)] = dvdx - dudy;
// Left
dvdx = (_u2[LU2(2,i)] - 2.0 * _u2[LU2(1,i)] + _u2[LU2(0,i)]) / (h2 * _Umax);
dudy = (_u1[LU2(0,i+1)] - 2.0 * _u1[LU2(0,i)] + _u1[LU2(0,i-1)]) / (h2 * _Umax);
_vort[LU2(0,i)] = dvdx - dudy;
// Right
dvdx = (_u2[LU2(Nm1,i)] - 2.0 * _u2[LU2(Nm2,i)] + _u2[LU2(N - 3,i)]) / (h2 * _Umax);
dudy = (_u1[LU2(Nm1,i+1)] - 2.0 * _u1[LU2(Nm1,i)] + _u1[LU2(Nm1,i-1)]) / (h2 * _Umax);
_vort[LU2(0,i)] = dvdx - dudy;
}
// Corners
// Bottom Left
dvdx = (_u2[LU2(2,0)] - 2.0 * _u2[LU2(1,0)] + _u2[LU2(0,0)]) / (h2 * _Umax);
dudy = (_u1[LU2(0,2)] - 2.0 * _u1[LU2(0,1)] + _u1[LU2(0,0)]) / (h2 * _Umax);
_vort[LU2(0,0)] = dvdx - dudy;
// Bottom Right
dvdx = (_u2[LU2(Nm1,0)] - 2.0 * _u2[LU2(Nm2,0)] + _u2[LU2(N - 3,0)]) / (h2 * _Umax);
dudy = (_u1[LU2(Nm1,2)] - 2.0 * _u1[LU2(Nm1,1)] + _u1[LU2(Nm1,0)]) / (h2 * _Umax);
_vort[LU2(Nm1,0)] = dvdx - dudy;
// Top Left
dvdx = (_u2[LU2(2,Nm1)] - 2.0 * _u2[LU2(1,Nm1)] + _u2[LU2(0,Nm1)]) / (h2 * _Umax);
dudy = (_u1[LU2(0,Nm1)] - 2.0 * _u1[LU2(0,Nm2)] + _u1[LU2(0,N - 3)]) / (h2 * _Umax);
_vort[LU2(0,Nm1)] = dvdx - dudy;
// Top Right
dvdx = (_u2[LU2(Nm1,Nm1)] - 2.0 * _u2[LU2(Nm2,Nm1)] + _u2[LU2(N - 3,Nm1)]) / (h2 * _Umax);
dudy = (_u1[LU2(Nm1,Nm1)] - 2.0 * _u1[LU2(Nm1,Nm2)] + _u1[LU2(Nm1,N - 3)]) / (h2 * _Umax);
_vort[LU2(Nm1,Nm1)] = dvdx - dudy;
}
};
#endif //LIDDRIVENCAVITYLBM_LBMCLASS_H
|
main.c | /* Copyright (C) 2010 The Trustees of Indiana University. */
/* */
/* Use, modification and distribution is subject to the Boost Software */
/* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */
/* http://www.boost.org/LICENSE_1_0.txt) */
/* */
/* Authors: Jeremiah Willcock */
/* Andrew Lumsdaine */
/* These need to be before any possible inclusions of stdint.h or inttypes.h.
* */
#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS
#endif
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
// debug
#define GEN_ONLY
#include "../generator/make_graph.h"
#include "../generator/utils.h"
#include "common.h"
#include <math.h>
#include <mpi.h>
#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdio.h>
#include <limits.h>
#include <stdint.h>
#include <inttypes.h>
static int compare_doubles(const void* a, const void* b) {
double aa = *(const double*)a;
double bb = *(const double*)b;
return (aa < bb) ? -1 : (aa == bb) ? 0 : 1;
}
enum {s_minimum, s_firstquartile, s_median, s_thirdquartile, s_maximum, s_mean, s_std, s_LAST};
static void get_statistics(const double x[], int n, double r[s_LAST]) {
double temp;
int i;
/* Compute mean. */
temp = 0;
for (i = 0; i < n; ++i) temp += x[i];
temp /= n;
r[s_mean] = temp;
/* Compute std. dev. */
temp = 0;
for (i = 0; i < n; ++i) temp += (x[i] - r[s_mean]) * (x[i] - r[s_mean]);
temp /= n - 1;
r[s_std] = sqrt(temp);
/* Sort x. */
double* xx = (double*)xmalloc(n * sizeof(double));
memcpy(xx, x, n * sizeof(double));
qsort(xx, n, sizeof(double), compare_doubles);
/* Get order statistics. */
r[s_minimum] = xx[0];
r[s_firstquartile] = (xx[(n - 1) / 4] + xx[n / 4]) * .5;
r[s_median] = (xx[(n - 1) / 2] + xx[n / 2]) * .5;
r[s_thirdquartile] = (xx[n - 1 - (n - 1) / 4] + xx[n - 1 - n / 4]) * .5;
r[s_maximum] = xx[n - 1];
/* Clean up. */
free(xx);
}
int main(int argc, char** argv) {
MPI_Init(&argc, &argv);
setup_globals();
/* Parse arguments. */
int SCALE = 16;
int edgefactor = 16; /* nedges / nvertices, i.e., 2*avg. degree */
// if (argc >= 2) SCALE = atoi(argv[1]);
// if (argc >= 3) edgefactor = atoi(argv[2]);
char* name = argv[1];
if (argc >= 3) SCALE = atoi(argv[2]);
if (argc >= 4) edgefactor = atoi(argv[3]);
// if (argc <= 1 || argc >= 4 || SCALE == 0 || edgefactor == 0) {
// if (rank == 0) {
// fprintf(stderr, "Usage: %s SCALE edgefactor\n SCALE = log_2(# vertices) [integer, required]\n edgefactor = (# edges) / (# vertices) = .5 * (average vertex degree) [integer, defaults to 16]\n(Random number seed and Kronecker initiator are in main.c)\n", argv[0]);
// }
if (argc <= 2 || argc >= 5 || SCALE == 0 || edgefactor == 0) {
if (rank == 0) {
fprintf(stderr, "Usage: %s filename SCALE edgefactor\n SCALE = log_2(# vertices) [integer, required]\n edgefactor = (# edges) / (# vertices) = .5 * (average vertex degree) [integer, defaults to 16]\n(Random number seed and Kronecker initiator are in main.c)\n", argv[0]);
}
MPI_Abort(MPI_COMM_WORLD, 1);
}
uint64_t seed1 = 2, seed2 = 3;
// const char* filename = getenv("TMPFILE");
const char* filename = name;
/* If filename is NULL, store data in memory */
tuple_graph tg;
tg.nglobaledges = (int64_t)(edgefactor) << SCALE;
int64_t nglobalverts = (int64_t)(1) << SCALE;
tg.data_in_file = (filename != NULL);
if (tg.data_in_file) {
printf("data in file \n");
MPI_File_set_errhandler(MPI_FILE_NULL, MPI_ERRORS_ARE_FATAL);
// MPI_File_open(MPI_COMM_WORLD, (char*)filename, MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_EXCL | MPI_MODE_DELETE_ON_CLOSE | MPI_MODE_UNIQUE_OPEN, MPI_INFO_NULL, &tg.edgefile);
MPI_File_open(MPI_COMM_WORLD, (char*)filename, MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_EXCL | MPI_MODE_UNIQUE_OPEN, MPI_INFO_NULL, &tg.edgefile);
MPI_File_set_size(tg.edgefile, tg.nglobaledges * sizeof(packed_edge));
MPI_File_set_view(tg.edgefile, 0, packed_edge_mpi_type, packed_edge_mpi_type, "native", MPI_INFO_NULL);
MPI_File_set_atomicity(tg.edgefile, 0);
}
/* Make the raw graph edges. */
/* Get roots for BFS runs, plus maximum vertex with non-zero degree (used by
* validator). */
int num_bfs_roots = 64;
int64_t* bfs_roots = (int64_t*)xmalloc(num_bfs_roots * sizeof(int64_t));
int64_t max_used_vertex = 0;
double make_graph_start = MPI_Wtime();
{
/* Spread the two 64-bit numbers into five nonzero values in the correct
* range. */
uint_fast32_t seed[5];
make_mrg_seed(seed1, seed2, seed);
/* As the graph is being generated, also keep a bitmap of vertices with
* incident edges. We keep a grid of processes, each row of which has a
* separate copy of the bitmap (distributed among the processes in the
* row), and then do an allreduce at the end. This scheme is used to avoid
* non-local communication and reading the file separately just to find BFS
* roots. */
MPI_Offset nchunks_in_file = (tg.nglobaledges + FILE_CHUNKSIZE - 1) / FILE_CHUNKSIZE;
int64_t bitmap_size_in_bytes = int64_min(BITMAPSIZE, (nglobalverts + CHAR_BIT - 1) / CHAR_BIT);
if (bitmap_size_in_bytes * size * CHAR_BIT < nglobalverts) {
bitmap_size_in_bytes = (nglobalverts + size * CHAR_BIT - 1) / (size * CHAR_BIT);
}
int ranks_per_row = ((nglobalverts + CHAR_BIT - 1) / CHAR_BIT + bitmap_size_in_bytes - 1) / bitmap_size_in_bytes;
int nrows = size / ranks_per_row;
int my_row = -1, my_col = -1;
unsigned char* restrict has_edge = NULL;
MPI_Comm cart_comm;
{
int dims[2] = {size / ranks_per_row, ranks_per_row};
int periods[2] = {0, 0};
MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 1, &cart_comm);
}
int in_generating_rectangle = 0;
if (cart_comm != MPI_COMM_NULL) {
in_generating_rectangle = 1;
{
int dims[2], periods[2], coords[2];
MPI_Cart_get(cart_comm, 2, dims, periods, coords);
my_row = coords[0];
my_col = coords[1];
}
MPI_Comm this_col;
MPI_Comm_split(cart_comm, my_col, my_row, &this_col);
MPI_Comm_free(&cart_comm);
has_edge = (unsigned char*)xMPI_Alloc_mem(bitmap_size_in_bytes);
memset(has_edge, 0, bitmap_size_in_bytes);
/* Every rank in a given row creates the same vertices (for updating the
* bitmap); only one writes them to the file (or final memory buffer). */
packed_edge* buf = (packed_edge*)xmalloc(FILE_CHUNKSIZE * sizeof(packed_edge));
MPI_Offset block_limit = (nchunks_in_file + nrows - 1) / nrows;
// fprintf(stderr, "%d: nchunks_in_file = %" PRId64 ", block_limit = %" PRId64 " in grid of %d rows, %d cols\n", rank, (int64_t)nchunks_in_file, (int64_t)block_limit, nrows, ranks_per_row);
if (tg.data_in_file) {
tg.edgememory_size = 0;
tg.edgememory = NULL;
} else {
int my_pos = my_row + my_col * nrows;
int last_pos = (tg.nglobaledges % ((int64_t)FILE_CHUNKSIZE * nrows * ranks_per_row) != 0) ?
(tg.nglobaledges / FILE_CHUNKSIZE) % (nrows * ranks_per_row) :
-1;
int64_t edges_left = tg.nglobaledges % FILE_CHUNKSIZE;
int64_t nedges = FILE_CHUNKSIZE * (tg.nglobaledges / ((int64_t)FILE_CHUNKSIZE * nrows * ranks_per_row)) +
FILE_CHUNKSIZE * (my_pos < (tg.nglobaledges / FILE_CHUNKSIZE) % (nrows * ranks_per_row)) +
(my_pos == last_pos ? edges_left : 0);
/* fprintf(stderr, "%d: nedges = %" PRId64 " of %" PRId64 "\n", rank, (int64_t)nedges, (int64_t)tg.nglobaledges); */
tg.edgememory_size = nedges;
tg.edgememory = (packed_edge*)xmalloc(nedges * sizeof(packed_edge));
}
MPI_Offset block_idx;
for (block_idx = 0; block_idx < block_limit; ++block_idx) {
/* fprintf(stderr, "%d: On block %d of %d\n", rank, (int)block_idx, (int)block_limit); */
MPI_Offset start_edge_index = int64_min(FILE_CHUNKSIZE * (block_idx * nrows + my_row), tg.nglobaledges);
MPI_Offset edge_count = int64_min(tg.nglobaledges - start_edge_index, FILE_CHUNKSIZE);
packed_edge* actual_buf = (!tg.data_in_file && block_idx % ranks_per_row == my_col) ?
tg.edgememory + FILE_CHUNKSIZE * (block_idx / ranks_per_row) :
buf;
/* fprintf(stderr, "%d: My range is [%" PRId64 ", %" PRId64 ") %swriting into index %" PRId64 "\n", rank, (int64_t)start_edge_index, (int64_t)(start_edge_index + edge_count), (my_col == (block_idx % ranks_per_row)) ? "" : "not ", (int64_t)(FILE_CHUNKSIZE * (block_idx / ranks_per_row))); */
if (!tg.data_in_file && block_idx % ranks_per_row == my_col) {
assert (FILE_CHUNKSIZE * (block_idx / ranks_per_row) + edge_count <= tg.edgememory_size);
}
// debug
char* wtxbuf = (char*)xmalloc(FILE_CHUNKSIZE * sizeof(packed_edge));
// generate_kronecker_range(seed, SCALE, start_edge_index, start_edge_index + edge_count, actual_buf);
generate_kronecker_range(seed, SCALE, start_edge_index, start_edge_index + edge_count, actual_buf);
if (tg.data_in_file && my_col == (block_idx % ranks_per_row)) { /* Try to spread writes among ranks */
// MPI_File_write_at(tg.edgefile, start_edge_index, actual_buf, edge_count, packed_edge_mpi_type, MPI_STATUS_IGNORE);
// debug
printf("%d: %d, %d\n", rank, start_edge_index, edge_count);
int i;
// for (i = start_edge_index; i < start_edge_index + 3; i++) {
// if(block_idx == 0) {
// for (i = 0; i < 3; i++) {
// if (edge_count > 3)
// printf("%d: %d\t%d\n", rank, actual_buf[i].v0, actual_buf[i].v1);
// }
// }
MPI_File_write_at(tg.edgefile, start_edge_index, actual_buf, edge_count, packed_edge_mpi_type, MPI_STATUS_IGNORE);
}
ptrdiff_t i;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (i = 0; i < edge_count; ++i) {
int64_t src = get_v0_from_edge(&actual_buf[i]);
int64_t tgt = get_v1_from_edge(&actual_buf[i]);
if (src == tgt) continue;
if (src / bitmap_size_in_bytes / CHAR_BIT == my_col) {
#ifdef _OPENMP
#pragma omp atomic
#endif
has_edge[(src / CHAR_BIT) % bitmap_size_in_bytes] |= (1 << (src % CHAR_BIT));
}
if (tgt / bitmap_size_in_bytes / CHAR_BIT == my_col) {
#ifdef _OPENMP
#pragma omp atomic
#endif
has_edge[(tgt / CHAR_BIT) % bitmap_size_in_bytes] |= (1 << (tgt % CHAR_BIT));
}
}
}
free(buf);
#if 0
/* The allreduce for each root acts like we did this: */
MPI_Allreduce(MPI_IN_PLACE, has_edge, bitmap_size_in_bytes, MPI_UNSIGNED_CHAR, MPI_BOR, this_col);
#endif
MPI_Comm_free(&this_col);
} else {
tg.edgememory = NULL;
tg.edgememory_size = 0;
}
MPI_Allreduce(&tg.edgememory_size, &tg.max_edgememory_size, 1, MPI_INT64_T, MPI_MAX, MPI_COMM_WORLD);
#ifndef GEN_ONLY
/* Find roots and max used vertex */
{
uint64_t counter = 0;
int bfs_root_idx;
for (bfs_root_idx = 0; bfs_root_idx < num_bfs_roots; ++bfs_root_idx) {
int64_t root;
while (1) {
double d[2];
make_random_numbers(2, seed1, seed2, counter, d);
root = (int64_t)((d[0] + d[1]) * nglobalverts) % nglobalverts;
counter += 2;
if (counter > 2 * nglobalverts) break;
int is_duplicate = 0;
int i;
for (i = 0; i < bfs_root_idx; ++i) {
if (root == bfs_roots[i]) {
is_duplicate = 1;
break;
}
}
if (is_duplicate) continue; /* Everyone takes the same path here */
int root_ok = 0;
if (in_generating_rectangle && (root / CHAR_BIT / bitmap_size_in_bytes) == my_col) {
root_ok = (has_edge[(root / CHAR_BIT) % bitmap_size_in_bytes] & (1 << (root % CHAR_BIT))) != 0;
}
MPI_Allreduce(MPI_IN_PLACE, &root_ok, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD);
if (root_ok) break;
}
bfs_roots[bfs_root_idx] = root;
}
num_bfs_roots = bfs_root_idx;
/* Find maximum non-zero-degree vertex. */
{
int64_t i;
max_used_vertex = 0;
if (in_generating_rectangle) {
for (i = bitmap_size_in_bytes * CHAR_BIT; i > 0; --i) {
if (i > nglobalverts) continue;
if (has_edge[(i - 1) / CHAR_BIT] & (1 << ((i - 1) % CHAR_BIT))) {
max_used_vertex = (i - 1) + my_col * CHAR_BIT * bitmap_size_in_bytes;
break;
}
}
}
MPI_Allreduce(MPI_IN_PLACE, &max_used_vertex, 1, MPI_INT64_T, MPI_MAX, MPI_COMM_WORLD);
}
}
#endif
if (in_generating_rectangle) {
MPI_Free_mem(has_edge);
}
if (tg.data_in_file) {
MPI_File_sync(tg.edgefile);
}
}
double make_graph_stop = MPI_Wtime();
double make_graph_time = make_graph_stop - make_graph_start;
if (rank == 0) { /* Not an official part of the results */
fprintf(stderr, "graph_generation: %f s\n", make_graph_time);
}
//debug
#ifndef GEN_ONLY //!GEN_ONLY
/* Make user's graph data structure. */
double data_struct_start = MPI_Wtime();
make_graph_data_structure(&tg);
double data_struct_stop = MPI_Wtime();
double data_struct_time = data_struct_stop - data_struct_start;
if (rank == 0) { /* Not an official part of the results */
fprintf(stderr, "construction_time: %f s\n", data_struct_time);
}
/* Number of edges visited in each BFS; a double so get_statistics can be
* used directly. */
double* edge_counts = (double*)xmalloc(num_bfs_roots * sizeof(double));
/* Run BFS. */
int validation_passed = 1;
double* bfs_times = (double*)xmalloc(num_bfs_roots * sizeof(double));
double* validate_times = (double*)xmalloc(num_bfs_roots * sizeof(double));
uint64_t nlocalverts = get_nlocalverts_for_pred();
int64_t* pred = (int64_t*)xMPI_Alloc_mem(nlocalverts * sizeof(int64_t));
int bfs_root_idx;
for (bfs_root_idx = 0; bfs_root_idx < num_bfs_roots; ++bfs_root_idx) {
int64_t root = bfs_roots[bfs_root_idx];
if (rank == 0) fprintf(stderr, "Running BFS %d\n", bfs_root_idx);
/* Clear the pred array. */
memset(pred, 0, nlocalverts * sizeof(int64_t));
/* Do the actual BFS. */
double bfs_start = MPI_Wtime();
run_bfs(root, &pred[0]);
double bfs_stop = MPI_Wtime();
bfs_times[bfs_root_idx] = bfs_stop - bfs_start;
if (rank == 0) fprintf(stderr, "Time for BFS %d is %f\n", bfs_root_idx, bfs_times[bfs_root_idx]);
/* Validate result. */
if (rank == 0) fprintf(stderr, "Validating BFS %d\n", bfs_root_idx);
double validate_start = MPI_Wtime();
int64_t edge_visit_count;
int validation_passed_one = validate_bfs_result(&tg, max_used_vertex + 1, nlocalverts, root, pred, &edge_visit_count);
double validate_stop = MPI_Wtime();
validate_times[bfs_root_idx] = validate_stop - validate_start;
if (rank == 0) fprintf(stderr, "Validate time for BFS %d is %f\n", bfs_root_idx, validate_times[bfs_root_idx]);
edge_counts[bfs_root_idx] = (double)edge_visit_count;
if (rank == 0) fprintf(stderr, "TEPS for BFS %d is %g\n", bfs_root_idx, edge_visit_count / bfs_times[bfs_root_idx]);
if (!validation_passed_one) {
validation_passed = 0;
if (rank == 0) fprintf(stderr, "Validation failed for this BFS root; skipping rest.\n");
break;
}
}
MPI_Free_mem(pred);
free(bfs_roots);
free_graph_data_structure();
#endif //!GEN_ONLY
if (tg.data_in_file) {
MPI_File_close(&tg.edgefile);
} else {
free(tg.edgememory); tg.edgememory = NULL;
}
#ifndef GEN_ONLY
/* Print results. */
if (rank == 0) {
if (!validation_passed) {
fprintf(stdout, "No results printed for invalid run.\n");
} else {
int i;
fprintf(stdout, "SCALE: %d\n", SCALE);
fprintf(stdout, "edgefactor: %d\n", edgefactor);
fprintf(stdout, "NBFS: %d\n", num_bfs_roots);
fprintf(stdout, "graph_generation: %g\n", make_graph_time);
fprintf(stdout, "num_mpi_processes: %d\n", size);
fprintf(stdout, "construction_time: %g\n", data_struct_time);
double stats[s_LAST];
get_statistics(bfs_times, num_bfs_roots, stats);
fprintf(stdout, "min_time: %g\n", stats[s_minimum]);
fprintf(stdout, "firstquartile_time: %g\n", stats[s_firstquartile]);
fprintf(stdout, "median_time: %g\n", stats[s_median]);
fprintf(stdout, "thirdquartile_time: %g\n", stats[s_thirdquartile]);
fprintf(stdout, "max_time: %g\n", stats[s_maximum]);
fprintf(stdout, "mean_time: %g\n", stats[s_mean]);
fprintf(stdout, "stddev_time: %g\n", stats[s_std]);
get_statistics(edge_counts, num_bfs_roots, stats);
fprintf(stdout, "min_nedge: %.11g\n", stats[s_minimum]);
fprintf(stdout, "firstquartile_nedge: %.11g\n", stats[s_firstquartile]);
fprintf(stdout, "median_nedge: %.11g\n", stats[s_median]);
fprintf(stdout, "thirdquartile_nedge: %.11g\n", stats[s_thirdquartile]);
fprintf(stdout, "max_nedge: %.11g\n", stats[s_maximum]);
fprintf(stdout, "mean_nedge: %.11g\n", stats[s_mean]);
fprintf(stdout, "stddev_nedge: %.11g\n", stats[s_std]);
double* secs_per_edge = (double*)xmalloc(num_bfs_roots * sizeof(double));
for (i = 0; i < num_bfs_roots; ++i) secs_per_edge[i] = bfs_times[i] / edge_counts[i];
get_statistics(secs_per_edge, num_bfs_roots, stats);
fprintf(stdout, "min_TEPS: %g\n", 1. / stats[s_maximum]);
fprintf(stdout, "firstquartile_TEPS: %g\n", 1. / stats[s_thirdquartile]);
fprintf(stdout, "median_TEPS: %g\n", 1. / stats[s_median]);
fprintf(stdout, "thirdquartile_TEPS: %g\n", 1. / stats[s_firstquartile]);
fprintf(stdout, "max_TEPS: %g\n", 1. / stats[s_minimum]);
fprintf(stdout, "harmonic_mean_TEPS: %g\n", 1. / stats[s_mean]);
/* Formula from:
* Title: The Standard Errors of the Geometric and Harmonic Means and
* Their Application to Index Numbers
* Author(s): Nilan Norris
* Source: The Annals of Mathematical Statistics, Vol. 11, No. 4 (Dec., 1940), pp. 445-448
* Publisher(s): Institute of Mathematical Statistics
* Stable URL: http://www.jstor.org/stable/2235723
* (same source as in specification). */
fprintf(stdout, "harmonic_stddev_TEPS: %g\n", stats[s_std] / (stats[s_mean] * stats[s_mean] * sqrt(num_bfs_roots - 1)));
free(secs_per_edge); secs_per_edge = NULL;
free(edge_counts); edge_counts = NULL;
get_statistics(validate_times, num_bfs_roots, stats);
fprintf(stdout, "min_validate: %g\n", stats[s_minimum]);
fprintf(stdout, "firstquartile_validate: %g\n", stats[s_firstquartile]);
fprintf(stdout, "median_validate: %g\n", stats[s_median]);
fprintf(stdout, "thirdquartile_validate: %g\n", stats[s_thirdquartile]);
fprintf(stdout, "max_validate: %g\n", stats[s_maximum]);
fprintf(stdout, "mean_validate: %g\n", stats[s_mean]);
fprintf(stdout, "stddev_validate: %g\n", stats[s_std]);
#if 0
for (i = 0; i < num_bfs_roots; ++i) {
fprintf(stdout, "Run %3d: %g s, validation %g s\n", i + 1, bfs_times[i], validate_times[i]);
}
#endif
}
}
free(bfs_times);
free(validate_times);
#endif
cleanup_globals();
MPI_Finalize();
return 0;
}
|
rkb_screen.c | /*
*
*/
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <complex.h>
#include <assert.h>
#include "cint.h"
#include "cvhf.h"
#include "fblas.h"
#include "optimizer.h"
#define MAX(I,J) ((I) > (J) ? (I) : (J))
#define LL 0
#define SS 1
#define SL 2
#define LS 3
int int2e_spinor();
int int2e_spsp1spsp2_spinor();
int CVHFrkbllll_prescreen(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
if (!opt) {
return 1; // no screen
}
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int n = opt->nbas;
assert(opt->q_cond);
assert(opt->dm_cond);
assert(i < n);
assert(j < n);
assert(k < n);
assert(l < n);
double qijkl = opt->q_cond[i*n+j] * opt->q_cond[k*n+l];
double dmin = opt->direct_scf_cutoff / qijkl;
return qijkl > opt->direct_scf_cutoff
&&((opt->dm_cond[j*n+i] > dmin)
|| (opt->dm_cond[l*n+k] > dmin)
|| (opt->dm_cond[j*n+k] > dmin)
|| (opt->dm_cond[j*n+l] > dmin)
|| (opt->dm_cond[i*n+k] > dmin)
|| (opt->dm_cond[i*n+l] > dmin));
}
int CVHFrkbllll_vkscreen(int *shls, CVHFOpt *opt,
double **dms_cond, int n_dm, double *dm_atleast,
int *atm, int *bas, double *env)
{
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int nbas = opt->nbas;
int idm;
double qijkl = opt->q_cond[i*nbas+j] * opt->q_cond[k*nbas+l];
double *pdmscond = opt->dm_cond + nbas*nbas;
for (idm = 0; idm < n_dm/2; idm++) {
// note in _vhf.rdirect_mapdm, J and K share the same DM
dms_cond[idm*2+0] = pdmscond + idm*nbas*nbas; // for vj
dms_cond[idm*2+1] = pdmscond + idm*nbas*nbas; // for vk
}
*dm_atleast = opt->direct_scf_cutoff / qijkl;
return 1;
}
int CVHFrkbssll_prescreen(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
if (!opt) {
return 1; // no screen
}
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int n = opt->nbas;
assert(opt->q_cond);
assert(opt->dm_cond);
assert(i < n);
assert(j < n);
assert(k < n);
assert(l < n);
double *dmsl = opt->dm_cond + n*n*SL;
double qijkl = opt->q_cond[n*n*SS+i*n+j] * opt->q_cond[k*n+l];
double dmin = opt->direct_scf_cutoff / qijkl;
return qijkl > opt->direct_scf_cutoff
&&((opt->dm_cond[n*n*SS+j*n+i] > dmin)
|| (opt->dm_cond[l*n+k] > dmin)
|| (dmsl[j*n+k] > dmin)
|| (dmsl[j*n+l] > dmin)
|| (dmsl[i*n+k] > dmin)
|| (dmsl[i*n+l] > dmin));
}
// be careful with the order in dms_cond, the current order (dmll, dmss, dmsl)
// is consistent to the function _call_veff_ssll in dhf.py
int CVHFrkbssll_vkscreen(int *shls, CVHFOpt *opt,
double **dms_cond, int n_dm, double *dm_atleast,
int *atm, int *bas, double *env)
{
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int nbas = opt->nbas;
int idm;
double qijkl = opt->q_cond[nbas*nbas*SS+i*nbas+j] * opt->q_cond[k*nbas+l];
double *pdmscond = opt->dm_cond + 4*nbas*nbas;
int nset = n_dm / 3;
double *dmscondll = pdmscond + nset*nbas*nbas*LL;
double *dmscondss = pdmscond + nset*nbas*nbas*SS;
double *dmscondsl = pdmscond + nset*nbas*nbas*SL;
for (idm = 0; idm < nset; idm++) {
dms_cond[nset*0+idm] = dmscondll + idm*nbas*nbas;
dms_cond[nset*1+idm] = dmscondss + idm*nbas*nbas;
dms_cond[nset*2+idm] = dmscondsl + idm*nbas*nbas;
}
*dm_atleast = opt->direct_scf_cutoff / qijkl;
return 1;
}
static void set_qcond(int (*intor)(), double *qcond,
int *atm, int natm, int *bas, int nbas, double *env)
{
int shls_slice[] = {0, nbas};
const int cache_size = GTOmax_cache_size(intor, shls_slice, 1,
atm, natm, bas, nbas, env);
#pragma omp parallel default(none) \
shared(intor, qcond, atm, natm, bas, nbas, env)
{
double qtmp, tmp;
int i, j, ij, di, dj, ish, jsh;
int shls[4];
double *cache = malloc(sizeof(double) * cache_size);
di = 0;
for (ish = 0; ish < nbas; ish++) {
dj = CINTcgto_spinor(ish, bas);
di = MAX(di, dj);
}
double complex *buf = malloc(sizeof(double complex) * di*di*di*di);
#pragma omp for schedule(dynamic, 4)
for (ij = 0; ij < nbas*(nbas+1)/2; ij++) {
ish = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
jsh = ij - ish*(ish+1)/2;
di = CINTcgto_spinor(ish, bas);
dj = CINTcgto_spinor(jsh, bas);
shls[0] = ish;
shls[1] = jsh;
shls[2] = ish;
shls[3] = jsh;
qtmp = 1e-100;
if (0 != (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env, NULL, cache)) {
for (i = 0; i < di; i++) {
for (j = 0; j < dj; j++) {
tmp = cabs(buf[i+di*j+di*dj*i+di*dj*di*j]);
qtmp = MAX(qtmp, tmp);
} }
qtmp = sqrt(qtmp);
}
qcond[ish*nbas+jsh] = qtmp;
qcond[jsh*nbas+ish] = qtmp;
}
free(buf);
free(cache);
}
}
void CVHFrkbllll_direct_scf(CVHFOpt *opt, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->q_cond) {
free(opt->q_cond);
}
opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas);
set_qcond(&int2e_spinor, opt->q_cond, atm, natm, bas, nbas, env);
}
void CVHFrkbssss_direct_scf(CVHFOpt *opt, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->q_cond) {
free(opt->q_cond);
}
opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas);
const int INC1 = 1;
int nn = nbas * nbas;
double c1 = .25/(env[PTR_LIGHT_SPEED]*env[PTR_LIGHT_SPEED]);
set_qcond(&int2e_spsp1spsp2_spinor, opt->q_cond, atm, natm, bas, nbas, env);
dscal_(&nn, &c1, opt->q_cond, &INC1);
}
void CVHFrkbssll_direct_scf(CVHFOpt *opt, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->q_cond) {
free(opt->q_cond);
}
opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas*2);
const int INC1 = 1;
int nn = nbas * nbas;
double c1 = 1/(.25/(env[PTR_LIGHT_SPEED]*env[PTR_LIGHT_SPEED]));
set_qcond(&int2e_spinor, opt->q_cond, atm, natm, bas, nbas, env);
set_qcond(&int2e_spsp1spsp2_spinor, opt->q_cond+nbas*nbas,
atm, natm, bas, nbas, env);
dscal_(&nn, &c1, opt->q_cond+nbas*nbas, &INC1);
}
static void set_dmcond(double *dmcond, double *dmscond, double complex *dm,
double direct_scf_cutoff, int nset, int *ao_loc,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int nao = ao_loc[nbas];
double dmax, dmaxi, tmp;
int i, j, ish, jsh;
int iset;
double complex *pdm;
for (ish = 0; ish < nbas; ish++) {
for (jsh = 0; jsh < nbas; jsh++) {
dmax = 0;
for (iset = 0; iset < nset; iset++) {
dmaxi = 0;
pdm = dm + nao*nao*iset;
for (i = ao_loc[ish]; i < ao_loc[ish+1]; i++) {
for (j = ao_loc[jsh]; j < ao_loc[jsh+1]; j++) {
tmp = cabs(pdm[i*nao+j]);
dmaxi = MAX(dmaxi, tmp);
} }
dmscond[iset*nbas*nbas+ish*nbas+jsh] = dmaxi;
dmax = MAX(dmax, dmaxi);
}
dmcond[ish*nbas+jsh] = dmax;
} }
}
// dm_cond ~ 1+nset, dm_cond + dms_cond
void CVHFrkbllll_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->dm_cond) { // NOT reuse opt->dm_cond because nset may be diff in different call
free(opt->dm_cond);
}
opt->dm_cond = (double *)malloc(sizeof(double)*nbas*nbas*(1+nset));
memset(opt->dm_cond, 0, sizeof(double)*nbas*nbas*(1+nset));
// dmcond followed by dmscond which are max matrix element for each dm
set_dmcond(opt->dm_cond, opt->dm_cond+nbas*nbas, dm,
opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
}
void CVHFrkbssss_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->dm_cond) {
free(opt->dm_cond);
}
opt->dm_cond = (double *)malloc(sizeof(double)*nbas*nbas*(1+nset));
memset(opt->dm_cond, 0, sizeof(double)*nbas*nbas*(1+nset));
set_dmcond(opt->dm_cond, opt->dm_cond+nbas*nbas, dm,
opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
}
// the current order of dmscond (dmll, dmss, dmsl) is consistent to the
// function _call_veff_ssll in dhf.py
void CVHFrkbssll_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->dm_cond) {
free(opt->dm_cond);
}
nset = nset / 3;
opt->dm_cond = (double *)malloc(sizeof(double)*nbas*nbas*4*(1+nset));
memset(opt->dm_cond, 0, sizeof(double)*nbas*nbas*4*(1+nset));
// 4 types of dmcond (LL,SS,SL,SS) followed by 4 types of dmscond
int n2c = CINTtot_cgto_spinor(bas, nbas);
double *dmcondll = opt->dm_cond + nbas*nbas*LL;
double *dmcondss = opt->dm_cond + nbas*nbas*SS;
double *dmcondsl = opt->dm_cond + nbas*nbas*SL;
//double *dmcondls = opt->dm_cond + nbas*nbas*LS;
double *pdmscond = opt->dm_cond + nbas*nbas*4;
double *dmscondll = pdmscond + nset*nbas*nbas*LL;
double *dmscondss = pdmscond + nset*nbas*nbas*SS;
double *dmscondsl = pdmscond + nset*nbas*nbas*SL;
//double *dmscondls = dmscond + nset*nbas*nbas*LS;
double complex *dmll = dm + n2c*n2c*LL*nset;
double complex *dmss = dm + n2c*n2c*SS*nset;
double complex *dmsl = dm + n2c*n2c*SL*nset;
//double complex *dmls = dm + n2c*n2c*LS*nset;
set_dmcond(dmcondll, dmscondll, dmll,
opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
set_dmcond(dmcondss, dmscondss, dmss,
opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
set_dmcond(dmcondsl, dmscondsl, dmsl,
opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
}
|
ordered_dependences.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7
#include "callback.h"
#include <omp.h>
int main() {
int a[10][10];
#pragma omp parallel num_threads(2)
#pragma omp for ordered(2)
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++) {
a[i][j] = i + j + 1;
printf("%d, %d\n", i, j);
#pragma omp ordered depend(sink : i - 1, j) depend(sink : i, j - 1)
if (i > 0 && j > 0)
a[i][j] = a[i - 1][j] + a[i][j - 1] + 1;
printf("%d, %d\n", i, j);
#pragma omp ordered depend(source)
}
return 0;
}
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER:[0-9]+]]: ompt_event_loop_begin:
// CHECK-SAME: parallel_id={{[0-9]+}}, parent_task_id=[[ITASK:[0-9]+]],
// CHECK: {{^}}[[MASTER]]: ompt_event_dependences: task_id=[[ITASK]],
// CHECK-SAME: deps=[(0, ompt_dependence_type_source), (0,
// CHECK-SAME: ompt_dependence_type_source)], ndeps=2
// CHECK: {{^}}[[MASTER]]: ompt_event_dependences: task_id=[[ITASK]],
// CHECK-SAME: deps=[(0, ompt_dependence_type_sink), (0,
// CHECK-SAME: ompt_dependence_type_sink)], ndeps=2
// CHECK: {{^}}[[MASTER]]: ompt_event_dependences: task_id=[[ITASK]],
// CHECK-SAME: deps=[(0, ompt_dependence_type_source), (1,
// CHECK-SAME: ompt_dependence_type_source)], ndeps=2
// CHECK: {{^}}[[WORKER:[0-9]+]]: ompt_event_loop_begin:
// CHECK-SAME: parallel_id={{[0-9]+}}, parent_task_id=[[ITASK:[0-9]+]],
// CHECK: {{^}}[[WORKER]]: ompt_event_dependences: task_id=[[ITASK]],
// CHECK-SAME: deps=[(0, ompt_dependence_type_sink), (0,
// CHECK-SAME: ompt_dependence_type_sink)], ndeps=2
// CHECK: {{^}}[[WORKER]]: ompt_event_dependences: task_id=[[ITASK]],
// CHECK-SAME: deps=[(1, ompt_dependence_type_source), (0,
// CHECK-SAME: ompt_dependence_type_source)], ndeps=2
// either can be first for last iteration
// CHECK-DAG: [[ITASK]]{{.*}}deps=[(0{{.*}}sink), (1,{{.*}}sink)]
// CHECK-DAG: [[ITASK]]{{.*}}deps=[(1{{.*}}sink), (0,{{.*}}sink)]
// CHECK: {{^}}[[WORKER]]: ompt_event_dependences: task_id=[[ITASK]],
// CHECK-SAME: deps=[(1, ompt_dependence_type_source), (1,
// CHECK-SAME: ompt_dependence_type_source)], ndeps=2
|
GB_unaryop__identity_int16_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int16_int16
// op(A') function: GB_tran__identity_int16_int16
// C type: int16_t
// A type: int16_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
int16_t z = (int16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_int16_int16
(
int16_t *restrict Cx,
const int16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_int16_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
cache.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC AAA CCCC H H EEEEE %
% C A A C H H E %
% C AAAAA C HHHHH EEE %
% C A A C H H E %
% CCCC A A CCCC H H EEEEE %
% %
% %
% MagickCore Pixel Cache Methods %
% %
% Software Design %
% Cristy %
% July 1999 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite-private.h"
#include "magick/distribute-cache-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/nt-base-private.h"
#include "magick/option.h"
#include "magick/pixel.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/policy.h"
#include "magick/quantum.h"
#include "magick/random_.h"
#include "magick/registry.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/utility.h"
#include "magick/utility-private.h"
#if defined(MAGICKCORE_ZLIB_DELEGATE)
#include "zlib.h"
#endif
/*
Define declarations.
*/
#define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent)
#define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \
GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse)
/*
Typedef declarations.
*/
typedef struct _MagickModulo
{
ssize_t
quotient,
remainder;
} MagickModulo;
/*
Forward declarations.
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static Cache
GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *)
magick_hot_spot;
static const IndexPacket
*GetVirtualIndexesFromCache(const Image *);
static const PixelPacket
*GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t,
const ssize_t,const size_t,const size_t,ExceptionInfo *),
*GetVirtualPixelsCache(const Image *);
static MagickBooleanType
GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,
PixelPacket *,ExceptionInfo *),
GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod,
const ssize_t,const ssize_t,PixelPacket *,ExceptionInfo *),
OpenPixelCache(Image *,const MapMode,ExceptionInfo *),
ReadPixelCacheIndexes(CacheInfo *,NexusInfo *,ExceptionInfo *),
ReadPixelCachePixels(CacheInfo *,NexusInfo *,ExceptionInfo *),
SyncAuthenticPixelsCache(Image *,ExceptionInfo *),
WritePixelCacheIndexes(CacheInfo *,NexusInfo *,ExceptionInfo *),
WritePixelCachePixels(CacheInfo *,NexusInfo *,ExceptionInfo *);
static PixelPacket
*GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*SetPixelCacheNexusPixels(const CacheInfo *,const MapMode,
const RectangleInfo *,const MagickBooleanType,NexusInfo *,ExceptionInfo *)
magick_hot_spot;
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
Global declarations.
*/
static volatile MagickBooleanType
instantiate_cache = MagickFalse;
static SemaphoreInfo
*cache_semaphore = (SemaphoreInfo *) NULL;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCache() acquires a pixel cache.
%
% The format of the AcquirePixelCache() method is:
%
% Cache AcquirePixelCache(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickExport Cache AcquirePixelCache(const size_t number_threads)
{
CacheInfo
*restrict cache_info;
char
*synchronize;
cache_info=(CacheInfo *) AcquireQuantumMemory(1,sizeof(*cache_info));
if (cache_info == (CacheInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(cache_info,0,sizeof(*cache_info));
cache_info->type=UndefinedCache;
cache_info->mode=IOMode;
cache_info->colorspace=sRGBColorspace;
cache_info->channels=4;
cache_info->file=(-1);
cache_info->id=GetMagickThreadId();
cache_info->number_threads=number_threads;
if (GetOpenMPMaximumThreads() > cache_info->number_threads)
cache_info->number_threads=GetOpenMPMaximumThreads();
if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
if (cache_info->number_threads == 0)
cache_info->number_threads=1;
cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
if (cache_info->nexus_info == (NexusInfo **) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
if (synchronize != (const char *) NULL)
{
cache_info->synchronize=IsStringTrue(synchronize);
synchronize=DestroyString(synchronize);
}
cache_info->semaphore=AllocateSemaphoreInfo();
cache_info->reference_count=1;
cache_info->file_semaphore=AllocateSemaphoreInfo();
cache_info->debug=IsEventLogging();
cache_info->signature=MagickSignature;
return((Cache ) cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
% The format of the AcquirePixelCacheNexus method is:
%
% NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickExport NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
NexusInfo
**restrict nexus_info;
register ssize_t
i;
nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(
number_threads,sizeof(*nexus_info)));
if (nexus_info == (NexusInfo **) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
nexus_info[0]=(NexusInfo *) AcquireQuantumMemory(number_threads,
sizeof(**nexus_info));
if (nexus_info[0] == (NexusInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(nexus_info[0],0,number_threads*sizeof(**nexus_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
nexus_info[i]=(&nexus_info[0][i]);
nexus_info[i]->signature=MagickSignature;
}
return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCachePixels() returns the pixels associated with the specified
% image.
%
% The format of the AcquirePixelCachePixels() method is:
%
% const void *AcquirePixelCachePixels(const Image *image,
% MagickSizeType *length,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const void *AcquirePixelCachePixels(const Image *image,
MagickSizeType *length,ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
(void) exception;
*length=0;
if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache))
return((const void *) NULL);
*length=cache_info->length;
return((const void *) cache_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentGenesis() instantiates the cache component.
%
% The format of the CacheComponentGenesis method is:
%
% MagickBooleanType CacheComponentGenesis(void)
%
*/
MagickExport MagickBooleanType CacheComponentGenesis(void)
{
if (cache_semaphore == (SemaphoreInfo *) NULL)
cache_semaphore=AllocateSemaphoreInfo();
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t T e r m i n u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentTerminus() destroys the cache component.
%
% The format of the CacheComponentTerminus() method is:
%
% CacheComponentTerminus(void)
%
*/
MagickExport void CacheComponentTerminus(void)
{
if (cache_semaphore == (SemaphoreInfo *) NULL)
ActivateSemaphoreInfo(&cache_semaphore);
LockSemaphoreInfo(cache_semaphore);
instantiate_cache=MagickFalse;
UnlockSemaphoreInfo(cache_semaphore);
DestroySemaphoreInfo(&cache_semaphore);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l i p P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
% mask. The method returns MagickTrue if the pixel region is clipped,
% otherwise MagickFalse.
%
% The format of the ClipPixelCacheNexus() method is:
%
% MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
NexusInfo *nexus_info,ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
MagickSizeType
number_pixels;
NexusInfo
**restrict clip_nexus,
**restrict image_nexus;
register const PixelPacket
*restrict r;
register IndexPacket
*restrict nexus_indexes,
*restrict indexes;
register PixelPacket
*restrict p,
*restrict q;
register ssize_t
i;
/*
Apply clip mask.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->clip_mask == (Image *) NULL) ||
(image->storage_class == PseudoClass))
return(MagickTrue);
cache_info=(CacheInfo *) image->cache;
if (cache_info == (Cache) NULL)
return(MagickFalse);
image_nexus=AcquirePixelCacheNexus(1);
clip_nexus=AcquirePixelCacheNexus(1);
if ((image_nexus == (NexusInfo **) NULL) ||
(clip_nexus == (NexusInfo **) NULL))
ThrowBinaryException(CacheError,"UnableToGetCacheNexus",image->filename);
p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
nexus_info->region.width,nexus_info->region.height,image_nexus[0],
exception);
indexes=image_nexus[0]->indexes;
q=nexus_info->pixels;
nexus_indexes=nexus_info->indexes;
r=GetVirtualPixelsFromNexus(image->clip_mask,MaskVirtualPixelMethod,
nexus_info->region.x,nexus_info->region.y,nexus_info->region.width,
nexus_info->region.height,clip_nexus[0],exception);
number_pixels=(MagickSizeType) nexus_info->region.width*
nexus_info->region.height;
for (i=0; i < (ssize_t) number_pixels; i++)
{
if ((p == (PixelPacket *) NULL) || (r == (const PixelPacket *) NULL))
break;
if (GetPixelIntensity(image,r) > (QuantumRange/2))
{
SetPixelRed(q,GetPixelRed(p));
SetPixelGreen(q,GetPixelGreen(p));
SetPixelBlue(q,GetPixelBlue(p));
SetPixelOpacity(q,GetPixelOpacity(p));
if (cache_info->active_index_channel != MagickFalse)
SetPixelIndex(nexus_indexes+i,GetPixelIndex(indexes+i));
}
p++;
q++;
r++;
}
clip_nexus=DestroyPixelCacheNexus(clip_nexus,1);
image_nexus=DestroyPixelCacheNexus(image_nexus,1);
if (i < (ssize_t) number_pixels)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCache() clones a pixel cache.
%
% The format of the ClonePixelCache() method is:
%
% Cache ClonePixelCache(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickExport Cache ClonePixelCache(const Cache cache)
{
CacheInfo
*restrict clone_info;
const CacheInfo
*restrict cache_info;
assert(cache != NULL);
cache_info=(const CacheInfo *) cache;
assert(cache_info->signature == MagickSignature);
if (cache_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
cache_info->filename);
clone_info=(CacheInfo *) AcquirePixelCache(cache_info->number_threads);
if (clone_info == (Cache) NULL)
return((Cache) NULL);
clone_info->virtual_pixel_method=cache_info->virtual_pixel_method;
return((Cache ) clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheMethods() clones the pixel cache methods from one cache to
% another.
%
% The format of the ClonePixelCacheMethods() method is:
%
% void ClonePixelCacheMethods(Cache clone,const Cache cache)
%
% A description of each parameter follows:
%
% o clone: Specifies a pointer to a Cache structure.
%
% o cache: the pixel cache.
%
*/
MagickExport void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
CacheInfo
*restrict cache_info,
*restrict source_info;
assert(clone != (Cache) NULL);
source_info=(CacheInfo *) clone;
assert(source_info->signature == MagickSignature);
if (source_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
source_info->filename);
assert(cache != (Cache) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickSignature);
source_info->methods=cache_info->methods;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e R e p o s i t o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %
% ClonePixelCacheRepository() clones the source pixel cache to the destination
% cache.
%
% The format of the ClonePixelCacheRepository() method is:
%
% MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
% CacheInfo *source_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o source_info: the source pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void CopyPixels(PixelPacket *destination,
const PixelPacket *source,const MagickSizeType number_pixels)
{
#if !defined(MAGICKCORE_OPENMP_SUPPORT) || (MAGICKCORE_QUANTUM_DEPTH <= 8)
(void) memcpy(destination,source,(size_t) number_pixels*sizeof(*source));
#else
{
register MagickOffsetType
i;
if ((number_pixels*sizeof(*source)) < MagickMaxBufferExtent)
{
(void) memcpy(destination,source,(size_t) number_pixels*
sizeof(*source));
return;
}
#pragma omp parallel for
for (i=0; i < (MagickOffsetType) number_pixels; i++)
destination[i]=source[i];
}
#endif
}
static inline MagickSizeType MagickMin(const MagickSizeType x,
const MagickSizeType y)
{
if (x < y)
return(x);
return(y);
}
static MagickBooleanType ClonePixelCacheRepository(
CacheInfo *restrict clone_info,CacheInfo *restrict cache_info,
ExceptionInfo *exception)
{
#define MaxCacheThreads 2
#define cache_threads(source,destination,chunk) \
num_threads((chunk) < (16*GetMagickResourceLimit(ThreadResource)) ? 1 : \
GetMagickResourceLimit(ThreadResource) < MaxCacheThreads ? \
GetMagickResourceLimit(ThreadResource) : MaxCacheThreads)
MagickBooleanType
status;
NexusInfo
**restrict cache_nexus,
**restrict clone_nexus;
size_t
length;
ssize_t
y;
assert(cache_info != (CacheInfo *) NULL);
assert(clone_info != (CacheInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
if (cache_info->type == PingCache)
return(MagickTrue);
if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
((clone_info->type == MemoryCache) || (clone_info->type == MapCache)) &&
(cache_info->columns == clone_info->columns) &&
(cache_info->rows == clone_info->rows) &&
(cache_info->active_index_channel == clone_info->active_index_channel))
{
/*
Identical pixel cache morphology.
*/
CopyPixels(clone_info->pixels,cache_info->pixels,cache_info->columns*
cache_info->rows);
if ((cache_info->active_index_channel != MagickFalse) &&
(clone_info->active_index_channel != MagickFalse))
(void) memcpy(clone_info->indexes,cache_info->indexes,
cache_info->columns*cache_info->rows*sizeof(*cache_info->indexes));
return(MagickTrue);
}
/*
Mismatched pixel cache morphology.
*/
cache_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
clone_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
if ((cache_nexus == (NexusInfo **) NULL) ||
(clone_nexus == (NexusInfo **) NULL))
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
length=(size_t) MagickMin(cache_info->columns,clone_info->columns)*
sizeof(*cache_info->pixels);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
cache_threads(cache_info,clone_info,cache_info->rows)
#endif
for (y=0; y < (ssize_t) cache_info->rows; y++)
{
const int
id = GetOpenMPThreadId();
PixelPacket
*pixels;
RectangleInfo
region;
if (status == MagickFalse)
continue;
if (y >= (ssize_t) clone_info->rows)
continue;
region.width=cache_info->columns;
region.height=1;
region.x=0;
region.y=y;
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,®ion,MagickTrue,
cache_nexus[id],exception);
if (pixels == (PixelPacket *) NULL)
continue;
status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
if (status == MagickFalse)
continue;
region.width=clone_info->columns;
pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,®ion,MagickTrue,
clone_nexus[id],exception);
if (pixels == (PixelPacket *) NULL)
continue;
(void) ResetMagickMemory(clone_nexus[id]->pixels,0,(size_t)
clone_nexus[id]->length);
(void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length);
status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
}
if ((cache_info->active_index_channel != MagickFalse) &&
(clone_info->active_index_channel != MagickFalse))
{
/*
Clone indexes.
*/
length=(size_t) MagickMin(cache_info->columns,clone_info->columns)*
sizeof(*cache_info->indexes);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
cache_threads(cache_info,clone_info,cache_info->rows)
#endif
for (y=0; y < (ssize_t) cache_info->rows; y++)
{
const int
id = GetOpenMPThreadId();
PixelPacket
*pixels;
RectangleInfo
region;
if (status == MagickFalse)
continue;
if (y >= (ssize_t) clone_info->rows)
continue;
region.width=cache_info->columns;
region.height=1;
region.x=0;
region.y=y;
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,®ion,MagickTrue,
cache_nexus[id],exception);
if (pixels == (PixelPacket *) NULL)
continue;
status=ReadPixelCacheIndexes(cache_info,cache_nexus[id],exception);
if (status == MagickFalse)
continue;
region.width=clone_info->columns;
pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,®ion,MagickTrue,
clone_nexus[id],exception);
if (pixels == (PixelPacket *) NULL)
continue;
(void) memcpy(clone_nexus[id]->indexes,cache_nexus[id]->indexes,length);
status=WritePixelCacheIndexes(clone_info,clone_nexus[id],exception);
}
}
cache_nexus=DestroyPixelCacheNexus(cache_nexus,MaxCacheThreads);
clone_nexus=DestroyPixelCacheNexus(clone_nexus,MaxCacheThreads);
if (cache_info->debug != MagickFalse)
{
char
message[MaxTextExtent];
(void) FormatLocaleString(message,MaxTextExtent,"%s => %s",
CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixelCache() method is:
%
% void DestroyImagePixelCache(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void DestroyImagePixelCache(Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->cache == (void *) NULL)
return;
image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixels() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixels() method is:
%
% void DestroyImagePixels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImagePixels(Image *image)
{
CacheInfo
*restrict cache_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
if (cache_info->methods.destroy_pixel_handler != (DestroyPixelHandler) NULL)
{
cache_info->methods.destroy_pixel_handler(image);
return;
}
image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyPixelCache() method is:
%
% Cache DestroyPixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
int
status;
status=(-1);
if (cache_info->file != -1)
{
status=close(cache_info->file);
cache_info->file=(-1);
RelinquishMagickResource(FileResource,1);
}
return(status == -1 ? MagickFalse : MagickTrue);
}
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
switch (cache_info->type)
{
case MemoryCache:
{
if (cache_info->mapped == MagickFalse)
cache_info->pixels=(PixelPacket *) RelinquishAlignedMemory(
cache_info->pixels);
else
{
(void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
cache_info->pixels=(PixelPacket *) NULL;
}
RelinquishMagickResource(MemoryResource,cache_info->length);
break;
}
case MapCache:
{
(void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
cache_info->pixels=(PixelPacket *) NULL;
if (cache_info->mode != ReadMode)
(void) RelinquishUniqueFileResource(cache_info->cache_filename);
*cache_info->cache_filename='\0';
RelinquishMagickResource(MapResource,cache_info->length);
}
case DiskCache:
{
if (cache_info->file != -1)
(void) ClosePixelCacheOnDisk(cache_info);
if (cache_info->mode != ReadMode)
(void) RelinquishUniqueFileResource(cache_info->cache_filename);
*cache_info->cache_filename='\0';
RelinquishMagickResource(DiskResource,cache_info->length);
break;
}
case DistributedCache:
{
*cache_info->cache_filename='\0';
(void) RelinquishDistributePixelCache((DistributeCacheInfo *)
cache_info->server_info);
break;
}
default:
break;
}
cache_info->type=UndefinedCache;
cache_info->mapped=MagickFalse;
cache_info->indexes=(IndexPacket *) NULL;
}
MagickExport Cache DestroyPixelCache(Cache cache)
{
CacheInfo
*restrict cache_info;
assert(cache != (Cache) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickSignature);
if (cache_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
cache_info->filename);
LockSemaphoreInfo(cache_info->semaphore);
cache_info->reference_count--;
if (cache_info->reference_count != 0)
{
UnlockSemaphoreInfo(cache_info->semaphore);
return((Cache) NULL);
}
UnlockSemaphoreInfo(cache_info->semaphore);
if (cache_info->debug != MagickFalse)
{
char
message[MaxTextExtent];
(void) FormatLocaleString(message,MaxTextExtent,"destroy %s",
cache_info->filename);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
}
RelinquishPixelCachePixels(cache_info);
if (cache_info->server_info != (DistributeCacheInfo *) NULL)
cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
cache_info->server_info);
if (cache_info->nexus_info != (NexusInfo **) NULL)
cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
cache_info->number_threads);
if (cache_info->random_info != (RandomInfo *) NULL)
cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
DestroySemaphoreInfo(&cache_info->file_semaphore);
if (cache_info->semaphore != (SemaphoreInfo *) NULL)
DestroySemaphoreInfo(&cache_info->semaphore);
cache_info->signature=(~MagickSignature);
cache_info=(CacheInfo *) RelinquishMagickMemory(cache_info);
cache=(Cache) NULL;
return(cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
% The format of the DestroyPixelCacheNexus() method is:
%
% NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus to destroy.
%
% o number_threads: the number of nexus threads.
%
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
if (nexus_info->mapped == MagickFalse)
(void) RelinquishAlignedMemory(nexus_info->cache);
else
(void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
nexus_info->cache=(PixelPacket *) NULL;
nexus_info->pixels=(PixelPacket *) NULL;
nexus_info->indexes=(IndexPacket *) NULL;
nexus_info->length=0;
nexus_info->mapped=MagickFalse;
}
MagickExport NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
const size_t number_threads)
{
register ssize_t
i;
assert(nexus_info != (NexusInfo **) NULL);
for (i=0; i < (ssize_t) number_threads; i++)
{
if (nexus_info[i]->cache != (PixelPacket *) NULL)
RelinquishCacheNexusPixels(nexus_info[i]);
nexus_info[i]->signature=(~MagickSignature);
}
nexus_info[0]=(NexusInfo *) RelinquishMagickMemory(nexus_info[0]);
nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c I n d e x e s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticIndexesFromCache() returns the indexes associated with the last
% call to QueueAuthenticPixelsCache() or GetAuthenticPixelsCache().
%
% The format of the GetAuthenticIndexesFromCache() method is:
%
% IndexPacket *GetAuthenticIndexesFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static IndexPacket *GetAuthenticIndexesFromCache(const Image *image)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c I n d e x Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticIndexQueue() returns the authentic black channel or the colormap
% indexes associated with the last call to QueueAuthenticPixels() or
% GetVirtualPixels(). NULL is returned if the black channel or colormap
% indexes are not available.
%
% The format of the GetAuthenticIndexQueue() method is:
%
% IndexPacket *GetAuthenticIndexQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport IndexPacket *GetAuthenticIndexQueue(const Image *image)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
if (cache_info->methods.get_authentic_indexes_from_handler !=
(GetAuthenticIndexesFromHandler) NULL)
return(cache_info->methods.get_authentic_indexes_from_handler(image));
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
% disk pixel cache as defined by the geometry parameters. A pointer to the
% pixels is returned if the pixels are transferred, otherwise a NULL is
% returned.
%
% The format of the GetAuthenticPixelCacheNexus() method is:
%
% PixelPacket *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *GetAuthenticPixelCacheNexus(Image *image,
const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
NexusInfo *nexus_info,ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
PixelPacket
*restrict pixels;
/*
Transfer pixels from the cache.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
nexus_info,exception);
if (pixels == (PixelPacket *) NULL)
return((PixelPacket *) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
if (nexus_info->authentic_pixel_cache != MagickFalse)
return(pixels);
if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
return((PixelPacket *) NULL);
if (cache_info->active_index_channel != MagickFalse)
if (ReadPixelCacheIndexes(cache_info,nexus_info,exception) == MagickFalse)
return((PixelPacket *) NULL);
return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsFromCache() returns the pixels associated with the last
% call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
% The format of the GetAuthenticPixelsFromCache() method is:
%
% PixelPacket *GetAuthenticPixelsFromCache(const Image image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static PixelPacket *GetAuthenticPixelsFromCache(const Image *image)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelQueue() returns the authentic pixels associated with the
% last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetAuthenticPixelQueue() method is:
%
% PixelPacket *GetAuthenticPixelQueue(const Image image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport PixelPacket *GetAuthenticPixelQueue(const Image *image)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
if (cache_info->methods.get_authentic_pixels_from_handler !=
(GetAuthenticPixelsFromHandler) NULL)
return(cache_info->methods.get_authentic_pixels_from_handler(image));
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixels() obtains a pixel region for read/write access. If the
% region is successfully accessed, a pointer to a PixelPacket array
% representing the region is returned, otherwise NULL is returned.
%
% The returned pointer may point to a temporary working copy of the pixels
% or it may point to the original pixels in memory. Performance is maximized
% if the selected region is part of one row, or one or more full rows, since
% then there is opportunity to access the pixels in-place (without a copy)
% if the image is in memory, or in a memory-mapped file. The returned pointer
% must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or if the storage class is
% PseduoClass, call GetAuthenticIndexQueue() after invoking
% GetAuthenticPixels() to obtain the black color component or colormap indexes
% (of type IndexPacket) corresponding to the region. Once the PixelPacket
% (and/or IndexPacket) array has been updated, the changes must be saved back
% to the underlying image using SyncAuthenticPixels() or they may be lost.
%
% The format of the GetAuthenticPixels() method is:
%
% PixelPacket *GetAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *GetAuthenticPixels(Image *image,const ssize_t x,
const ssize_t y,const size_t columns,const size_t rows,
ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
if (cache_info->methods.get_authentic_pixels_handler !=
(GetAuthenticPixelsHandler) NULL)
return(cache_info->methods.get_authentic_pixels_handler(image,x,y,columns,
rows,exception));
assert(id < (int) cache_info->number_threads);
return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
% as defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetAuthenticPixelsCache() method is:
%
% PixelPacket *GetAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PixelPacket *GetAuthenticPixelsCache(Image *image,const ssize_t x,
const ssize_t y,const size_t columns,const size_t rows,
ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
if (cache_info == (Cache) NULL)
return((PixelPacket *) NULL);
assert(cache_info->signature == MagickSignature);
assert(id < (int) cache_info->number_threads);
return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtent() returns the extent of the pixels associated with the
% last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetImageExtent() method is:
%
% MagickSizeType GetImageExtent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
assert(id < (int) cache_info->number_threads);
return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCache() ensures that there is only a single reference to the
% pixel cache to be modified, updating the provided cache pointer to point to
% a clone of the original pixel cache if necessary.
%
% The format of the GetImagePixelCache method is:
%
% Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone: any value other than MagickFalse clones the cache pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
const Image *restrict image)
{
CacheInfo
*restrict cache_info;
/*
Does the image match the pixel cache morphology?
*/
cache_info=(CacheInfo *) image->cache;
if ((image->storage_class != cache_info->storage_class) ||
(image->colorspace != cache_info->colorspace) ||
(image->channels != cache_info->channels) ||
(image->columns != cache_info->columns) ||
(image->rows != cache_info->rows) ||
(cache_info->nexus_info == (NexusInfo **) NULL))
return(MagickFalse);
return(MagickTrue);
}
static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
MagickBooleanType
destroy,
status;
static MagickSizeType
cpu_throttle = 0,
cycles = 0,
time_limit = 0;
static time_t
cache_timestamp = 0;
status=MagickTrue;
LockSemaphoreInfo(image->semaphore);
if (cpu_throttle == 0)
cpu_throttle=GetMagickResourceLimit(ThrottleResource);
if ((cpu_throttle != MagickResourceInfinity) && ((cycles++ % 32) == 0))
MagickDelay(cpu_throttle);
if (time_limit == 0)
{
/*
Set the expire time in seconds.
*/
time_limit=GetMagickResourceLimit(TimeResource);
cache_timestamp=time((time_t *) NULL);
}
if ((time_limit != MagickResourceInfinity) &&
((MagickSizeType) (time((time_t *) NULL)-cache_timestamp) >= time_limit))
{
#if defined(ECANCELED)
errno=ECANCELED;
#endif
ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
}
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
destroy=MagickFalse;
if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
{
LockSemaphoreInfo(cache_info->semaphore);
if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
{
CacheInfo
*clone_info;
Image
clone_image;
/*
Clone pixel cache.
*/
clone_image=(*image);
clone_image.semaphore=AllocateSemaphoreInfo();
clone_image.reference_count=1;
clone_image.cache=ClonePixelCache(cache_info);
clone_info=(CacheInfo *) clone_image.cache;
status=OpenPixelCache(&clone_image,IOMode,exception);
if (status != MagickFalse)
{
if (clone != MagickFalse)
status=ClonePixelCacheRepository(clone_info,cache_info,
exception);
if (status != MagickFalse)
{
if (cache_info->reference_count == 1)
cache_info->nexus_info=(NexusInfo **) NULL;
destroy=MagickTrue;
image->cache=clone_image.cache;
}
}
DestroySemaphoreInfo(&clone_image.semaphore);
}
UnlockSemaphoreInfo(cache_info->semaphore);
}
if (destroy != MagickFalse)
cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
if (status != MagickFalse)
{
/*
Ensure the image matches the pixel cache morphology.
*/
image->type=UndefinedType;
if (ValidatePixelCacheMorphology(image) == MagickFalse)
{
status=OpenPixelCache(image,IOMode,exception);
cache_info=(CacheInfo *) image->cache;
if (cache_info->type == DiskCache)
(void) ClosePixelCacheOnDisk(cache_info);
}
}
UnlockSemaphoreInfo(image->semaphore);
if (status == MagickFalse)
return((Cache) NULL);
return(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
% DiskCache, MapCache, MemoryCache, or PingCache.
%
% The format of the GetImagePixelCacheType() method is:
%
% CacheType GetImagePixelCacheType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport CacheType GetPixelCacheType(const Image *image)
{
return(GetImagePixelCacheType(image));
}
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
CacheInfo
*restrict cache_info;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
return(cache_info->type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e A u t h e n t i c P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixel() method is:
%
% MagickBooleanType GetOneAuthenticPixel(const Image image,const ssize_t x,
% const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
PixelPacket
*restrict pixels;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
*pixel=image->background_color;
if (cache_info->methods.get_one_authentic_pixel_from_handler !=
(GetOneAuthenticPixelFromHandler) NULL)
return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y,
pixel,exception));
pixels=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
if (pixels == (PixelPacket *) NULL)
return(MagickFalse);
*pixel=(*pixels);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e A u t h e n t i c P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixelFromCache() method is:
%
% MagickBooleanType GetOneAuthenticPixelFromCache(const Image image,
% const ssize_t x,const ssize_t y,PixelPacket *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
PixelPacket
*restrict pixels;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
*pixel=image->background_color;
assert(id < (int) cache_info->number_threads);
pixels=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,
cache_info->nexus_info[id],exception);
if (pixels == (PixelPacket *) NULL)
return(MagickFalse);
*pixel=(*pixels);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l M a g i c k P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualMagickPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualMagickPixel() method is:
%
% MagickBooleanType GetOneVirtualMagickPixel(const Image image,
% const ssize_t x,const ssize_t y,MagickPixelPacket *pixel,
% ExceptionInfo exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: these values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualMagickPixel(const Image *image,
const ssize_t x,const ssize_t y,MagickPixelPacket *pixel,
ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict pixels;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
assert(id < (int) cache_info->number_threads);
pixels=GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y,
1UL,1UL,cache_info->nexus_info[id],exception);
GetMagickPixelPacket(image,pixel);
if (pixels == (const PixelPacket *) NULL)
return(MagickFalse);
indexes=GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]);
SetMagickPixelPacket(image,pixels,indexes,pixel);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l M e t h o d P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualMethodPixel() returns a single pixel at the specified (x,y)
% location as defined by specified pixel method. The image background color
% is returned if an error occurs. If you plan to modify the pixel, use
% GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualMethodPixel() method is:
%
% MagickBooleanType GetOneVirtualMethodPixel(const Image image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,Pixelpacket *pixel,ExceptionInfo exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualMethodPixel(const Image *image,
const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
PixelPacket *pixel,ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
const PixelPacket
*restrict pixels;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
*pixel=image->background_color;
if (cache_info->methods.get_one_virtual_pixel_from_handler !=
(GetOneVirtualPixelFromHandler) NULL)
return(cache_info->methods.get_one_virtual_pixel_from_handler(image,
virtual_pixel_method,x,y,pixel,exception));
assert(id < (int) cache_info->number_threads);
pixels=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,1UL,1UL,
cache_info->nexus_info[id],exception);
if (pixels == (const PixelPacket *) NULL)
return(MagickFalse);
*pixel=(*pixels);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixel() returns a single virtual pixel at the specified
% (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixel() method is:
%
% MagickBooleanType GetOneVirtualPixel(const Image image,const ssize_t x,
% const ssize_t y,PixelPacket *pixel,ExceptionInfo exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
const PixelPacket
*restrict pixels;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
*pixel=image->background_color;
if (cache_info->methods.get_one_virtual_pixel_from_handler !=
(GetOneVirtualPixelFromHandler) NULL)
return(cache_info->methods.get_one_virtual_pixel_from_handler(image,
GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
assert(id < (int) cache_info->number_threads);
pixels=GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y,
1UL,1UL,cache_info->nexus_info[id],exception);
if (pixels == (const PixelPacket *) NULL)
return(MagickFalse);
*pixel=(*pixels);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e V i r t u a l P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelFromCache() returns a single virtual pixel at the
% specified (x,y) location. The image background color is returned if an
% error occurs.
%
% The format of the GetOneVirtualPixelFromCache() method is:
%
% MagickBooleanType GetOneVirtualPixelFromCache(const Image image,
% const VirtualPixelPacket method,const ssize_t x,const ssize_t y,
% PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
PixelPacket *pixel,ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
const PixelPacket
*restrict pixels;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
assert(id < (int) cache_info->number_threads);
*pixel=image->background_color;
pixels=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,1UL,1UL,
cache_info->nexus_info[id],exception);
if (pixels == (const PixelPacket *) NULL)
return(MagickFalse);
*pixel=(*pixels);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheChannels() returns the number of pixel channels associated
% with this instance of the pixel cache.
%
% The format of the GetPixelCacheChannels() method is:
%
% size_t GetPixelCacheChannels(Cache cache)
%
% A description of each parameter follows:
%
% o type: GetPixelCacheChannels returns DirectClass or PseudoClass.
%
% o cache: the pixel cache.
%
*/
MagickExport size_t GetPixelCacheChannels(const Cache cache)
{
CacheInfo
*restrict cache_info;
assert(cache != (Cache) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickSignature);
if (cache_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
cache_info->filename);
return(cache_info->channels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheColorspace() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheColorspace() method is:
%
% Colorspace GetPixelCacheColorspace(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickExport ColorspaceType GetPixelCacheColorspace(const Cache cache)
{
CacheInfo
*restrict cache_info;
assert(cache != (Cache) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickSignature);
if (cache_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
cache_info->filename);
return(cache_info->colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheMethods() initializes the CacheMethods structure.
%
% The format of the GetPixelCacheMethods() method is:
%
% void GetPixelCacheMethods(CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickExport void GetPixelCacheMethods(CacheMethods *cache_methods)
{
assert(cache_methods != (CacheMethods *) NULL);
(void) ResetMagickMemory(cache_methods,0,sizeof(*cache_methods));
cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
cache_methods->get_virtual_indexes_from_handler=GetVirtualIndexesFromCache;
cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
cache_methods->get_authentic_indexes_from_handler=
GetAuthenticIndexesFromCache;
cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
cache_methods->get_one_authentic_pixel_from_handler=
GetOneAuthenticPixelFromCache;
cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e N e x u s E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheNexusExtent() returns the extent of the pixels associated with
% the last call to SetPixelCacheNexusPixels() or GetPixelCacheNexusPixels().
%
% The format of the GetPixelCacheNexusExtent() method is:
%
% MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus info.
%
*/
MagickExport MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
NexusInfo *nexus_info)
{
CacheInfo
*restrict cache_info;
MagickSizeType
extent;
assert(cache != NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickSignature);
extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
if (extent == 0)
return((MagickSizeType) cache_info->columns*cache_info->rows);
return(extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCachePixels() returns the pixels associated with the specified image.
%
% The format of the GetPixelCachePixels() method is:
%
% void *GetPixelCachePixels(Image *image,MagickSizeType *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length,
ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
assert(length != (MagickSizeType *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
(void) exception;
*length=0;
if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache))
return((void *) NULL);
*length=cache_info->length;
return((void *) cache_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheStorageClass() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheStorageClass() method is:
%
% ClassType GetPixelCacheStorageClass(Cache cache)
%
% A description of each parameter follows:
%
% o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass.
%
% o cache: the pixel cache.
%
*/
MagickExport ClassType GetPixelCacheStorageClass(const Cache cache)
{
CacheInfo
*restrict cache_info;
assert(cache != (Cache) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickSignature);
if (cache_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
cache_info->filename);
return(cache_info->storage_class);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e T i l e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheTileSize() returns the pixel cache tile size.
%
% The format of the GetPixelCacheTileSize() method is:
%
% void GetPixelCacheTileSize(const Image *image,size_t *width,
% size_t *height)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the optimize cache tile width in pixels.
%
% o height: the optimize cache tile height in pixels.
%
*/
MagickExport void GetPixelCacheTileSize(const Image *image,size_t *width,
size_t *height)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
*width=2048UL/sizeof(PixelPacket);
if (GetImagePixelCacheType(image) == DiskCache)
*width=8192UL/sizeof(PixelPacket);
*height=(*width);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
% pixel cache. A virtual pixel is any pixel access that is outside the
% boundaries of the image cache.
%
% The format of the GetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
{
CacheInfo
*restrict cache_info;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
return(cache_info->virtual_pixel_method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l I n d e x e s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualIndexesFromCache() returns the indexes associated with the last
% call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualIndexesFromCache() method is:
%
% IndexPacket *GetVirtualIndexesFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const IndexPacket *GetVirtualIndexesFromCache(const Image *image)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
assert(id < (int) cache_info->number_threads);
return(GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l I n d e x e s F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualIndexesFromNexus() returns the indexes associated with the
% specified cache nexus.
%
% The format of the GetVirtualIndexesFromNexus() method is:
%
% const IndexPacket *GetVirtualIndexesFromNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap indexes.
%
*/
MagickExport const IndexPacket *GetVirtualIndexesFromNexus(const Cache cache,
NexusInfo *nexus_info)
{
CacheInfo
*restrict cache_info;
assert(cache != (Cache) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickSignature);
if (cache_info->storage_class == UndefinedClass)
return((IndexPacket *) NULL);
return(nexus_info->indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l I n d e x Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualIndexQueue() returns the virtual black channel or the
% colormap indexes associated with the last call to QueueAuthenticPixels() or
% GetVirtualPixels(). NULL is returned if the black channel or colormap
% indexes are not available.
%
% The format of the GetVirtualIndexQueue() method is:
%
% const IndexPacket *GetVirtualIndexQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const IndexPacket *GetVirtualIndexQueue(const Image *image)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
if (cache_info->methods.get_virtual_indexes_from_handler !=
(GetVirtualIndexesFromHandler) NULL)
return(cache_info->methods.get_virtual_indexes_from_handler(image));
assert(id < (int) cache_info->number_threads);
return(GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsFromNexus() gets virtual pixels from the in-memory or disk
% pixel cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelsFromNexus() method is:
%
% PixelPacket *GetVirtualPixelsFromNexus(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to acquire.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ssize_t
DitherMatrix[64] =
{
0, 48, 12, 60, 3, 51, 15, 63,
32, 16, 44, 28, 35, 19, 47, 31,
8, 56, 4, 52, 11, 59, 7, 55,
40, 24, 36, 20, 43, 27, 39, 23,
2, 50, 14, 62, 1, 49, 13, 61,
34, 18, 46, 30, 33, 17, 45, 29,
10, 58, 6, 54, 9, 57, 5, 53,
42, 26, 38, 22, 41, 25, 37, 21
};
static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
ssize_t
index;
index=x+DitherMatrix[x & 0x07]-32L;
if (index < 0L)
return(0L);
if (index >= (ssize_t) columns)
return((ssize_t) columns-1L);
return(index);
}
static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
ssize_t
index;
index=y+DitherMatrix[y & 0x07]-32L;
if (index < 0L)
return(0L);
if (index >= (ssize_t) rows)
return((ssize_t) rows-1L);
return(index);
}
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
if (x < 0L)
return(0L);
if (x >= (ssize_t) columns)
return((ssize_t) (columns-1));
return(x);
}
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
if (y < 0L)
return(0L);
if (y >= (ssize_t) rows)
return((ssize_t) (rows-1));
return(y);
}
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
return((ssize_t) (columns*GetPseudoRandomValue(random_info)));
}
static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
return((ssize_t) (rows*GetPseudoRandomValue(random_info)));
}
/*
VirtualPixelModulo() computes the remainder of dividing offset by extent. It
returns not only the quotient (tile the offset falls in) but also the positive
remainer within that tile such that 0 <= remainder < extent. This method is
essentially a ldiv() using a floored modulo division rather than the normal
default truncated modulo division.
*/
static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
const size_t extent)
{
MagickModulo
modulo;
modulo.quotient=offset/(ssize_t) extent;
if (offset < 0L)
modulo.quotient--;
modulo.remainder=offset-modulo.quotient*(ssize_t) extent;
return(modulo);
}
MagickExport const PixelPacket *GetVirtualPixelsFromNexus(const Image *image,
const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
const size_t columns,const size_t rows,NexusInfo *nexus_info,
ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
IndexPacket
virtual_index;
MagickOffsetType
offset;
MagickSizeType
length,
number_pixels;
NexusInfo
**restrict virtual_nexus;
PixelPacket
*restrict pixels,
virtual_pixel;
RectangleInfo
region;
register const IndexPacket
*restrict virtual_indexes;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
register ssize_t
u,
v;
/*
Acquire pixels.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
if (cache_info->type == UndefinedCache)
return((const PixelPacket *) NULL);
region.x=x;
region.y=y;
region.width=columns;
region.height=rows;
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,®ion,
(image->clip_mask != (Image *) NULL) || (image->mask != (Image *) NULL) ?
MagickTrue : MagickFalse,nexus_info,exception);
if (pixels == (PixelPacket *) NULL)
return((const PixelPacket *) NULL);
offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
nexus_info->region.x;
length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
nexus_info->region.width-1L;
number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
if ((x >= 0) && ((ssize_t) (x+columns) <= (ssize_t) cache_info->columns) &&
(y >= 0) && ((ssize_t) (y+rows) <= (ssize_t) cache_info->rows))
{
MagickBooleanType
status;
/*
Pixel request is inside cache extents.
*/
if (nexus_info->authentic_pixel_cache != MagickFalse)
return(pixels);
status=ReadPixelCachePixels(cache_info,nexus_info,exception);
if (status == MagickFalse)
return((const PixelPacket *) NULL);
if ((cache_info->storage_class == PseudoClass) ||
(cache_info->colorspace == CMYKColorspace))
{
status=ReadPixelCacheIndexes(cache_info,nexus_info,exception);
if (status == MagickFalse)
return((const PixelPacket *) NULL);
}
return(pixels);
}
/*
Pixel request is outside cache extents.
*/
q=pixels;
indexes=nexus_info->indexes;
virtual_nexus=AcquirePixelCacheNexus(1);
if (virtual_nexus == (NexusInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"UnableToGetCacheNexus","`%s'",image->filename);
return((const PixelPacket *) NULL);
}
switch (virtual_pixel_method)
{
case BlackVirtualPixelMethod:
{
SetPixelRed(&virtual_pixel,0);
SetPixelGreen(&virtual_pixel,0);
SetPixelBlue(&virtual_pixel,0);
SetPixelOpacity(&virtual_pixel,OpaqueOpacity);
break;
}
case GrayVirtualPixelMethod:
{
SetPixelRed(&virtual_pixel,QuantumRange/2);
SetPixelGreen(&virtual_pixel,QuantumRange/2);
SetPixelBlue(&virtual_pixel,QuantumRange/2);
SetPixelOpacity(&virtual_pixel,OpaqueOpacity);
break;
}
case TransparentVirtualPixelMethod:
{
SetPixelRed(&virtual_pixel,0);
SetPixelGreen(&virtual_pixel,0);
SetPixelBlue(&virtual_pixel,0);
SetPixelOpacity(&virtual_pixel,TransparentOpacity);
break;
}
case MaskVirtualPixelMethod:
case WhiteVirtualPixelMethod:
{
SetPixelRed(&virtual_pixel,QuantumRange);
SetPixelGreen(&virtual_pixel,QuantumRange);
SetPixelBlue(&virtual_pixel,QuantumRange);
SetPixelOpacity(&virtual_pixel,OpaqueOpacity);
break;
}
default:
{
virtual_pixel=image->background_color;
break;
}
}
virtual_index=0;
for (v=0; v < (ssize_t) rows; v++)
{
ssize_t
y_offset;
y_offset=y+v;
if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
(virtual_pixel_method == UndefinedVirtualPixelMethod))
y_offset=EdgeY(y_offset,cache_info->rows);
for (u=0; u < (ssize_t) columns; u+=length)
{
ssize_t
x_offset;
x_offset=x+u;
length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
(length == 0))
{
MagickModulo
x_modulo,
y_modulo;
/*
Transfer a single pixel.
*/
length=(MagickSizeType) 1;
switch (virtual_pixel_method)
{
case BackgroundVirtualPixelMethod:
case ConstantVirtualPixelMethod:
case BlackVirtualPixelMethod:
case GrayVirtualPixelMethod:
case TransparentVirtualPixelMethod:
case MaskVirtualPixelMethod:
case WhiteVirtualPixelMethod:
{
p=(&virtual_pixel);
virtual_indexes=(&virtual_index);
break;
}
case EdgeVirtualPixelMethod:
default:
{
p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
EdgeX(x_offset,cache_info->columns),
EdgeY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus,
exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
*virtual_nexus);
break;
}
case RandomVirtualPixelMethod:
{
if (cache_info->random_info == (RandomInfo *) NULL)
cache_info->random_info=AcquireRandomInfo();
p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
RandomX(cache_info->random_info,cache_info->columns),
RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
*virtual_nexus,exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
*virtual_nexus);
break;
}
case DitherVirtualPixelMethod:
{
p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
DitherX(x_offset,cache_info->columns),
DitherY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus,
exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
*virtual_nexus);
break;
}
case TileVirtualPixelMethod:
{
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
*virtual_nexus);
break;
}
case MirrorVirtualPixelMethod:
{
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
if ((x_modulo.quotient & 0x01) == 1L)
x_modulo.remainder=(ssize_t) cache_info->columns-
x_modulo.remainder-1L;
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
if ((y_modulo.quotient & 0x01) == 1L)
y_modulo.remainder=(ssize_t) cache_info->rows-
y_modulo.remainder-1L;
p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
*virtual_nexus);
break;
}
case CheckerTileVirtualPixelMethod:
{
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
{
p=(&virtual_pixel);
virtual_indexes=(&virtual_index);
break;
}
p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
*virtual_nexus);
break;
}
case HorizontalTileVirtualPixelMethod:
{
if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
{
p=(&virtual_pixel);
virtual_indexes=(&virtual_index);
break;
}
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
*virtual_nexus);
break;
}
case VerticalTileVirtualPixelMethod:
{
if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
{
p=(&virtual_pixel);
virtual_indexes=(&virtual_index);
break;
}
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
*virtual_nexus);
break;
}
case HorizontalTileEdgeVirtualPixelMethod:
{
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
*virtual_nexus,exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
*virtual_nexus);
break;
}
case VerticalTileEdgeVirtualPixelMethod:
{
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
*virtual_nexus,exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
*virtual_nexus);
break;
}
}
if (p == (const PixelPacket *) NULL)
break;
*q++=(*p);
if ((indexes != (IndexPacket *) NULL) &&
(virtual_indexes != (const IndexPacket *) NULL))
*indexes++=(*virtual_indexes);
continue;
}
/*
Transfer a run of pixels.
*/
p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x_offset,y_offset,
(size_t) length,1UL,*virtual_nexus,exception);
if (p == (const PixelPacket *) NULL)
break;
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,*virtual_nexus);
(void) memcpy(q,p,(size_t) length*sizeof(*p));
q+=length;
if ((indexes != (IndexPacket *) NULL) &&
(virtual_indexes != (const IndexPacket *) NULL))
{
(void) memcpy(indexes,virtual_indexes,(size_t) length*
sizeof(*virtual_indexes));
indexes+=length;
}
}
}
virtual_nexus=DestroyPixelCacheNexus(virtual_nexus,1);
return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel
% cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCache() method is:
%
% const PixelPacket *GetVirtualPixelCache(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const PixelPacket *GetVirtualPixelCache(const Image *image,
const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
const size_t columns,const size_t rows,ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,columns,rows,
cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelQueue() returns the virtual pixels associated with the
% last call to QueueAuthenticPixels() or GetVirtualPixels().
%
% The format of the GetVirtualPixelQueue() method is:
%
% const PixelPacket *GetVirtualPixelQueue(const Image image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const PixelPacket *GetVirtualPixelQueue(const Image *image)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
if (cache_info->methods.get_virtual_pixels_handler !=
(GetVirtualPixelsHandler) NULL)
return(cache_info->methods.get_virtual_pixels_handler(image));
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixels() returns an immutable pixel region. If the
% region is successfully accessed, a pointer to it is returned, otherwise
% NULL is returned. The returned pointer may point to a temporary working
% copy of the pixels or it may point to the original pixels in memory.
% Performance is maximized if the selected region is part of one row, or one
% or more full rows, since there is opportunity to access the pixels in-place
% (without a copy) if the image is in memory, or in a memory-mapped file. The
% returned pointer must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to access
% the black color component or to obtain the colormap indexes (of type
% IndexPacket) corresponding to the region.
%
% If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
% Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
% safe. In a threaded environment, use GetCacheViewVirtualPixels() or
% GetCacheViewAuthenticPixels() instead.
%
% The format of the GetVirtualPixels() method is:
%
% const PixelPacket *GetVirtualPixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const PixelPacket *GetVirtualPixels(const Image *image,
const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
if (cache_info->methods.get_virtual_pixel_handler !=
(GetVirtualPixelHandler) NULL)
return(cache_info->methods.get_virtual_pixel_handler(image,
GetPixelCacheVirtualMethod(image),x,y,columns,rows,exception));
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y,
columns,rows,cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsCache() returns the pixels associated with the last call
% to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualPixelsCache() method is:
%
% PixelPacket *GetVirtualPixelsCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const PixelPacket *GetVirtualPixelsCache(const Image *image)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsNexus() returns the pixels associated with the specified
% cache nexus.
%
% The format of the GetVirtualPixelsNexus() method is:
%
% const IndexPacket *GetVirtualPixelsNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap pixels.
%
*/
MagickExport const PixelPacket *GetVirtualPixelsNexus(const Cache cache,
NexusInfo *nexus_info)
{
CacheInfo
*restrict cache_info;
assert(cache != (Cache) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickSignature);
if (cache_info->storage_class == UndefinedClass)
return((PixelPacket *) NULL);
return((const PixelPacket *) nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a s k P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MaskPixelCacheNexus() masks the cache nexus as defined by the image mask.
% The method returns MagickTrue if the pixel region is masked, otherwise
% MagickFalse.
%
% The format of the MaskPixelCacheNexus() method is:
%
% MagickBooleanType MaskPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void MagickPixelCompositeMask(const MagickPixelPacket *p,
const MagickRealType alpha,const MagickPixelPacket *q,
const MagickRealType beta,MagickPixelPacket *composite)
{
double
gamma;
if (alpha == TransparentOpacity)
{
*composite=(*q);
return;
}
gamma=1.0-QuantumScale*QuantumScale*alpha*beta;
gamma=PerceptibleReciprocal(gamma);
composite->red=gamma*MagickOver_(p->red,alpha,q->red,beta);
composite->green=gamma*MagickOver_(p->green,alpha,q->green,beta);
composite->blue=gamma*MagickOver_(p->blue,alpha,q->blue,beta);
if ((p->colorspace == CMYKColorspace) && (q->colorspace == CMYKColorspace))
composite->index=gamma*MagickOver_(p->index,alpha,q->index,beta);
}
static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info,
ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
MagickPixelPacket
alpha,
beta;
MagickSizeType
number_pixels;
NexusInfo
**restrict clip_nexus,
**restrict image_nexus;
register const PixelPacket
*restrict r;
register IndexPacket
*restrict nexus_indexes,
*restrict indexes;
register PixelPacket
*restrict p,
*restrict q;
register ssize_t
i;
/*
Apply clip mask.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->mask == (Image *) NULL) ||
(image->storage_class == PseudoClass))
return(MagickTrue);
cache_info=(CacheInfo *) image->cache;
if (cache_info == (Cache) NULL)
return(MagickFalse);
image_nexus=AcquirePixelCacheNexus(1);
clip_nexus=AcquirePixelCacheNexus(1);
if ((image_nexus == (NexusInfo **) NULL) ||
(clip_nexus == (NexusInfo **) NULL))
ThrowBinaryException(CacheError,"UnableToGetCacheNexus",image->filename);
p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,
nexus_info->region.y,nexus_info->region.width,nexus_info->region.height,
image_nexus[0],exception);
indexes=image_nexus[0]->indexes;
q=nexus_info->pixels;
nexus_indexes=nexus_info->indexes;
r=GetVirtualPixelsFromNexus(image->mask,MaskVirtualPixelMethod,
nexus_info->region.x,nexus_info->region.y,nexus_info->region.width,
nexus_info->region.height,clip_nexus[0],&image->exception);
GetMagickPixelPacket(image,&alpha);
GetMagickPixelPacket(image,&beta);
number_pixels=(MagickSizeType) nexus_info->region.width*
nexus_info->region.height;
for (i=0; i < (ssize_t) number_pixels; i++)
{
if ((p == (PixelPacket *) NULL) || (r == (const PixelPacket *) NULL))
break;
SetMagickPixelPacket(image,p,indexes+i,&alpha);
SetMagickPixelPacket(image,q,nexus_indexes+i,&beta);
MagickPixelCompositeMask(&beta,GetPixelIntensity(image,r),&alpha,
alpha.opacity,&beta);
SetPixelRed(q,ClampToQuantum(beta.red));
SetPixelGreen(q,ClampToQuantum(beta.green));
SetPixelBlue(q,ClampToQuantum(beta.blue));
SetPixelOpacity(q,ClampToQuantum(beta.opacity));
if (cache_info->active_index_channel != MagickFalse)
SetPixelIndex(nexus_indexes+i,GetPixelIndex(indexes+i));
p++;
q++;
r++;
}
clip_nexus=DestroyPixelCacheNexus(clip_nexus,1);
image_nexus=DestroyPixelCacheNexus(image_nexus,1);
if (i < (ssize_t) number_pixels)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p e n P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenPixelCache() allocates the pixel cache. This includes defining the cache
% dimensions, allocating space for the image pixels and optionally the
% colormap indexes, and memory mapping the cache if it is disk based. The
% cache nexus array is initialized as well.
%
% The format of the OpenPixelCache() method is:
%
% MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void AllocatePixelCachePixels(CacheInfo *cache_info)
{
cache_info->mapped=MagickFalse;
cache_info->pixels=(PixelPacket *) MagickAssumeAligned(
AcquireAlignedMemory(1,(size_t) cache_info->length));
if (cache_info->pixels == (PixelPacket *) NULL)
{
cache_info->mapped=MagickTrue;
cache_info->pixels=(PixelPacket *) MapBlob(-1,IOMode,0,(size_t)
cache_info->length);
}
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#if defined(SIGBUS)
static void CacheSignalHandler(int status)
{
ThrowFatalException(CacheFatalError,"UnableToExtendPixelCache");
}
#endif
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
const MapMode mode)
{
int
file;
/*
Open pixel cache on disk.
*/
if (cache_info->file != -1)
return(MagickTrue); /* cache already open */
if (*cache_info->cache_filename == '\0')
file=AcquireUniqueFileResource(cache_info->cache_filename);
else
switch (mode)
{
case ReadMode:
{
file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
break;
}
case WriteMode:
{
file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
O_BINARY | O_EXCL,S_MODE);
if (file == -1)
file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE);
break;
}
case IOMode:
default:
{
file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY |
O_EXCL,S_MODE);
if (file == -1)
file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
break;
}
}
if (file == -1)
return(MagickFalse);
(void) AcquireMagickResource(FileResource,1);
cache_info->file=file;
cache_info->mode=mode;
return(MagickTrue);
}
static inline MagickOffsetType WritePixelCacheRegion(
const CacheInfo *restrict cache_info,const MagickOffsetType offset,
const MagickSizeType length,const unsigned char *restrict buffer)
{
register MagickOffsetType
i;
ssize_t
count;
#if !defined(MAGICKCORE_HAVE_PWRITE)
if (lseek(cache_info->file,offset,SEEK_SET) < 0)
return((MagickOffsetType) -1);
#endif
count=0;
for (i=0; i < (MagickOffsetType) length; i+=count)
{
#if !defined(MAGICKCORE_HAVE_PWRITE)
count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) SSIZE_MAX));
#else
count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
if (count <= 0)
{
count=0;
if (errno != EINTR)
break;
}
}
return(i);
}
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
CacheInfo
*restrict cache_info;
MagickOffsetType
count,
extent,
offset;
cache_info=(CacheInfo *) image->cache;
if (image->debug != MagickFalse)
{
char
format[MaxTextExtent],
message[MaxTextExtent];
(void) FormatMagickSize(length,MagickFalse,format);
(void) FormatLocaleString(message,MaxTextExtent,
"extend %s (%s[%d], disk, %s)",cache_info->filename,
cache_info->cache_filename,cache_info->file,format);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
}
if (length != (MagickSizeType) ((MagickOffsetType) length))
return(MagickFalse);
offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
if (offset < 0)
return(MagickFalse);
if ((MagickSizeType) offset >= length)
return(MagickTrue);
extent=(MagickOffsetType) length-1;
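/*
  Extend the file by writing a single byte at the last offset (sparse on
  filesystems that support holes); posix_fallocate() below optionally
  reserves the intervening blocks.
*/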
count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *) "");
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
if (cache_info->synchronize != MagickFalse)
{
int
status;
status=posix_fallocate(cache_info->file,offset+1,extent-offset);
if (status != 0)
return(MagickFalse);
}
#endif
#if defined(SIGBUS)
(void) signal(SIGBUS,CacheSignalHandler);
#endif
return(count != (MagickOffsetType) 1 ? MagickFalse : MagickTrue);
}
static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info,
source_info;
char
format[MaxTextExtent],
message[MaxTextExtent];
const char
*type;
MagickSizeType
length,
number_pixels;
MagickStatusType
status;
size_t
columns,
packet_size;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->columns == 0) || (image->rows == 0))
ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
source_info=(*cache_info);
source_info.file=(-1);
(void) FormatLocaleString(cache_info->filename,MaxTextExtent,"%s[%.20g]",
image->filename,(double) GetImageIndexInList(image));
cache_info->mode=mode;
cache_info->rows=image->rows;
cache_info->columns=image->columns;
cache_info->channels=image->channels;
cache_info->active_index_channel=((image->storage_class == PseudoClass) ||
(image->colorspace == CMYKColorspace)) ? MagickTrue : MagickFalse;
if (image->ping != MagickFalse)
{
cache_info->storage_class=image->storage_class;
cache_info->colorspace=image->colorspace;
cache_info->type=PingCache;
cache_info->pixels=(PixelPacket *) NULL;
cache_info->indexes=(IndexPacket *) NULL;
cache_info->length=0;
return(MagickTrue);
}
number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
packet_size=sizeof(PixelPacket);
if (cache_info->active_index_channel != MagickFalse)
packet_size+=sizeof(IndexPacket);
length=number_pixels*packet_size;
columns=(size_t) (length/cache_info->rows/packet_size);
if (cache_info->columns != columns)
ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
image->filename);
cache_info->length=length;
status=AcquireMagickResource(AreaResource,cache_info->length);
length=number_pixels*(sizeof(PixelPacket)+sizeof(IndexPacket));
if ((status != MagickFalse) && (length == (MagickSizeType) ((size_t) length)))
{
status=AcquireMagickResource(MemoryResource,cache_info->length);
if (((cache_info->type == UndefinedCache) && (status != MagickFalse)) ||
(cache_info->type == MemoryCache))
{
AllocatePixelCachePixels(cache_info);
if (cache_info->pixels == (PixelPacket *) NULL)
cache_info->pixels=source_info.pixels;
else
{
/*
Create memory pixel cache.
*/
cache_info->colorspace=image->colorspace;
cache_info->type=MemoryCache;
cache_info->indexes=(IndexPacket *) NULL;
if (cache_info->active_index_channel != MagickFalse)
cache_info->indexes=(IndexPacket *) (cache_info->pixels+
number_pixels);
if ((source_info.storage_class != UndefinedClass) &&
(mode != ReadMode))
{
status&=ClonePixelCacheRepository(cache_info,&source_info,
exception);
RelinquishPixelCachePixels(&source_info);
}
if (image->debug != MagickFalse)
{
(void) FormatMagickSize(cache_info->length,MagickTrue,format);
type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
cache_info->type);
(void) FormatLocaleString(message,MaxTextExtent,
"open %s (%s %s, %.20gx%.20g %s)",cache_info->filename,
cache_info->mapped != MagickFalse ? "Anonymous" : "Heap",
type,(double) cache_info->columns,(double) cache_info->rows,
format);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
message);
}
cache_info->storage_class=image->storage_class;
return(MagickTrue);
}
}
RelinquishMagickResource(MemoryResource,cache_info->length);
}
/*
Create pixel cache on disk.
*/
status=AcquireMagickResource(DiskResource,cache_info->length);
if ((status == MagickFalse) || (cache_info->type == DistributedCache))
{
DistributeCacheInfo
*server_info;
if (cache_info->type == DistributedCache)
RelinquishMagickResource(DiskResource,cache_info->length);
server_info=AcquireDistributeCacheInfo(exception);
if (server_info != (DistributeCacheInfo *) NULL)
{
status=OpenDistributePixelCache(server_info,image);
if (status == MagickFalse)
{
ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
GetDistributeCacheHostname(server_info));
server_info=DestroyDistributeCacheInfo(server_info);
}
else
{
/*
Create a distributed pixel cache.
*/
cache_info->type=DistributedCache;
cache_info->storage_class=image->storage_class;
cache_info->colorspace=image->colorspace;
cache_info->server_info=server_info;
(void) FormatLocaleString(cache_info->cache_filename,
MaxTextExtent,"%s:%d",GetDistributeCacheHostname(
(DistributeCacheInfo *) cache_info->server_info),
GetDistributeCachePort((DistributeCacheInfo *)
cache_info->server_info));
if ((source_info.storage_class != UndefinedClass) &&
(mode != ReadMode))
{
status=ClonePixelCacheRepository(cache_info,&source_info,
exception);
RelinquishPixelCachePixels(&source_info);
}
if (image->debug != MagickFalse)
{
(void) FormatMagickSize(cache_info->length,MagickFalse,
format);
type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
cache_info->type);
(void) FormatLocaleString(message,MaxTextExtent,
"open %s (%s[%d], %s, %.20gx%.20g %s)",cache_info->filename,
cache_info->cache_filename,GetDistributeCacheFile(
(DistributeCacheInfo *) cache_info->server_info),type,
(double) cache_info->columns,(double) cache_info->rows,
format);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
message);
}
return(MagickTrue);
}
}
RelinquishMagickResource(DiskResource,cache_info->length);
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"CacheResourcesExhausted","`%s'",image->filename);
return(MagickFalse);
}
if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
{
(void) ClosePixelCacheOnDisk(cache_info);
*cache_info->cache_filename='\0';
}
if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
{
RelinquishMagickResource(DiskResource,cache_info->length);
ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
image->filename);
return(MagickFalse);
}
status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
cache_info->length);
if (status == MagickFalse)
{
ThrowFileException(exception,CacheError,"UnableToExtendCache",
image->filename);
return(MagickFalse);
}
cache_info->storage_class=image->storage_class;
cache_info->colorspace=image->colorspace;
length=number_pixels*(sizeof(PixelPacket)+sizeof(IndexPacket));
if (length != (MagickSizeType) ((size_t) length))
cache_info->type=DiskCache;
else
{
status=AcquireMagickResource(MapResource,cache_info->length);
if ((status == MagickFalse) && (cache_info->type != MapCache) &&
(cache_info->type != MemoryCache))
cache_info->type=DiskCache;
else
{
cache_info->pixels=(PixelPacket *) MapBlob(cache_info->file,mode,
cache_info->offset,(size_t) cache_info->length);
if (cache_info->pixels == (PixelPacket *) NULL)
{
cache_info->pixels=source_info.pixels;
cache_info->type=DiskCache;
}
else
{
/*
Create file-backed memory-mapped pixel cache.
*/
(void) ClosePixelCacheOnDisk(cache_info);
cache_info->type=MapCache;
cache_info->mapped=MagickTrue;
cache_info->indexes=(IndexPacket *) NULL;
if (cache_info->active_index_channel != MagickFalse)
cache_info->indexes=(IndexPacket *) (cache_info->pixels+
number_pixels);
if ((source_info.storage_class != UndefinedClass) &&
(mode != ReadMode))
{
status=ClonePixelCacheRepository(cache_info,&source_info,
exception);
RelinquishPixelCachePixels(&source_info);
}
if (image->debug != MagickFalse)
{
(void) FormatMagickSize(cache_info->length,MagickTrue,format);
type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
cache_info->type);
(void) FormatLocaleString(message,MaxTextExtent,
"open %s (%s[%d], %s, %.20gx%.20g %s)",
cache_info->filename,cache_info->cache_filename,
cache_info->file,type,(double) cache_info->columns,(double)
cache_info->rows,format);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
message);
}
return(MagickTrue);
}
}
RelinquishMagickResource(MapResource,cache_info->length);
}
if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
{
status=ClonePixelCacheRepository(cache_info,&source_info,exception);
RelinquishPixelCachePixels(&source_info);
}
if (image->debug != MagickFalse)
{
(void) FormatMagickSize(cache_info->length,MagickFalse,format);
type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
cache_info->type);
(void) FormatLocaleString(message,MaxTextExtent,
"open %s (%s[%d], %s, %.20gx%.20g %s)",cache_info->filename,
cache_info->cache_filename,cache_info->file,type,(double)
cache_info->columns,(double) cache_info->rows,format);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r s i s t P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PersistPixelCache() attaches to or initializes a persistent pixel cache. A
% persistent pixel cache is one that resides on disk and is not destroyed
% when the program exits.
%
% The format of the PersistPixelCache() method is:
%
% MagickBooleanType PersistPixelCache(Image *image,const char *filename,
% const MagickBooleanType attach,MagickOffsetType *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filename: the persistent pixel cache filename.
%
% o attach: A value other than zero attaches to an existing persistent pixel
% cache; a zero value initializes a new persistent pixel cache.
%
% o offset: the offset in the persistent cache to store pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
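/*
  A minimal usage sketch (illustrative only; the filename and the image and
  exception setup are assumptions, not part of this file). Passing
  MagickFalse for attach persists the current pixel cache to the named
  file; passing MagickTrue attaches to a previously persisted cache:

    MagickOffsetType
      offset = 0;

    if (PersistPixelCache(image,"pixels.cache",MagickFalse,&offset,
          exception) == MagickFalse)
      return(MagickFalse);
*/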
MagickExport MagickBooleanType PersistPixelCache(Image *image,
const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info,
*restrict clone_info;
Image
clone_image;
MagickBooleanType
status;
ssize_t
page_size;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(image->cache != (void *) NULL);
assert(filename != (const char *) NULL);
assert(offset != (MagickOffsetType *) NULL);
page_size=GetMagickPageSize();
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
if (attach != MagickFalse)
{
/*
Attach existing persistent pixel cache.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CacheEvent,GetMagickModule(),
"attach persistent cache");
(void) CopyMagickString(cache_info->cache_filename,filename,
MaxTextExtent);
cache_info->type=DiskCache;
cache_info->offset=(*offset);
if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
return(MagickFalse);
*offset+=cache_info->length+page_size-(cache_info->length % page_size);
return(MagickTrue);
}
if ((cache_info->mode != ReadMode) && (cache_info->type != MemoryCache) &&
(cache_info->reference_count == 1))
{
LockSemaphoreInfo(cache_info->semaphore);
if ((cache_info->mode != ReadMode) && (cache_info->type != MemoryCache) &&
(cache_info->reference_count == 1))
{
int
status;
/*
Usurp existing persistent pixel cache.
*/
status=rename_utf8(cache_info->cache_filename,filename);
if (status == 0)
{
(void) CopyMagickString(cache_info->cache_filename,filename,
MaxTextExtent);
*offset+=cache_info->length+page_size-(cache_info->length %
page_size);
UnlockSemaphoreInfo(cache_info->semaphore);
cache_info=(CacheInfo *) ReferencePixelCache(cache_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CacheEvent,GetMagickModule(),
"Usurp resident persistent cache");
return(MagickTrue);
}
}
UnlockSemaphoreInfo(cache_info->semaphore);
}
/*
Clone persistent pixel cache.
*/
clone_image=(*image);
clone_info=(CacheInfo *) clone_image.cache;
image->cache=ClonePixelCache(cache_info);
cache_info=(CacheInfo *) ReferencePixelCache(image->cache);
(void) CopyMagickString(cache_info->cache_filename,filename,MaxTextExtent);
cache_info->type=DiskCache;
cache_info->offset=(*offset);
cache_info=(CacheInfo *) image->cache;
status=OpenPixelCache(image,IOMode,exception);
if (status != MagickFalse)
status=ClonePixelCacheRepository(cache_info,clone_info,&image->exception);
*offset+=cache_info->length+page_size-(cache_info->length % page_size);
clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelCacheNexus() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise NULL is returned.
%
% The format of the QueueAuthenticPixelCacheNexus() method is:
%
% PixelPacket *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% const MagickBooleanType clone,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to set.
%
% o clone: clone the pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *QueueAuthenticPixel(Image *image,const ssize_t x,
const ssize_t y,const size_t columns,const size_t rows,
const MagickBooleanType clone,NexusInfo *nexus_info,
ExceptionInfo *exception)
{
return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,clone,nexus_info,
exception));
}
MagickExport PixelPacket *QueueAuthenticPixelCacheNexus(Image *image,
const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
MagickOffsetType
offset;
MagickSizeType
number_pixels;
PixelPacket
*restrict pixels;
RectangleInfo
region;
/*
Validate pixel cache geometry.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
if (cache_info == (Cache) NULL)
return((PixelPacket *) NULL);
assert(cache_info->signature == MagickSignature);
if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
(y < 0) || (x >= (ssize_t) cache_info->columns) ||
(y >= (ssize_t) cache_info->rows))
{
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"PixelsAreNotAuthentic","`%s'",image->filename);
return((PixelPacket *) NULL);
}
offset=(MagickOffsetType) y*cache_info->columns+x;
if (offset < 0)
return((PixelPacket *) NULL);
number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
if ((MagickSizeType) offset >= number_pixels)
return((PixelPacket *) NULL);
/*
Return pixel cache.
*/
region.x=x;
region.y=y;
region.width=columns;
region.height=rows;
pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,&region,
(image->clip_mask != (Image *) NULL) || (image->mask != (Image *) NULL) ?
MagickTrue : MagickFalse,nexus_info,exception);
return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelsCache() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise NULL is returned.
%
% The format of the QueueAuthenticPixelsCache() method is:
%
% PixelPacket *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PixelPacket *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
const ssize_t y,const size_t columns,const size_t rows,
ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
assert(id < (int) cache_info->number_threads);
return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u e u e A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixels() queues a mutable pixel region. If the region is
% successfully initialized a pointer to a PixelPacket array representing the
% region is returned, otherwise NULL is returned. The returned pointer may
% point to a temporary working buffer for the pixels or it may point to the
% final location of the pixels in memory.
%
% Write-only access means that any existing pixel values corresponding to
% the region are ignored. This is useful if the initial image is being
% created from scratch, or if the existing pixel values are to be
% completely replaced without need to refer to their pre-existing values.
% The application is free to read and write the pixel buffer returned by
% QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not
% initialize the pixel array values. Initializing pixel array values is the
% application's responsibility.
%
% Performance is maximized if the selected region is part of one row, or
% one or more full rows, since then there is opportunity to access the
% pixels in-place (without a copy) if the image is in memory, or in a
% memory-mapped file. The returned pointer must *never* be deallocated
% by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticIndexQueue() after invoking QueueAuthenticPixels() to obtain
% the black color component or the colormap indexes (of type IndexPacket)
% corresponding to the region. Once the PixelPacket (and/or IndexPacket)
% array has been updated, the changes must be saved back to the underlying
% image using SyncAuthenticPixels() or they may be lost.
%
% The format of the QueueAuthenticPixels() method is:
%
% PixelPacket *QueueAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
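/*
  A minimal usage sketch (illustrative only; image, y, and exception come
  from the caller). Queue one row, initialize every pixel, then sync the
  region back to the pixel cache:

    register PixelPacket
      *q;

    register ssize_t
      x;

    q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      return(MagickFalse);
    for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelRed(q,0);
        SetPixelGreen(q,0);
        SetPixelBlue(q,0);
        q++;
      }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);
*/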
MagickExport PixelPacket *QueueAuthenticPixels(Image *image,const ssize_t x,
const ssize_t y,const size_t columns,const size_t rows,
ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
if (cache_info->methods.queue_authentic_pixels_handler !=
(QueueAuthenticPixelsHandler) NULL)
return(cache_info->methods.queue_authentic_pixels_handler(image,x,y,columns,
rows,exception));
assert(id < (int) cache_info->number_threads);
return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCacheIndexes() reads colormap indexes from the specified region of
% the pixel cache.
%
% The format of the ReadPixelCacheIndexes() method is:
%
% MagickBooleanType ReadPixelCacheIndexes(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the colormap indexes.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickOffsetType ReadPixelCacheRegion(
const CacheInfo *restrict cache_info,const MagickOffsetType offset,
const MagickSizeType length,unsigned char *restrict buffer)
{
register MagickOffsetType
i;
ssize_t
count;
#if !defined(MAGICKCORE_HAVE_PREAD)
if (lseek(cache_info->file,offset,SEEK_SET) < 0)
return((MagickOffsetType) -1);
#endif
count=0;
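/*
  Read in chunks of at most SSIZE_MAX bytes, retrying on EINTR; a return
  value less than length signals a short read to the caller.
*/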
for (i=0; i < (MagickOffsetType) length; i+=count)
{
#if !defined(MAGICKCORE_HAVE_PREAD)
count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) SSIZE_MAX));
#else
count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
if (count <= 0)
{
count=0;
if (errno != EINTR)
break;
}
}
return(i);
}
static MagickBooleanType ReadPixelCacheIndexes(CacheInfo *restrict cache_info,
NexusInfo *restrict nexus_info,ExceptionInfo *exception)
{
MagickOffsetType
count,
offset;
MagickSizeType
extent,
length;
register IndexPacket
*restrict q;
register ssize_t
y;
size_t
rows;
if (cache_info->active_index_channel == MagickFalse)
return(MagickFalse);
if (nexus_info->authentic_pixel_cache != MagickFalse)
return(MagickTrue);
offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
nexus_info->region.x;
length=(MagickSizeType) nexus_info->region.width*sizeof(IndexPacket);
rows=nexus_info->region.height;
extent=length*rows;
q=nexus_info->indexes;
y=0;
switch (cache_info->type)
{
case MemoryCache:
case MapCache:
{
register IndexPacket
*restrict p;
/*
Read indexes from memory.
*/
if ((cache_info->columns == nexus_info->region.width) &&
(extent == (MagickSizeType) ((size_t) extent)))
{
length=extent;
rows=1UL;
}
p=cache_info->indexes+offset;
for (y=0; y < (ssize_t) rows; y++)
{
(void) memcpy(q,p,(size_t) length);
p+=cache_info->columns;
q+=nexus_info->region.width;
}
break;
}
case DiskCache:
{
/*
Read indexes from disk.
*/
LockSemaphoreInfo(cache_info->file_semaphore);
if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
{
ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
cache_info->cache_filename);
UnlockSemaphoreInfo(cache_info->file_semaphore);
return(MagickFalse);
}
if ((cache_info->columns == nexus_info->region.width) &&
(extent <= MagickMaxBufferExtent))
{
length=extent;
rows=1UL;
}
extent=(MagickSizeType) cache_info->columns*cache_info->rows;
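/*
  On disk the colormap indexes follow the complete pixel plane, hence the
  extent*sizeof(PixelPacket) displacement in the offset below.
*/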
for (y=0; y < (ssize_t) rows; y++)
{
count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
sizeof(PixelPacket)+offset*sizeof(*q),length,(unsigned char *) q);
if ((MagickSizeType) count < length)
break;
offset+=cache_info->columns;
q+=nexus_info->region.width;
}
if (IsFileDescriptorLimitExceeded() != MagickFalse)
(void) ClosePixelCacheOnDisk(cache_info);
UnlockSemaphoreInfo(cache_info->file_semaphore);
break;
}
case DistributedCache:
{
RectangleInfo
region;
/*
Read indexes from distributed cache.
*/
LockSemaphoreInfo(cache_info->file_semaphore);
region=nexus_info->region;
if ((cache_info->columns != nexus_info->region.width) ||
(extent > MagickMaxBufferExtent))
region.height=1UL;
else
{
length=extent;
rows=1UL;
}
for (y=0; y < (ssize_t) rows; y++)
{
count=ReadDistributePixelCacheIndexes((DistributeCacheInfo *)
cache_info->server_info,&region,length,(unsigned char *) q);
if (count != (MagickOffsetType) length)
break;
q+=nexus_info->region.width;
region.y++;
}
UnlockSemaphoreInfo(cache_info->file_semaphore);
break;
}
default:
break;
}
if (y < (ssize_t) rows)
{
ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
cache_info->cache_filename);
return(MagickFalse);
}
if ((cache_info->debug != MagickFalse) &&
(CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
(void) LogMagickEvent(CacheEvent,GetMagickModule(),
"%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
nexus_info->region.width,(double) nexus_info->region.height,(double)
nexus_info->region.x,(double) nexus_info->region.y);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCachePixels() reads pixels from the specified region of the pixel
% cache.
%
% The format of the ReadPixelCachePixels() method is:
%
% MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadPixelCachePixels(CacheInfo *restrict cache_info,
NexusInfo *restrict nexus_info,ExceptionInfo *exception)
{
MagickOffsetType
count,
offset;
MagickSizeType
extent,
length;
register PixelPacket
*restrict q;
register ssize_t
y;
size_t
rows;
if (nexus_info->authentic_pixel_cache != MagickFalse)
return(MagickTrue);
offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
nexus_info->region.x;
length=(MagickSizeType) nexus_info->region.width*sizeof(PixelPacket);
rows=nexus_info->region.height;
extent=length*rows;
q=nexus_info->pixels;
y=0;
switch (cache_info->type)
{
case MemoryCache:
case MapCache:
{
register PixelPacket
*restrict p;
/*
Read pixels from memory.
*/
if ((cache_info->columns == nexus_info->region.width) &&
(extent == (MagickSizeType) ((size_t) extent)))
{
length=extent;
rows=1UL;
}
p=cache_info->pixels+offset;
for (y=0; y < (ssize_t) rows; y++)
{
(void) memcpy(q,p,(size_t) length);
p+=cache_info->columns;
q+=nexus_info->region.width;
}
break;
}
case DiskCache:
{
/*
Read pixels from disk.
*/
LockSemaphoreInfo(cache_info->file_semaphore);
if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
{
ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
cache_info->cache_filename);
UnlockSemaphoreInfo(cache_info->file_semaphore);
return(MagickFalse);
}
if ((cache_info->columns == nexus_info->region.width) &&
(extent <= MagickMaxBufferExtent))
{
length=extent;
rows=1UL;
}
for (y=0; y < (ssize_t) rows; y++)
{
count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
sizeof(*q),length,(unsigned char *) q);
if ((MagickSizeType) count < length)
break;
offset+=cache_info->columns;
q+=nexus_info->region.width;
}
if (IsFileDescriptorLimitExceeded() != MagickFalse)
(void) ClosePixelCacheOnDisk(cache_info);
UnlockSemaphoreInfo(cache_info->file_semaphore);
break;
}
case DistributedCache:
{
RectangleInfo
region;
/*
Read pixels from distributed cache.
*/
LockSemaphoreInfo(cache_info->file_semaphore);
region=nexus_info->region;
if ((cache_info->columns != nexus_info->region.width) ||
(extent > MagickMaxBufferExtent))
region.height=1UL;
else
{
length=extent;
rows=1UL;
}
for (y=0; y < (ssize_t) rows; y++)
{
count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
cache_info->server_info,&region,length,(unsigned char *) q);
if (count != (MagickOffsetType) length)
break;
q+=nexus_info->region.width;
region.y++;
}
UnlockSemaphoreInfo(cache_info->file_semaphore);
break;
}
default:
break;
}
if (y < (ssize_t) rows)
{
ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
cache_info->cache_filename);
return(MagickFalse);
}
if ((cache_info->debug != MagickFalse) &&
(CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
(void) LogMagickEvent(CacheEvent,GetMagickModule(),
"%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
nexus_info->region.width,(double) nexus_info->region.height,(double)
nexus_info->region.x,(double) nexus_info->region.y);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e f e r e n c e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferencePixelCache() increments the reference count associated with the
% pixel cache and returns a pointer to the cache.
%
% The format of the ReferencePixelCache method is:
%
% Cache ReferencePixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickExport Cache ReferencePixelCache(Cache cache)
{
CacheInfo
*restrict cache_info;
assert(cache != (Cache) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickSignature);
LockSemaphoreInfo(cache_info->semaphore);
cache_info->reference_count++;
UnlockSemaphoreInfo(cache_info->semaphore);
return(cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
% The format of the SetPixelCacheMethods() method is:
%
% void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
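/*
  A minimal usage sketch (illustrative only; AcmeQueuePixels is a
  hypothetical handler). Handlers left NULL in the CacheMethods structure
  are not installed, so only the queue handler is overridden here:

    CacheMethods
      cache_methods;

    (void) memset(&cache_methods,0,sizeof(cache_methods));
    cache_methods.queue_authentic_pixels_handler=AcmeQueuePixels;
    SetPixelCacheMethods(image->cache,&cache_methods);
*/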
MagickExport void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
CacheInfo
*restrict cache_info;
GetOneAuthenticPixelFromHandler
get_one_authentic_pixel_from_handler;
GetOneVirtualPixelFromHandler
get_one_virtual_pixel_from_handler;
/*
Set cache pixel methods.
*/
assert(cache != (Cache) NULL);
assert(cache_methods != (CacheMethods *) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickSignature);
if (cache_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
cache_info->filename);
if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
cache_info->methods.get_virtual_pixel_handler=
cache_methods->get_virtual_pixel_handler;
if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
cache_info->methods.destroy_pixel_handler=
cache_methods->destroy_pixel_handler;
if (cache_methods->get_virtual_indexes_from_handler !=
(GetVirtualIndexesFromHandler) NULL)
cache_info->methods.get_virtual_indexes_from_handler=
cache_methods->get_virtual_indexes_from_handler;
if (cache_methods->get_authentic_pixels_handler !=
(GetAuthenticPixelsHandler) NULL)
cache_info->methods.get_authentic_pixels_handler=
cache_methods->get_authentic_pixels_handler;
if (cache_methods->queue_authentic_pixels_handler !=
(QueueAuthenticPixelsHandler) NULL)
cache_info->methods.queue_authentic_pixels_handler=
cache_methods->queue_authentic_pixels_handler;
if (cache_methods->sync_authentic_pixels_handler !=
(SyncAuthenticPixelsHandler) NULL)
cache_info->methods.sync_authentic_pixels_handler=
cache_methods->sync_authentic_pixels_handler;
if (cache_methods->get_authentic_pixels_from_handler !=
(GetAuthenticPixelsFromHandler) NULL)
cache_info->methods.get_authentic_pixels_from_handler=
cache_methods->get_authentic_pixels_from_handler;
if (cache_methods->get_authentic_indexes_from_handler !=
(GetAuthenticIndexesFromHandler) NULL)
cache_info->methods.get_authentic_indexes_from_handler=
cache_methods->get_authentic_indexes_from_handler;
get_one_virtual_pixel_from_handler=
cache_info->methods.get_one_virtual_pixel_from_handler;
if (get_one_virtual_pixel_from_handler !=
(GetOneVirtualPixelFromHandler) NULL)
cache_info->methods.get_one_virtual_pixel_from_handler=
cache_methods->get_one_virtual_pixel_from_handler;
get_one_authentic_pixel_from_handler=
cache_methods->get_one_authentic_pixel_from_handler;
if (get_one_authentic_pixel_from_handler !=
(GetOneAuthenticPixelFromHandler) NULL)
cache_info->methods.get_one_authentic_pixel_from_handler=
cache_methods->get_one_authentic_pixel_from_handler;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e N e x u s P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheNexusPixels() defines the region of the cache for the
% specified cache nexus.
%
% The format of the SetPixelCacheNexusPixels() method is:
%
% PixelPacket *SetPixelCacheNexusPixels(const CacheInfo *cache_info,
% const MapMode mode,const RectangleInfo *region,
% const MagickBooleanType buffered,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o region: A pointer to the RectangleInfo structure that defines the
% region of this particular cache nexus.
%
% o buffered: pixels are buffered.
%
% o nexus_info: the cache nexus to set.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
const CacheInfo *restrict cache_info,NexusInfo *nexus_info,
ExceptionInfo *exception)
{
if (nexus_info->length != (MagickSizeType) ((size_t) nexus_info->length))
return(MagickFalse);
nexus_info->mapped=MagickFalse;
nexus_info->cache=(PixelPacket *) MagickAssumeAligned(AcquireAlignedMemory(1,
(size_t) nexus_info->length));
if (nexus_info->cache == (PixelPacket *) NULL)
{
nexus_info->mapped=MagickTrue;
nexus_info->cache=(PixelPacket *) MapBlob(-1,IOMode,0,(size_t)
nexus_info->length);
}
if (nexus_info->cache == (PixelPacket *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
cache_info->filename);
return(MagickFalse);
}
return(MagickTrue);
}
static inline MagickBooleanType IsAuthenticPixelCache(
const CacheInfo *restrict cache_info,const NexusInfo *restrict nexus_info)
{
MagickBooleanType
status;
MagickOffsetType
offset;
/*
Do the nexus pixels point directly to the in-core cache pixels, or are they buffered?
*/
if (cache_info->type == PingCache)
return(MagickTrue);
offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
nexus_info->region.x;
status=nexus_info->pixels == (cache_info->pixels+offset) ? MagickTrue :
MagickFalse;
return(status);
}
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
const MapMode mode)
{
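/*
  MagickCachePrefetch() may expand to nothing when compiler prefetch
  support is unavailable; the magick_unreferenced() macros silence
  unused-parameter warnings in that configuration.
*/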
magick_unreferenced(nexus_info);
magick_unreferenced(mode);
if (mode == ReadMode)
{
MagickCachePrefetch((unsigned char *) nexus_info->pixels,0,1);
return;
}
MagickCachePrefetch((unsigned char *) nexus_info->pixels,1,1);
}
static PixelPacket *SetPixelCacheNexusPixels(const CacheInfo *cache_info,
const MapMode mode,const RectangleInfo *region,
const MagickBooleanType buffered,NexusInfo *nexus_info,
ExceptionInfo *exception)
{
MagickBooleanType
status;
MagickSizeType
length,
number_pixels;
assert(cache_info != (const CacheInfo *) NULL);
assert(cache_info->signature == MagickSignature);
if (cache_info->type == UndefinedCache)
return((PixelPacket *) NULL);
nexus_info->region=(*region);
if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
(buffered == MagickFalse))
{
ssize_t
x,
y;
x=nexus_info->region.x+(ssize_t) nexus_info->region.width-1;
y=nexus_info->region.y+(ssize_t) nexus_info->region.height-1;
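/*
  Use the in-core cache pixels directly only when the region lies entirely
  inside the cache and is contiguous: a single row, or one or more full
  rows starting at column 0.
*/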
if (((nexus_info->region.x >= 0) && (x < (ssize_t) cache_info->columns) &&
(nexus_info->region.y >= 0) && (y < (ssize_t) cache_info->rows)) &&
((nexus_info->region.height == 1UL) || ((nexus_info->region.x == 0) &&
((nexus_info->region.width == cache_info->columns) ||
((nexus_info->region.width % cache_info->columns) == 0)))))
{
MagickOffsetType
offset;
/*
Pixels are accessed directly from memory.
*/
offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
nexus_info->region.x;
nexus_info->pixels=cache_info->pixels+offset;
nexus_info->indexes=(IndexPacket *) NULL;
if (cache_info->active_index_channel != MagickFalse)
nexus_info->indexes=cache_info->indexes+offset;
PrefetchPixelCacheNexusPixels(nexus_info,mode);
nexus_info->authentic_pixel_cache=IsAuthenticPixelCache(cache_info,
nexus_info);
return(nexus_info->pixels);
}
}
/*
Pixels are stored in a staging region until they are synced to the cache.
*/
number_pixels=(MagickSizeType) nexus_info->region.width*
nexus_info->region.height;
length=number_pixels*sizeof(PixelPacket);
if (cache_info->active_index_channel != MagickFalse)
length+=number_pixels*sizeof(IndexPacket);
if (nexus_info->cache == (PixelPacket *) NULL)
{
nexus_info->length=length;
status=AcquireCacheNexusPixels(cache_info,nexus_info,exception);
if (status == MagickFalse)
{
nexus_info->length=0;
return((PixelPacket *) NULL);
}
}
else
if (nexus_info->length < length)
{
RelinquishCacheNexusPixels(nexus_info);
nexus_info->length=length;
status=AcquireCacheNexusPixels(cache_info,nexus_info,exception);
if (status == MagickFalse)
{
nexus_info->length=0;
return((PixelPacket *) NULL);
}
}
nexus_info->pixels=nexus_info->cache;
nexus_info->indexes=(IndexPacket *) NULL;
if (cache_info->active_index_channel != MagickFalse)
nexus_info->indexes=(IndexPacket *) (nexus_info->pixels+number_pixels);
PrefetchPixelCacheNexusPixels(nexus_info,mode);
nexus_info->authentic_pixel_cache=IsAuthenticPixelCache(cache_info,
nexus_info);
return(nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
% pixel cache and returns the previous setting. A virtual pixel is any pixel
% access that is outside the boundaries of the image cache.
%
% The format of the SetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod SetPixelCacheVirtualMethod(const Image *image,
% const VirtualPixelMethod virtual_pixel_method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
*/
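/*
  A minimal usage sketch (illustrative only): treat out-of-bounds accesses
  as transparent for the duration of an operation, then restore the
  previous virtual pixel method:

    VirtualPixelMethod
      method;

    method=SetPixelCacheVirtualMethod(image,TransparentVirtualPixelMethod);
    ...apply an operation that samples beyond the image boundaries...
    (void) SetPixelCacheVirtualMethod(image,method);
*/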
static MagickBooleanType SetCacheAlphaChannel(Image *image,
const Quantum opacity)
{
CacheInfo
*restrict cache_info;
CacheView
*restrict image_view;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
image->matte=MagickTrue;
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,&image->exception); /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,1,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
&image->exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
q->opacity=opacity;
q++;
}
status=SyncCacheViewAuthenticPixels(image_view,&image->exception);
}
image_view=DestroyCacheView(image_view);
return(status);
}
MagickExport VirtualPixelMethod SetPixelCacheVirtualMethod(const Image *image,
const VirtualPixelMethod virtual_pixel_method)
{
CacheInfo
*restrict cache_info;
VirtualPixelMethod
method;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
method=cache_info->virtual_pixel_method;
cache_info->virtual_pixel_method=virtual_pixel_method;
if ((image->columns != 0) && (image->rows != 0))
switch (virtual_pixel_method)
{
case BackgroundVirtualPixelMethod:
{
if ((image->background_color.opacity != OpaqueOpacity) &&
(image->matte == MagickFalse))
(void) SetCacheAlphaChannel((Image *) image,OpaqueOpacity);
if ((IsPixelGray(&image->background_color) == MagickFalse) &&
(IsGrayColorspace(image->colorspace) != MagickFalse))
(void) SetImageColorspace((Image *) image,sRGBColorspace);
break;
}
case TransparentVirtualPixelMethod:
{
if (image->matte == MagickFalse)
(void) SetCacheAlphaChannel((Image *) image,OpaqueOpacity);
break;
}
default:
break;
}
return(method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
% in-memory or disk cache. The method returns MagickTrue if the pixel region
% is synced, otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelCacheNexus() method is:
%
% MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to sync.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
NexusInfo *restrict nexus_info,ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
MagickBooleanType
status;
/*
Transfer pixels to the cache.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->cache == (Cache) NULL)
ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
if (cache_info->type == UndefinedCache)
return(MagickFalse);
if ((image->storage_class == DirectClass) &&
(image->clip_mask != (Image *) NULL) &&
(ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
return(MagickFalse);
if ((image->storage_class == DirectClass) &&
(image->mask != (Image *) NULL) &&
(MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
return(MagickFalse);
if (nexus_info->authentic_pixel_cache != MagickFalse)
{
image->taint=MagickTrue;
return(MagickTrue);
}
assert(cache_info->signature == MagickSignature);
status=WritePixelCachePixels(cache_info,nexus_info,exception);
if ((cache_info->active_index_channel != MagickFalse) &&
(WritePixelCacheIndexes(cache_info,nexus_info,exception) == MagickFalse))
return(MagickFalse);
if (status != MagickFalse)
image->taint=MagickTrue;
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
% or disk cache. The method returns MagickTrue if the pixel region is synced,
% otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelsCache() method is:
%
% MagickBooleanType SyncAuthenticPixelsCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
assert(id < (int) cache_info->number_threads);
status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncAuthenticPixels() method is:
%
% MagickBooleanType SyncAuthenticPixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
if (cache_info->methods.sync_authentic_pixels_handler !=
(SyncAuthenticPixelsHandler) NULL)
return(cache_info->methods.sync_authentic_pixels_handler(image,exception));
assert(id < (int) cache_info->number_threads);
status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncImagePixelCache() method is:
%
% MagickBooleanType SyncImagePixelCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
assert(image != (Image *) NULL);
assert(exception != (ExceptionInfo *) NULL);
cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception);
return(cache_info == (CacheInfo *) NULL ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCacheIndexes() writes the colormap indexes to the specified
% region of the pixel cache.
%
% The format of the WritePixelCacheIndexes() method is:
%
% MagickBooleanType WritePixelCacheIndexes(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the colormap indexes.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCacheIndexes(CacheInfo *cache_info,
NexusInfo *restrict nexus_info,ExceptionInfo *exception)
{
MagickOffsetType
count,
offset;
MagickSizeType
extent,
length;
register const IndexPacket
*restrict p;
register ssize_t
y;
size_t
rows;
if (cache_info->active_index_channel == MagickFalse)
return(MagickFalse);
if (nexus_info->authentic_pixel_cache != MagickFalse)
return(MagickTrue);
offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
nexus_info->region.x;
length=(MagickSizeType) nexus_info->region.width*sizeof(IndexPacket);
rows=nexus_info->region.height;
extent=(MagickSizeType) length*rows;
p=nexus_info->indexes;
y=0;
switch (cache_info->type)
{
case MemoryCache:
case MapCache:
{
register IndexPacket
*restrict q;
/*
Write indexes to memory.
*/
if ((cache_info->columns == nexus_info->region.width) &&
(extent == (MagickSizeType) ((size_t) extent)))
{
length=extent;
rows=1UL;
}
q=cache_info->indexes+offset;
for (y=0; y < (ssize_t) rows; y++)
{
(void) memcpy(q,p,(size_t) length);
p+=nexus_info->region.width;
q+=cache_info->columns;
}
break;
}
case DiskCache:
{
/*
Write indexes to disk.
*/
LockSemaphoreInfo(cache_info->file_semaphore);
if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
{
ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
cache_info->cache_filename);
UnlockSemaphoreInfo(cache_info->file_semaphore);
return(MagickFalse);
}
if ((cache_info->columns == nexus_info->region.width) &&
(extent <= MagickMaxBufferExtent))
{
length=extent;
rows=1UL;
}
extent=(MagickSizeType) cache_info->columns*cache_info->rows;
for (y=0; y < (ssize_t) rows; y++)
{
count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
sizeof(PixelPacket)+offset*sizeof(*p),length,(const unsigned char *)
p);
if ((MagickSizeType) count < length)
break;
p+=nexus_info->region.width;
offset+=cache_info->columns;
}
if (IsFileDescriptorLimitExceeded() != MagickFalse)
(void) ClosePixelCacheOnDisk(cache_info);
UnlockSemaphoreInfo(cache_info->file_semaphore);
break;
}
case DistributedCache:
{
RectangleInfo
region;
/*
Write indexes to distributed cache.
*/
LockSemaphoreInfo(cache_info->file_semaphore);
region=nexus_info->region;
if ((cache_info->columns != nexus_info->region.width) ||
(extent > MagickMaxBufferExtent))
region.height=1UL;
else
{
length=extent;
rows=1UL;
}
for (y=0; y < (ssize_t) rows; y++)
{
count=WriteDistributePixelCacheIndexes((DistributeCacheInfo *)
cache_info->server_info,&region,length,(const unsigned char *) p);
if (count != (MagickOffsetType) length)
break;
p+=nexus_info->region.width;
region.y++;
}
UnlockSemaphoreInfo(cache_info->file_semaphore);
break;
}
default:
break;
}
if (y < (ssize_t) rows)
{
ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
cache_info->cache_filename);
return(MagickFalse);
}
if ((cache_info->debug != MagickFalse) &&
(CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
(void) LogMagickEvent(CacheEvent,GetMagickModule(),
"%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
nexus_info->region.width,(double) nexus_info->region.height,(double)
nexus_info->region.x,(double) nexus_info->region.y);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCachePixels() writes image pixels to the specified region of the
% pixel cache.
%
% The format of the WritePixelCachePixels() method is:
%
% MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
NexusInfo *restrict nexus_info,ExceptionInfo *exception)
{
MagickOffsetType
count,
offset;
MagickSizeType
extent,
length;
register const PixelPacket
*restrict p;
register ssize_t
y;
size_t
rows;
if (nexus_info->authentic_pixel_cache != MagickFalse)
return(MagickTrue);
offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
nexus_info->region.x;
length=(MagickSizeType) nexus_info->region.width*sizeof(PixelPacket);
rows=nexus_info->region.height;
extent=length*rows;
p=nexus_info->pixels;
y=0;
switch (cache_info->type)
{
case MemoryCache:
case MapCache:
{
register PixelPacket
*restrict q;
/*
Write pixels to memory.
*/
if ((cache_info->columns == nexus_info->region.width) &&
(extent == (MagickSizeType) ((size_t) extent)))
{
length=extent;
rows=1UL;
}
q=cache_info->pixels+offset;
for (y=0; y < (ssize_t) rows; y++)
{
(void) memcpy(q,p,(size_t) length);
p+=nexus_info->region.width;
q+=cache_info->columns;
}
break;
}
case DiskCache:
{
/*
Write pixels to disk.
*/
LockSemaphoreInfo(cache_info->file_semaphore);
if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
{
ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
cache_info->cache_filename);
UnlockSemaphoreInfo(cache_info->file_semaphore);
return(MagickFalse);
}
if ((cache_info->columns == nexus_info->region.width) &&
(extent <= MagickMaxBufferExtent))
{
length=extent;
rows=1UL;
}
for (y=0; y < (ssize_t) rows; y++)
{
count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
sizeof(*p),length,(const unsigned char *) p);
if ((MagickSizeType) count < length)
break;
p+=nexus_info->region.width;
offset+=cache_info->columns;
}
if (IsFileDescriptorLimitExceeded() != MagickFalse)
(void) ClosePixelCacheOnDisk(cache_info);
UnlockSemaphoreInfo(cache_info->file_semaphore);
break;
}
case DistributedCache:
{
RectangleInfo
region;
/*
Write pixels to distributed cache.
*/
LockSemaphoreInfo(cache_info->file_semaphore);
region=nexus_info->region;
if ((cache_info->columns != nexus_info->region.width) ||
(extent > MagickMaxBufferExtent))
region.height=1UL;
else
{
length=extent;
rows=1UL;
}
for (y=0; y < (ssize_t) rows; y++)
{
count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
cache_info->server_info,&region,length,(const unsigned char *) p);
if (count != (MagickOffsetType) length)
break;
p+=nexus_info->region.width;
region.y++;
}
UnlockSemaphoreInfo(cache_info->file_semaphore);
break;
}
default:
break;
}
if (y < (ssize_t) rows)
{
ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
cache_info->cache_filename);
return(MagickFalse);
}
if ((cache_info->debug != MagickFalse) &&
(CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
(void) LogMagickEvent(CacheEvent,GetMagickModule(),
"%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
nexus_info->region.width,(double) nexus_info->region.height,(double)
nexus_info->region.x,(double) nexus_info->region.y);
return(MagickTrue);
}
|
set_constants.c | //-------------------------------------------------------------------------//
// //
// This benchmark is a serial C version of the NPB SP code. This C //
// version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the serial Fortran versions in //
// "NPB3.3-SER" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this C version to cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include "header.h"
#include <math.h>
void set_constants()
{
ce[0][0] = 2.0;
ce[0][1] = 0.0;
ce[0][2] = 0.0;
ce[0][3] = 4.0;
ce[0][4] = 5.0;
ce[0][5] = 3.0;
ce[0][6] = 0.5;
ce[0][7] = 0.02;
ce[0][8] = 0.01;
ce[0][9] = 0.03;
ce[0][10] = 0.5;
ce[0][11] = 0.4;
ce[0][12] = 0.3;
ce[1][0] = 1.0;
ce[1][1] = 0.0;
ce[1][2] = 0.0;
ce[1][3] = 0.0;
ce[1][4] = 1.0;
ce[1][5] = 2.0;
ce[1][6] = 3.0;
ce[1][7] = 0.01;
ce[1][8] = 0.03;
ce[1][9] = 0.02;
ce[1][10] = 0.4;
ce[1][11] = 0.3;
ce[1][12] = 0.5;
ce[2][0] = 2.0;
ce[2][1] = 2.0;
ce[2][2] = 0.0;
ce[2][3] = 0.0;
ce[2][4] = 0.0;
ce[2][5] = 2.0;
ce[2][6] = 3.0;
ce[2][7] = 0.04;
ce[2][8] = 0.03;
ce[2][9] = 0.05;
ce[2][10] = 0.3;
ce[2][11] = 0.5;
ce[2][12] = 0.4;
ce[3][0] = 2.0;
ce[3][1] = 2.0;
ce[3][2] = 0.0;
ce[3][3] = 0.0;
ce[3][4] = 0.0;
ce[3][5] = 2.0;
ce[3][6] = 3.0;
ce[3][7] = 0.03;
ce[3][8] = 0.05;
ce[3][9] = 0.04;
ce[3][10] = 0.2;
ce[3][11] = 0.1;
ce[3][12] = 0.3;
ce[4][0] = 5.0;
ce[4][1] = 4.0;
ce[4][2] = 3.0;
ce[4][3] = 2.0;
ce[4][4] = 0.1;
ce[4][5] = 0.4;
ce[4][6] = 0.3;
ce[4][7] = 0.05;
ce[4][8] = 0.04;
ce[4][9] = 0.03;
ce[4][10] = 0.1;
ce[4][11] = 0.3;
ce[4][12] = 0.2;
c1 = 1.4;
c2 = 0.4;
c3 = 0.1;
c4 = 1.0;
c5 = 1.4;
bt = sqrt(0.5);
dnxm1 = 1.0 / (double)(grid_points[0]-1);
dnym1 = 1.0 / (double)(grid_points[1]-1);
dnzm1 = 1.0 / (double)(grid_points[2]-1);
c1c2 = c1 * c2;
c1c5 = c1 * c5;
c3c4 = c3 * c4;
c1345 = c1c5 * c3c4;
conz1 = (1.0-c1c5);
tx1 = 1.0 / (dnxm1 * dnxm1);
tx2 = 1.0 / (2.0 * dnxm1);
tx3 = 1.0 / dnxm1;
ty1 = 1.0 / (dnym1 * dnym1);
ty2 = 1.0 / (2.0 * dnym1);
ty3 = 1.0 / dnym1;
tz1 = 1.0 / (dnzm1 * dnzm1);
tz2 = 1.0 / (2.0 * dnzm1);
tz3 = 1.0 / dnzm1;
dx1 = 0.75;
dx2 = 0.75;
dx3 = 0.75;
dx4 = 0.75;
dx5 = 0.75;
dy1 = 0.75;
dy2 = 0.75;
dy3 = 0.75;
dy4 = 0.75;
dy5 = 0.75;
dz1 = 1.0;
dz2 = 1.0;
dz3 = 1.0;
dz4 = 1.0;
dz5 = 1.0;
dxmax = fmax(dx3, dx4);
dymax = fmax(dy2, dy4);
dzmax = fmax(dz2, dz3);
dssp = 0.25 * fmax(dx1, fmax(dy1, dz1));
c4dssp = 4.0 * dssp;
c5dssp = 5.0 * dssp;
dttx1 = dt*tx1;
dttx2 = dt*tx2;
dtty1 = dt*ty1;
dtty2 = dt*ty2;
dttz1 = dt*tz1;
dttz2 = dt*tz2;
c2dttx1 = 2.0*dttx1;
c2dtty1 = 2.0*dtty1;
c2dttz1 = 2.0*dttz1;
dtdssp = dt*dssp;
comz1 = dtdssp;
comz4 = 4.0*dtdssp;
comz5 = 5.0*dtdssp;
comz6 = 6.0*dtdssp;
c3c4tx3 = c3c4*tx3;
c3c4ty3 = c3c4*ty3;
c3c4tz3 = c3c4*tz3;
dx1tx1 = dx1*tx1;
dx2tx1 = dx2*tx1;
dx3tx1 = dx3*tx1;
dx4tx1 = dx4*tx1;
dx5tx1 = dx5*tx1;
dy1ty1 = dy1*ty1;
dy2ty1 = dy2*ty1;
dy3ty1 = dy3*ty1;
dy4ty1 = dy4*ty1;
dy5ty1 = dy5*ty1;
dz1tz1 = dz1*tz1;
dz2tz1 = dz2*tz1;
dz3tz1 = dz3*tz1;
dz4tz1 = dz4*tz1;
dz5tz1 = dz5*tz1;
c2iv = 2.5;
con43 = 4.0/3.0;
con16 = 1.0/6.0;
xxcon1 = c3c4tx3*con43*tx3;
xxcon2 = c3c4tx3*tx3;
xxcon3 = c3c4tx3*conz1*tx3;
xxcon4 = c3c4tx3*con16*tx3;
xxcon5 = c3c4tx3*c1c5*tx3;
yycon1 = c3c4ty3*con43*ty3;
yycon2 = c3c4ty3*ty3;
yycon3 = c3c4ty3*conz1*ty3;
yycon4 = c3c4ty3*con16*ty3;
yycon5 = c3c4ty3*c1c5*ty3;
zzcon1 = c3c4tz3*con43*tz3;
zzcon2 = c3c4tz3*tz3;
zzcon3 = c3c4tz3*conz1*tz3;
zzcon4 = c3c4tz3*con16*tz3;
zzcon5 = c3c4tz3*c1c5*tz3;
#pragma omp target update to(\
zzcon2,zzcon3,zzcon4,zzcon5,dzmax,tz2,dz1tz1,dz2tz1,dz3tz1,\
dz4tz1,dz5tz1,dttz1,dttz2,dz1,dz2,dz3,dz4,dz5,c2dttz1,\
yycon2,yycon3,yycon4,yycon5,dymax,ty2,dy1ty1,dy2ty1,dy3ty1,\
dy4ty1,dy5ty1,dtty1,dtty2,dy1,dy2,dy3,dy4,dy5,c2dtty1,\
xxcon2,xxcon3,xxcon4,xxcon5,dxmax,tx2,dx1tx1,dx2tx1,dx3tx1,\
dx4tx1,dx5tx1,dttx1,dttx2,dx1,dx2,dx3,dx4,dx5,c2dttx1,\
dssp,c1c5,c1c2,c3c4,con43,bt,c1,c2,c2iv,comz1,comz4,comz5,comz6)
}
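/*
  The `target update to(...)` directive above pushes the freshly computed host
  values of these globals to their device copies; the variables themselves are
  assumed to be device-mapped elsewhere (e.g. via `declare target` in header.h).
  A minimal sketch of the idiom, using hypothetical variables:

    #pragma omp declare target
    double c1, c2;                          // device copies exist for these
    #pragma omp end declare target

    void set() {
      c1 = 1.4; c2 = 0.4;                   // host copies updated
      #pragma omp target update to(c1, c2)  // synchronize host -> device
    }
*/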
|
GB_unop__log10_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__log10_fc32_fc32
// op(A') function: GB_unop_tran__log10_fc32_fc32
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = GB_clog10f (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_clog10f (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = GB_clog10f (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG10 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__log10_fc32_fc32
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = GB_clog10f (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = GB_clog10f (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
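//------------------------------------------------------------------------------
// The apply kernel above is a plain elementwise map, Cx [p] = log10 (Ax [p]),
// over float complex entries with an optional bitmap mask Ab. A portable
// sketch of the same map using only C99 <complex.h> (GB_clog10f itself is
// GraphBLAS-internal; log10(z) = clog(z)/log(10) is the standard identity):
//
// #include <complex.h>
// #include <math.h>
// static void apply_log10 (float complex *C, const float complex *A,
//     const signed char *Ab, long n)
// {
//     for (long p = 0 ; p < n ; p++)
//     {
//         if (Ab != NULL && !Ab [p]) continue ;  // entry absent in the bitmap
//         C [p] = clogf (A [p]) / logf (10.0f) ;
//     }
// }
//------------------------------------------------------------------------------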
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__log10_fc32_fc32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
VectorMatrix.h | // Copyright 2015 Christina Teflioudi
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
* VectorMatrix.h
*
* Created on: Oct 10, 2013
* Author: chteflio
*/
#ifndef VECTORMATRIX_H_
#define VECTORMATRIX_H_
#include <boost/numeric/ublas/matrix_proxy.hpp>
#include <fstream>
#include <iostream>
#include <cmath>
#include <util/exception.h>
#include <util/io.h>
#include <string>
#include <ostream>
#include <iomanip>
#include <boost/unordered_map.hpp>
#include <boost/algorithm/string/predicate.hpp>
#ifdef WITH_SIMD
#include <pmmintrin.h> //SSE3
#endif
using boost::unordered_map;
namespace mips {
inline void skipLineFromFile(std::ifstream & file) {
char c = 0;
while (c != '\n' && !file.eof() && file.good()) {
file >> std::noskipws >> c;
}
file >> std::skipws;
}
inline void computeDefaultBlockOffsets(row_type size, row_type blocks, std::vector<row_type>& blockOffsets, row_type start = 0) {
blockOffsets.resize(blocks);
row_type minSize = size / blocks;
row_type remainder = size % blocks;
for (row_type i = 0; i < blocks; ++i) {
if (i == 0) {
blockOffsets[i] = start;
} else {
blockOffsets[i] = minSize + blockOffsets[i - 1];
if (remainder > 0) {
++blockOffsets[i];
--remainder;
}
}
}
};
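// Worked example: size = 10, blocks = 3, start = 0 gives minSize = 3 and
// remainder = 1, so blockOffsets = {0, 4, 7} -- block sizes 4, 3, 3, with the
// remainder spread one extra row at a time over the earliest blocks.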
inline void scaleAndCopy(double* v1, const double* v2, double scale, col_type colNum) {
for (int j = 0; j < colNum; ++j) {
v1[j] = v2[j] * scale;
}
}
inline void copy(double* v1, const double* v2, col_type colNum) {
for (int j = 0; j < colNum; ++j) {
v1[j] = v2[j];
}
// std::memcpy((void*) v1, (void*) v2, sizeof (double)*colNum);
}
inline double calculateLength(const double* vec, col_type colNum) {
double len = 0;
for (int j = 0; j < colNum; ++j) {
len += vec[j] * vec[j];
}
return sqrt(len);
}
class VectorMatrix {
double* data;
bool shuffled, normalized, extraMult;
row_type offset;
col_type lengthOffset;
int sizeDiv2; // for simd instruction
inline void zeroOutLastPadding() {
for (row_type i = 0; i < rowNum; ++i) {
data[(i + 1) * offset - 1] = 0; // zero-out the last padding
}
}
// stuff that needs to be done in both read methods
// rowNum and colNum need to be initialized before calling this method
inline void readFromFileCommon() {
if (pow(2, sizeof (col_type) * 8) - 1 < colNum) {
std::cerr << "Your vectors have dimensionality " << colNum << " which is more than what lemp is compiled to store. Change the col_type in BasicStructs.h and recompile!" << std::endl;
exit(1);
}
if (pow(2, sizeof (row_type) * 8) - 1 < rowNum) {
std::cerr << "Your dataset has " << rowNum << " vectors which is more than what lemp is compiled to store. Change the row_type in BasicStructs.h and recompile!" << std::endl;
exit(1);
}
initializeBasics(colNum, rowNum, false);
if (colNum < NUM_LISTS) {
std::cout << "[WARNING] Your vectors have dimensionality" << colNum << " and the tuner will try to search among " << NUM_LISTS <<
". Perhaps you want to change the parameter NUM_LISTS in Definitions.h and recompile!" << std::endl;
}
if (LOWER_LIMIT_PER_BUCKET >= rowNum) {
std::cout << "[WARNING] You have " << rowNum << " vectors and the tuner will try to take a sample of at least " << LOWER_LIMIT_PER_BUCKET <<
" vectors per probe bucket. Perhaps you want to change the parameter LOWER_LIMIT_PER_BUCKET in Definitions.h and recompile!" << std::endl;
}
for (int i = 0; i < rowNum; i++) {
setLengthInData(i, 1);
}
}
inline void readFromFileCSV(const std::string& fileName, ta_size_type col, ta_size_type row) {
std::ifstream file(fileName.c_str(), std::ios_base::in);
if (!file.is_open()) {
std::cout << "[ERROR] Fail to open file: " << fileName << std::endl;
exit(1);
}
rowNum = row;
colNum = col;
std::cout << "[INFO] VectorMatrix will be read from " << fileName << " (" << rowNum << " vectors with dimensionality " << (0 + colNum) << ")" << std::endl;
VectorMatrix::readFromFileCommon();
std::string buffer;
if (file) {
for (ta_size_type i = 0; i < row; i++) {
double* d = getMatrixRowPtr(i);
for (ta_size_type j = 0; j < col; j++) {
double f;
file >> f;
if (j != col - 1) {
std::getline(file, buffer, ',');
}
d[j] = f;
}
std::getline(file, buffer);
}
}
file.close();
}
inline void readFromFileMMA(const std::string& fileName, bool left = true) {
std::ifstream file(fileName.c_str(), std::ios_base::in);
if (!file.is_open()) {
std::cout << "[ERROR] Fail to open file: " << fileName << std::endl;
exit(1);
}
while (file.peek() == '%') {
skipLineFromFile(file);
}
ta_size_type col; // columns
ta_size_type row; // rows
file >> row >> col;
rowNum = (left ? row : col);
colNum = (left ? col : row);
std::cout << "[INFO] VectorMatrix will be read from " << fileName << " (" << rowNum << " vectors with dimensionality " << (0 + colNum) << ")" << std::endl;
VectorMatrix::readFromFileCommon();
if (left) {
if (file) {
for (ta_size_type i = 0; i < col; i++) {// read one column
for (ta_size_type j = 0; j < row; j++) {
double f;
file >> f;
double* d = getMatrixRowPtr(j);
d[i] = f;
}
}
}
file.close();
} else {
if (file) {
for (ta_size_type i = 0; i < col; i++) {// read one column
for (ta_size_type j = 0; j < row; j++) {
double f;
file >> f;
double* d = getMatrixRowPtr(i);
d[j] = f;
}
}
}
file.close();
}
}
// const VectorMatrix& operator =(const VectorMatrix& m);
public:
std::vector<double> cweights; // forAP
std::vector<double> maxVectorCoord; // forAP
std::vector<row_type> vectorNNZ; // forAP
std::vector<QueueElement> lengthInfo; // data: length id: vectorId
std::vector<double> epsilonEquivalents;
col_type colNum;
row_type rowNum;
friend void splitMatrices(const VectorMatrix& originalMatrix, std::vector<VectorMatrix>& matrices);
friend void initializeMatrices(const VectorMatrix& originalMatrix, std::vector<VectorMatrix>& matrices, bool sort, bool ignoreLengths, double epsilon);
inline VectorMatrix() : data(nullptr), shuffled(false), normalized(false), lengthOffset(1) { // 1 is for padding
}
inline VectorMatrix(const std::vector<std::vector<double> > m) : data(nullptr), shuffled(false), normalized(false), lengthOffset(1){
initializeBasics(m[0].size(), m.size(), false);
#pragma omp parallel for schedule(static, 1000)
for (int i = 0; i < rowNum; ++i) {
double* v1 = getMatrixRowPtr(i);
const double * v2 = &m[i][0];
// std::memcpy((void*) v1, (void*) v2, sizeof (double)*colNum);
copy(v1, v2, colNum);
// for (int j = 0; j < colNum; ++j) {
//// v1[j] = v2[j];
// std::cout<<v1[j]<<" ";
// }
// std::cout<<std::endl;
}
// std::cout<<"offset: "<<(int)offset<<" "<<(int)lengthOffset<<std::endl;
}
VectorMatrix& operator=(const VectorMatrix& r) {
colNum = r.colNum;
rowNum = r.rowNum;
shuffled = r.shuffled;
normalized = r.normalized;
extraMult = r.extraMult;
offset = r.offset;
lengthOffset = r.lengthOffset;
sizeDiv2 = r.sizeDiv2;
lengthInfo.clear();
lengthInfo.reserve(r.lengthInfo.size());
std::copy(r.lengthInfo.begin(), r.lengthInfo.end(), back_inserter(lengthInfo));
cweights.clear();
cweights.reserve(r.cweights.size());
std::copy(r.cweights.begin(), r.cweights.end(), back_inserter(cweights));
maxVectorCoord.clear();
maxVectorCoord.reserve(r.maxVectorCoord.size());
std::copy(r.maxVectorCoord.begin(), r.maxVectorCoord.end(), back_inserter(maxVectorCoord));
vectorNNZ.clear();
vectorNNZ.reserve(r.vectorNNZ.size());
std::copy(r.vectorNNZ.begin(), r.vectorNNZ.end(), back_inserter(vectorNNZ));
epsilonEquivalents.clear();
epsilonEquivalents.reserve(r.epsilonEquivalents.size());
std::copy(r.epsilonEquivalents.begin(), r.epsilonEquivalents.end(), back_inserter(epsilonEquivalents));
if (data != nullptr)
free(data);
int res = posix_memalign((void **) &(data), 16, sizeof (double)* offset * rowNum);
if (res != 0) {
std::cout << "[ERROR] Problem with allocating memory for VectorMatrix!" << std::endl;
exit(1);
}
std::memcpy((void*) data, (void*) r.data, sizeof (double)* offset * rowNum);
return *this;
}
inline ~VectorMatrix() {
if (data != nullptr) {
free(data);
data = nullptr;
}
}
inline void fillInRandom(row_type rows, col_type cols) {
initializeBasics(cols, rows, false);
rg::Random32 rand(time(nullptr));
for (int i = 0; i < rowNum; ++i) {
double * vec = getMatrixRowPtr(i);
for (int j = 0; j < colNum; ++j) {
vec[j] = rand.nextDouble();
}
}
}
inline void initializeBasics(col_type numOfColumns, row_type numOfRows, bool norm) {
colNum = numOfColumns;
offset = colNum + 2;
sizeDiv2 = colNum & (-2);
extraMult = (sizeDiv2 < colNum);
if (extraMult)
offset++;
rowNum = numOfRows;
normalized = norm;
lengthInfo.resize(rowNum);
int res = posix_memalign((void **) &(data), 16, sizeof (double)* offset * rowNum);
if (res != 0) {
std::cout << "[ERROR] Problem with allocating memory for VectorMatrix!" << std::endl;
exit(1);
}
if (extraMult) {
zeroOutLastPadding();
}
}
inline void readFromFile(const std::string& fileName, int numCoordinates, int numVectors, bool left = true) {
if (boost::algorithm::ends_with(fileName, ".csv")) {
if (numCoordinates == 0 || numVectors == 0) {
std::cerr << "When using csv files, you should provide the number of coordinates (--r) and the number of vectors (--m or --n)!" << std::endl;
exit(1);
}
readFromFileCSV(fileName, numCoordinates, numVectors);
} else if (boost::algorithm::ends_with(fileName, ".mma")) {
readFromFileMMA(fileName, left);
} else {
std::cerr << "No valid input file format to read a VectorMatrix from!" << std::endl;
exit(1);
}
}
inline void init(const VectorMatrix& matrix, bool sort, bool ignoreLength) {
initializeBasics(matrix.colNum, matrix.rowNum, true);
if (ignoreLength) {
#pragma omp parallel for schedule(static, 1000)
// get lengths
for (int i = 0; i < rowNum; ++i) {
const double* vec = matrix.getMatrixRowPtr(i);
double len = calculateLength(vec, colNum);
lengthInfo[i] = QueueElement(1, i);
setLengthInData(i, 1);
double x = 1 / len;
double * d1 = getMatrixRowPtr(i);
scaleAndCopy(d1, vec, x, colNum);
}
} else {
#pragma omp parallel for schedule(static,1000)
for (int i = 0; i < rowNum; ++i) {
const double* vec = matrix.getMatrixRowPtr(i);
double len = calculateLength(vec, colNum);
lengthInfo[i] = QueueElement(len, i);
}
if (sort) {
shuffled = true;
std::sort(lengthInfo.begin(), lengthInfo.end(), std::greater<QueueElement>());
}
#pragma omp parallel for schedule(static,1000)
for (int i = 0; i < rowNum; ++i) {
setLengthInData(i, lengthInfo[i].data);
double x = 1 / lengthInfo[i].data;
double * d1 = getMatrixRowPtr(i);
double * d2 = matrix.getMatrixRowPtr(lengthInfo[i].id);
scaleAndCopy(d1, d2, x, colNum);
}
}
}
inline void addVectors(const VectorMatrix& matrix, const std::vector<row_type>& dataIds) {
initializeBasics(matrix.colNum, dataIds.size(), false);
for (int i = 0; i < rowNum; ++i) {
const double* vec = matrix.getMatrixRowPtr(dataIds[i]);
lengthInfo[i] = QueueElement(1, dataIds[i]);
double * d1 = getMatrixRowPtr(i);
scaleAndCopy(d1, vec, 1, colNum);
}
}
inline double* getMatrixRowPtr(row_type row) const {// the row starts from pos 1. Do ptr[-1] to get the length
return &data[row * offset + 1 + lengthOffset];
}
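// Per-row layout in `data` (offset doubles per row, illustrative):
// [pad][length][v_0 ... v_{colNum-1}][extra pad iff colNum is odd]
// so ptr[-1] reads the stored length, and each row occupies an even number of
// doubles, keeping the 16-byte alignment that the SSE loads in cosine() need.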
inline void print(row_type row) const {
const double* vec = getMatrixRowPtr(row);
for (int i = 0; i < colNum; ++i) {
std::cout << i << ":" << vec[i] << " ";
}
std::cout << std::endl;
std::cout << "Length: " << vec[-1] << " or " << lengthInfo[row].data << std::endl;
std::cout << "hasId: " << lengthInfo[row].id << std::endl;
}
inline double getVectorLength(row_type row) const {
return data[row * offset + lengthOffset];
}
inline double setLengthInData(row_type row, double len) {
return data[row * offset + lengthOffset] = len;
}
inline row_type getId(row_type row) const {
return (normalized ? lengthInfo[row].id : row);
}
inline double cosine(row_type row, const double* query) const {
const double* d_ptr = getMatrixRowPtr(row);
double cosine = 0;
#ifdef WITH_SIMD
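// SSE3 path: two doubles per 128-bit register, multiply-accumulated in pairs.
// `size` rounds colNum up to even, and the padding slot zeroed by
// zeroOutLastPadding() contributes 0 to the sum; _mm_hadd_pd then folds the
// two partial sums into the final dot product.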
__m128d sum = _mm_set1_pd(0.0);
int size = colNum + extraMult;
for (int i = 0; i < size; i += 2) {
sum = _mm_add_pd(sum, _mm_mul_pd(_mm_load_pd(d_ptr + i), _mm_load_pd(query + i)));
}
cosine = _mm_cvtsd_f64(_mm_hadd_pd(sum, sum));
return cosine;
#else
for (int i = 0; i < colNum; ++i) {
cosine += query[i] * d_ptr[i];
}
return cosine;
#endif
}
inline double L2Distance(row_type row, const double* query)const {
const double* d_ptr = getMatrixRowPtr(row);
double dist = 0;
if (normalized) {
for (int i = 0; i < colNum; ++i) {
double value = query[i] * query[-1] - d_ptr[i] * d_ptr[-1]; // unnormalize
dist += value * value;
}
} else {
for (int i = 0; i < colNum; ++i) {
dist += (query[i] - d_ptr[i]) * (query[i] - d_ptr[i]);
}
}
return sqrt(dist);
}
inline double L2Distance2(row_type row, const double* query)const {
// I assume non normalized case as needed in PCA trees
const double* d_ptr = getMatrixRowPtr(row);
double dist = 0;
for (int i = 0; i < colNum; ++i) {
dist += (query[i] - d_ptr[i]) * (query[i] - d_ptr[i]);
}
return dist;
}
inline double innerProduct(row_type row, const double* query) const {
const double ip = query[-1] * getVectorLength(row) * cosine(row, query);
return ip;
}
inline std::pair<bool, double> passesThreshold(row_type row, const double* query, double theta) const {
std::pair<bool, double> p;
double ip = 1;
if (normalized) {
ip = query[-1] * getVectorLength(row);
if (ip < theta) {
p.first = false;
return p;
}
}
ip *= cosine(row, query);
p.second = ip;
if (ip < theta) {
p.first = false;
return p;
} else {
p.first = true;
return p;
}
}
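// The early exit above relies on a Cauchy-Schwarz-style bound: for normalized
// rows, query[-1] * getVectorLength(row) is an upper bound on the true inner
// product (|cosine| <= 1), so a row can be rejected before the O(colNum)
// cosine() call is ever made.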
};
// ignores the lengths
inline void splitMatrices(const VectorMatrix& originalMatrix, std::vector<VectorMatrix>& matrices) {
row_type threads = matrices.size();
if (threads == 1) {
matrices[0].initializeBasics(originalMatrix.colNum, originalMatrix.rowNum, false);
for (int i = 0; i < matrices[0].rowNum; ++i) {
const double* vec = originalMatrix.getMatrixRowPtr(i);
matrices[0].lengthInfo[i] = QueueElement(1, i);
matrices[0].setLengthInData(i, 1);
double * d1 = matrices[0].getMatrixRowPtr(i);
scaleAndCopy(d1, vec, 1, originalMatrix.colNum);
}
} else {
omp_set_num_threads(threads);
std::vector<row_type> permuteVector(originalMatrix.rowNum);
std::iota(permuteVector.begin(), permuteVector.end(), 0);
rg::Random32 random(123);
rg::shuffle(permuteVector.begin(), permuteVector.end(), random);
std::vector<row_type> blockOffsets;
computeDefaultBlockOffsets(permuteVector.size(), threads, blockOffsets);
#pragma omp parallel
{
row_type tid = omp_get_thread_num();
row_type start = blockOffsets[tid];
row_type end = (tid == blockOffsets.size() - 1 ? originalMatrix.rowNum : blockOffsets[tid + 1]);
matrices[tid].initializeBasics(originalMatrix.colNum, end - start, true);
for (int i = start; i < end; ++i) {
row_type ind = permuteVector[i];
const double* vec = originalMatrix.getMatrixRowPtr(ind);
matrices[tid].lengthInfo[i - start] = QueueElement(1, i - start);
matrices[tid].setLengthInData(i - start, 1);
double * d1 = matrices[tid].getMatrixRowPtr(i - start);
scaleAndCopy(d1, vec, 1, originalMatrix.colNum);
matrices[tid].lengthInfo[i - start].id = ind; // the original id
}
}
}
}
/* map: id = original matrix id; first = thread, second = posInMatrix
*/
inline void initializeMatrices(const VectorMatrix& originalMatrix, std::vector<VectorMatrix>& matrices, bool sort, bool ignoreLengths, double epsilon = 0) {
row_type threads = matrices.size();
if (threads == 1) {
matrices[0].initializeBasics(originalMatrix.colNum, originalMatrix.rowNum, true);
if (ignoreLengths) {
#if defined(ABS_APPROX) || defined(HYBRID_APPROX)
matrices[0].epsilonEquivalents.resize(matrices[0].rowNum, epsilon);
#endif
for (int i = 0; i < matrices[0].rowNum; ++i) {
const double* vec = originalMatrix.getMatrixRowPtr(i);
double len = calculateLength(vec, matrices[0].colNum);
matrices[0].lengthInfo[i] = QueueElement(1, i);
matrices[0].setLengthInData(i, 1);
double x = 1 / len;
double * d1 = matrices[0].getMatrixRowPtr(i);
scaleAndCopy(d1, vec, x, originalMatrix.colNum);
#if defined(ABS_APPROX) || defined(HYBRID_APPROX)
matrices[0].epsilonEquivalents[i] *= x;
#endif
}
} else {
for (int i = 0; i < matrices[0].rowNum; ++i) {
const double* vec = originalMatrix.getMatrixRowPtr(i);
double len = calculateLength(vec, matrices[0].colNum);
matrices[0].lengthInfo[i] = QueueElement(len, i);
}
if (sort) {
matrices[0].shuffled = true;
std::sort(matrices[0].lengthInfo.begin(), matrices[0].lengthInfo.end(), std::greater<QueueElement>());
}
for (int i = 0; i < matrices[0].rowNum; ++i) {
matrices[0].setLengthInData(i, matrices[0].lengthInfo[i].data);
double x = 1 / matrices[0].lengthInfo[i].data;
double * d1 = matrices[0].getMatrixRowPtr(i);
double * d2 = originalMatrix.getMatrixRowPtr(matrices[0].lengthInfo[i].id);
scaleAndCopy(d1, d2, x, originalMatrix.colNum);
}
}
} else { // multiple threads
omp_set_num_threads(threads);
std::vector<row_type> permuteVector(originalMatrix.rowNum);
std::iota(permuteVector.begin(), permuteVector.end(), 0);
rg::Random32 random(123);
rg::shuffle(permuteVector.begin(), permuteVector.end(), random);
std::vector<row_type> blockOffsets;
computeDefaultBlockOffsets(permuteVector.size(), threads, blockOffsets);
#pragma omp parallel
{
row_type tid = omp_get_thread_num();
row_type start = blockOffsets[tid];
row_type end = (tid == blockOffsets.size() - 1 ? originalMatrix.rowNum : blockOffsets[tid + 1]);
matrices[tid].initializeBasics(originalMatrix.colNum, end - start, true);
if (ignoreLengths) {
#if defined(ABS_APPROX) || defined(HYBRID_APPROX)
matrices[tid].epsilonEquivalents.resize(matrices[tid].rowNum, epsilon);
#endif
for (int i = start; i < end; ++i) {
row_type ind = permuteVector[i];
const double* vec = originalMatrix.getMatrixRowPtr(ind);
double len = calculateLength(vec, matrices[tid].colNum);
matrices[tid].lengthInfo[i - start] = QueueElement(1, i - start);
matrices[tid].setLengthInData(i - start, 1);
double x = 1 / len;
double * d1 = matrices[tid].getMatrixRowPtr(i - start);
scaleAndCopy(d1, vec, x, originalMatrix.colNum);
matrices[tid].lengthInfo[i - start].id = ind; // the original id
#if defined(ABS_APPROX) || defined(HYBRID_APPROX)
matrices[tid].epsilonEquivalents[i] *= x;
#endif
}
} else {
for (int i = start; i < end; ++i) {
row_type ind = permuteVector[i];
const double* vec = originalMatrix.getMatrixRowPtr(ind);
double len = calculateLength(vec, matrices[tid].colNum);
matrices[tid].lengthInfo[i - start] = QueueElement(len, i - start);
}
if (sort) {
matrices[tid].shuffled = true;
std::sort(matrices[tid].lengthInfo.begin(), matrices[tid].lengthInfo.end(), std::greater<QueueElement>());
}
for (int i = 0; i < matrices[tid].rowNum; ++i) {
matrices[tid].setLengthInData(i, matrices[tid].lengthInfo[i].data);
double x = 1 / matrices[tid].lengthInfo[i].data;
row_type ind = permuteVector[matrices[tid].lengthInfo[i].id + start];
double * d1 = matrices[tid].getMatrixRowPtr(i);
double * d2 = originalMatrix.getMatrixRowPtr(ind);
scaleAndCopy(d1, d2, x, originalMatrix.colNum);
matrices[tid].lengthInfo[i].id = ind; // the original id
}
}
}
}
}
void calculateAPneededForQuery(std::vector<VectorMatrix>& matrices, double thres, int k, std::vector<double>& global_cweights) {
global_cweights.resize(matrices[0].colNum, 0);
#pragma omp parallel
{
row_type tid = omp_get_thread_num();
col_type colNum = matrices[tid].colNum;
row_type rowNum = matrices[tid].rowNum;
row_type endUser = rowNum;
if (k == 0) {
auto up = std::lower_bound(matrices[tid].lengthInfo.begin(), matrices[tid].lengthInfo.end(), QueueElement(thres, 0), std::greater<QueueElement>());
endUser = up - matrices[tid].lengthInfo.begin();
}
matrices[tid].cweights.resize(colNum);
matrices[tid].maxVectorCoord.resize(rowNum);
matrices[tid].vectorNNZ.resize(rowNum, 0);
for (int i = 0; i < endUser; ++i) {
double * d = matrices[tid].getMatrixRowPtr(i);
for (int j = 0; j < colNum; ++j) {
if (matrices[tid].cweights[j] < fabs(d[j]))
matrices[tid].cweights[j] = fabs(d[j]);
if (d[j] != 0)
matrices[tid].vectorNNZ[i]++;
if (fabs(d[j]) > matrices[tid].maxVectorCoord[i])
matrices[tid].maxVectorCoord[i] = fabs(d[j]);
}
}
#pragma omp critical
{
for (int i = 0; i < colNum; ++i) {
if (global_cweights[i] < matrices[tid].cweights[i])
global_cweights[i] = matrices[tid].cweights[i];
}
}
}
}
}
#endif /* VECTORMATRIX_H_ */
|
YAKL_parallel_for_common.h |
// #pragma once is purposefully omitted here because it needs to be included twice: once in each namespace: c and fortran
// Included by YAKL_parallel_for_c.h and YAKL_parallel_for_fortran.h
// Inside the yakl::c and yakl::fortran namespaces
//////////////////////////////////////////////////////////////////////////////////////////////
// Convenience functions to handle the indexing
// Reduces code for the rest of the parallel_for implementations
// Calls the functor for the specified global index ID "i" using the specified loop bounds
//////////////////////////////////////////////////////////////////////////////////////////////
template <class F, bool simple, int N>
YAKL_DEVICE_INLINE void callFunctor(F const &f , Bounds<N,simple> const &bnd , int const i ) {
int ind[N];
bnd.unpackIndices( i , ind );
if constexpr (N == 1) f(ind[0]);
if constexpr (N == 2) f(ind[0],ind[1]);
if constexpr (N == 3) f(ind[0],ind[1],ind[2]);
if constexpr (N == 4) f(ind[0],ind[1],ind[2],ind[3]);
if constexpr (N == 5) f(ind[0],ind[1],ind[2],ind[3],ind[4]);
if constexpr (N == 6) f(ind[0],ind[1],ind[2],ind[3],ind[4],ind[5]);
if constexpr (N == 7) f(ind[0],ind[1],ind[2],ind[3],ind[4],ind[5],ind[6]);
if constexpr (N == 8) f(ind[0],ind[1],ind[2],ind[3],ind[4],ind[5],ind[6],ind[7]);
}
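// Worked example (assuming row-major unpacking in Bounds::unpackIndices): for
// N == 2 with dims (3,4), the flat ID i = 7 unpacks to ind = {1,3}, i.e.
// i = ind[0]*4 + ind[1], so a single global thread ID drives an
// arbitrary-depth loop nest.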
template <class F, bool simple, int N>
YAKL_DEVICE_INLINE void callFunctorOuter(F const &f , Bounds<N,simple> const &bnd , int const i ,
InnerHandler handler ) {
int ind[N];
bnd.unpackIndices( i , ind );
if constexpr (N == 1) f(ind[0],handler);
if constexpr (N == 2) f(ind[0],ind[1],handler);
if constexpr (N == 3) f(ind[0],ind[1],ind[2],handler);
if constexpr (N == 4) f(ind[0],ind[1],ind[2],ind[3],handler);
if constexpr (N == 5) f(ind[0],ind[1],ind[2],ind[3],ind[4],handler);
if constexpr (N == 6) f(ind[0],ind[1],ind[2],ind[3],ind[4],ind[5],handler);
if constexpr (N == 7) f(ind[0],ind[1],ind[2],ind[3],ind[4],ind[5],ind[6],handler);
if constexpr (N == 8) f(ind[0],ind[1],ind[2],ind[3],ind[4],ind[5],ind[6],ind[7],handler);
}
////////////////////////////////////////////////
// HARDWARE BACKENDS FOR KERNEL LAUNCHING
////////////////////////////////////////////////
#ifdef YAKL_ARCH_CUDA
// CUDA has a limit on the parameter space for a kernel launch. When it's exceeded, the kernel
// needs to be launched by reference from device memory (i.e., "cudaKernelRef");
// otherwise, it is launched by value (i.e., "cudaKernelVal").
// A kernel launch will get the global ID and then use the callFunctor intermediate to transform that
// ID into a set of indices for multiple loops and then call the functor with those indices
// The __launch_bounds__ matches the number of threads used to launch the kernels so that the compiler
// does not compile for more threads per block than the user plans to use. This is for optimal register
// usage and reduced register spilling.
template <class F, int N, bool simple, int VecLen, bool B4B> __global__ __launch_bounds__(VecLen)
void cudaKernelVal( Bounds<N,simple> bounds , F f , LaunchConfig<VecLen,B4B> config ) {
size_t i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < bounds.nIter) {
callFunctor( f , bounds , i );
}
}
template <class F, int N, bool simple, int VecLen, bool B4B> __global__ __launch_bounds__(VecLen)
void cudaKernelRef( Bounds<N,simple> bounds , F const &f , LaunchConfig<VecLen,B4B> config ) {
size_t i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < bounds.nIter) {
callFunctor( f , bounds , i );
}
}
// If the functor is small enough, then launch it like normal
template<class F , int N , bool simple, int VecLen, bool B4B>
void parallel_for_cuda( Bounds<N,simple> const &bounds , F const &f , LaunchConfig<VecLen,B4B> config ) {
if constexpr (sizeof(F) <= 4000) {
cudaKernelVal <<< (unsigned int) (bounds.nIter-1)/VecLen+1 , VecLen >>> ( bounds , f , config );
check_last_error();
} else {
F *fp = (F *) functorBuffer;
cudaMemcpyAsync(fp,&f,sizeof(F),cudaMemcpyHostToDevice);
check_last_error();
cudaKernelRef <<< (unsigned int) (bounds.nIter-1)/VecLen+1 , VecLen >>> ( bounds , *fp , config );
check_last_error();
}
}
template <class F, int N, bool simple, int VecLen, bool B4B> __global__ __launch_bounds__(VecLen)
void cudaKernelOuterVal( Bounds<N,simple> bounds , F f , LaunchConfig<VecLen,B4B> config ,
InnerHandler handler ) {
callFunctorOuter( f , bounds , blockIdx.x , handler );
}
template <class F, int N, bool simple, int VecLen, bool B4B> __global__ __launch_bounds__(VecLen)
void cudaKernelOuterRef( Bounds<N,simple> bounds , F const &f , LaunchConfig<VecLen,B4B> config ,
InnerHandler handler ) {
callFunctorOuter( f , bounds , blockIdx.x , handler );
}
template<class F , int N , bool simple, int VecLen, bool B4B>
void parallel_outer_cuda( Bounds<N,simple> const &bounds , F const &f , LaunchConfig<VecLen,B4B> config ) {
if constexpr (sizeof(F) <= 4000) {
cudaKernelOuterVal <<< (unsigned int) bounds.nIter , config.inner_size >>> ( bounds , f , config , InnerHandler() );
check_last_error();
} else {
F *fp = (F *) functorBuffer;
cudaMemcpyAsync(fp,&f,sizeof(F),cudaMemcpyHostToDevice);
check_last_error();
cudaKernelOuterRef <<< (unsigned int) bounds.nIter , config.inner_size >>> ( bounds , *fp , config , InnerHandler() );
check_last_error();
}
}
template <class F, int N, bool simple>
YAKL_INLINE void parallel_inner_cuda( Bounds<N,simple> bounds , F const &f ) {
#if YAKL_CURRENTLY_ON_DEVICE()
if (threadIdx.x < bounds.nIter) callFunctor( f , bounds , threadIdx.x );
#else
// Avoid not used warning
(void) f;
#endif
}
template <class F>
YAKL_INLINE void single_inner_cuda( F const &f ) {
#if YAKL_CURRENTLY_ON_DEVICE()
if (threadIdx.x == 0) f();
#else
// Avoid not used warning
(void) f;
#endif
}
#endif
#ifdef YAKL_ARCH_HIP
// A kernel launch will get the global ID and then use the callFunctor intermediate to transform that
// ID into a set of indices for multiple loops and then call the functor with those indices
// The __launch_bounds__ matches the number of threads used to launch the kernels so that the compiler
// does not compile for more threads per block than the user plans to use. This is for optimal register
// usage and reduced register spilling.
template <class F, int N, bool simple, int VecLen, bool B4B> __global__ __launch_bounds__(VecLen)
void hipKernel( Bounds<N,simple> bounds , F f , LaunchConfig<VecLen,B4B> config) {
size_t i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < bounds.nIter) {
callFunctor( f , bounds , i );
}
}
template<class F, int N, bool simple, int VecLen, bool B4B>
void parallel_for_hip( Bounds<N,simple> const &bounds , F const &f , LaunchConfig<VecLen,B4B> config ) {
hipLaunchKernelGGL( hipKernel , dim3((bounds.nIter-1)/VecLen+1) , dim3(VecLen) ,
(std::uint32_t) 0 , (hipStream_t) 0 , bounds , f , config );
check_last_error();
}
template <class F, int N, bool simple, int VecLen, bool B4B> __global__ __launch_bounds__(VecLen)
void hipOuterKernel( Bounds<N,simple> bounds , F f , LaunchConfig<VecLen,B4B> config , InnerHandler handler) {
callFunctorOuter( f , bounds , blockIdx.x , handler );
}
template<class F, int N, bool simple, int VecLen, bool B4B>
void parallel_outer_hip( Bounds<N,simple> const &bounds , F const &f , LaunchConfig<VecLen,B4B> config ) {
hipLaunchKernelGGL( hipOuterKernel , dim3(bounds.nIter) , dim3(config.inner_size) ,
(std::uint32_t) 0 , (hipStream_t) 0 , bounds , f , config , InnerHandler() );
check_last_error();
}
template <class F, int N, bool simple>
YAKL_INLINE void parallel_inner_hip( Bounds<N,simple> bounds , F const &f ) {
#if YAKL_CURRENTLY_ON_DEVICE()
if (threadIdx.x < bounds.nIter) callFunctor( f , bounds , threadIdx.x );
#else
// Avoid not used warning
(void) f;
#endif
}
template <class F>
YAKL_INLINE void single_inner_hip( F const &f ) {
#if YAKL_CURRENTLY_ON_DEVICE()
if (threadIdx.x == 0) f();
#else
// Avoid not used warning
(void) f;
#endif
}
#endif
#ifdef YAKL_ARCH_SYCL
// Kernels are launched with the SYCL parallel_for routine.
// Currently, if the functor is not device-copyable, SYCL must copy it to the device manually
// and run it from there. Also, there is a violation of dependence wherein the stream must be
// synchronized with a wait() call after launch.
template<class F, int N, bool simple, int VecLen, bool B4B>
void parallel_for_sycl( Bounds<N,simple> const &bounds , F const &f , LaunchConfig<VecLen,B4B> config ) {
if constexpr (sycl::is_device_copyable<F>::value) {
sycl_default_stream().parallel_for( sycl::range<1>(bounds.nIter) , [=] (sycl::id<1> i) {
callFunctor( f , bounds , i );
});
sycl_default_stream().wait();
} else {
F *fp = (F *) functorBuffer;
auto copyEvent = sycl_default_stream().memcpy(fp, &f, sizeof(F));
auto kernelEvent = sycl_default_stream().parallel_for( sycl::range<1>(bounds.nIter) , copyEvent, [=] (sycl::id<1> i) {
callFunctor( *fp , bounds , i );
});
kernelEvent.wait();
}
check_last_error();
}
template<class F, int N, bool simple, int VecLen, bool B4B>
void parallel_outer_sycl( Bounds<N,simple> const &bounds , F const &f , LaunchConfig<VecLen,B4B> config ) {
if constexpr (sycl::is_device_copyable<F>::value) {
sycl_default_stream().parallel_for( sycl::nd_range<1>(bounds.nIter*config.inner_size,config.inner_size) ,
[=] (sycl::nd_item<1> item) {
callFunctorOuter( f , bounds , item.get_group(0) , InnerHandler(item) );
});
sycl_default_stream().wait();
} else {
F *fp = (F *) functorBuffer;
auto copyEvent = sycl_default_stream().memcpy(fp, &f, sizeof(F));
auto kernelEvent = sycl_default_stream().parallel_for( sycl::nd_range<1>(bounds.nIter*config.inner_size,config.inner_size) ,
copyEvent,
[=] (sycl::nd_item<1> item) {
callFunctorOuter( *fp , bounds , item.get_group(0) , InnerHandler(item) );
});
kernelEvent.wait();
}
check_last_error();
}
template<class F, int N, bool simple>
void parallel_inner_sycl( Bounds<N,simple> const &bounds , F const &f , InnerHandler handler ) {
#if YAKL_CURRENTLY_ON_DEVICE()
if (handler.get_item().get_local_id(0) < bounds.nIter) callFunctor( f , bounds , handler.get_item().get_local_id(0) );
#endif
}
template<class F>
void single_inner_sycl( F const &f , InnerHandler handler ) {
#if YAKL_CURRENTLY_ON_DEVICE()
if (handler.get_item().get_local_id(0) == 0) f();
#endif
}
#endif
template <bool OUTER> struct DoOuter {};
// These are the CPU routines for parallel_for without a device target. These rely on
// constexpr lower bounds and strides for the compiler to optimize loop arithmetic for
// simple bounds.
// For OMP target offload backend, target teams distribute parallel for simd is used.
// For OMP CPU threading backend, parallel for is used
template <class F, bool simple, int N, bool OUTER=false>
inline void parallel_for_cpu_serial( Bounds<N,simple> const &bounds , F const &f , bool omp_par = true , DoOuter<OUTER> dummy = DoOuter<false>() ) {
if constexpr (N == 1) {
#ifdef YAKL_ARCH_OPENMP
#pragma omp parallel for if (omp_par)
#endif
for (int i0 = bounds.lbound(0); i0 < (int) (bounds.lbound(0)+bounds.dim(0)*bounds.stride(0)); i0+=bounds.stride(0)) {
if constexpr (OUTER) { f(i0,InnerHandler() ); }
else { f(i0); }
}
} else if constexpr (N == 2) {
#ifdef YAKL_ARCH_OPENMP
#pragma omp parallel for collapse(2) if (omp_par)
#endif
for (int i0 = bounds.lbound(0); i0 < (int) (bounds.lbound(0)+bounds.dim(0)*bounds.stride(0)); i0+=bounds.stride(0)) {
for (int i1 = bounds.lbound(1); i1 < (int) (bounds.lbound(1)+bounds.dim(1)*bounds.stride(1)); i1+=bounds.stride(1)) {
if constexpr (OUTER) { f(i0,i1,InnerHandler() ); }
else { f(i0,i1); }
} }
} else if constexpr (N == 3) {
#ifdef YAKL_ARCH_OPENMP
#pragma omp parallel for collapse(3) if (omp_par)
#endif
for (int i0 = bounds.lbound(0); i0 < (int) (bounds.lbound(0)+bounds.dim(0)*bounds.stride(0)); i0+=bounds.stride(0)) {
for (int i1 = bounds.lbound(1); i1 < (int) (bounds.lbound(1)+bounds.dim(1)*bounds.stride(1)); i1+=bounds.stride(1)) {
for (int i2 = bounds.lbound(2); i2 < (int) (bounds.lbound(2)+bounds.dim(2)*bounds.stride(2)); i2+=bounds.stride(2)) {
if constexpr (OUTER) { f(i0,i1,i2,InnerHandler() ); }
else { f(i0,i1,i2); }
} } }
} else if constexpr (N == 4) {
#ifdef YAKL_ARCH_OPENMP
#pragma omp parallel for collapse(4) if (omp_par)
#endif
for (int i0 = bounds.lbound(0); i0 < (int) (bounds.lbound(0)+bounds.dim(0)*bounds.stride(0)); i0+=bounds.stride(0)) {
for (int i1 = bounds.lbound(1); i1 < (int) (bounds.lbound(1)+bounds.dim(1)*bounds.stride(1)); i1+=bounds.stride(1)) {
for (int i2 = bounds.lbound(2); i2 < (int) (bounds.lbound(2)+bounds.dim(2)*bounds.stride(2)); i2+=bounds.stride(2)) {
for (int i3 = bounds.lbound(3); i3 < (int) (bounds.lbound(3)+bounds.dim(3)*bounds.stride(3)); i3+=bounds.stride(3)) {
if constexpr (OUTER) { f(i0,i1,i2,i3,InnerHandler() ); }
else { f(i0,i1,i2,i3); }
} } } }
} else if constexpr (N == 5) {
#ifdef YAKL_ARCH_OPENMP
#pragma omp parallel for collapse(5) if (omp_par)
#endif
for (int i0 = bounds.lbound(0); i0 < (int) (bounds.lbound(0)+bounds.dim(0)*bounds.stride(0)); i0+=bounds.stride(0)) {
for (int i1 = bounds.lbound(1); i1 < (int) (bounds.lbound(1)+bounds.dim(1)*bounds.stride(1)); i1+=bounds.stride(1)) {
for (int i2 = bounds.lbound(2); i2 < (int) (bounds.lbound(2)+bounds.dim(2)*bounds.stride(2)); i2+=bounds.stride(2)) {
for (int i3 = bounds.lbound(3); i3 < (int) (bounds.lbound(3)+bounds.dim(3)*bounds.stride(3)); i3+=bounds.stride(3)) {
for (int i4 = bounds.lbound(4); i4 < (int) (bounds.lbound(4)+bounds.dim(4)*bounds.stride(4)); i4+=bounds.stride(4)) {
if constexpr (OUTER) { f(i0,i1,i2,i3,i4,InnerHandler() ); }
else { f(i0,i1,i2,i3,i4); }
} } } } }
} else if constexpr (N == 6) {
#ifdef YAKL_ARCH_OPENMP
#pragma omp parallel for collapse(6) if (omp_par)
#endif
for (int i0 = bounds.lbound(0); i0 < (int) (bounds.lbound(0)+bounds.dim(0)*bounds.stride(0)); i0+=bounds.stride(0)) {
for (int i1 = bounds.lbound(1); i1 < (int) (bounds.lbound(1)+bounds.dim(1)*bounds.stride(1)); i1+=bounds.stride(1)) {
for (int i2 = bounds.lbound(2); i2 < (int) (bounds.lbound(2)+bounds.dim(2)*bounds.stride(2)); i2+=bounds.stride(2)) {
for (int i3 = bounds.lbound(3); i3 < (int) (bounds.lbound(3)+bounds.dim(3)*bounds.stride(3)); i3+=bounds.stride(3)) {
for (int i4 = bounds.lbound(4); i4 < (int) (bounds.lbound(4)+bounds.dim(4)*bounds.stride(4)); i4+=bounds.stride(4)) {
for (int i5 = bounds.lbound(5); i5 < (int) (bounds.lbound(5)+bounds.dim(5)*bounds.stride(5)); i5+=bounds.stride(5)) {
if constexpr (OUTER) { f(i0,i1,i2,i3,i4,i5,InnerHandler() ); }
else { f(i0,i1,i2,i3,i4,i5); }
} } } } } }
} else if constexpr (N == 7) {
#ifdef YAKL_ARCH_OPENMP
#pragma omp parallel for collapse(7) if (omp_par)
#endif
for (int i0 = bounds.lbound(0); i0 < (int) (bounds.lbound(0)+bounds.dim(0)*bounds.stride(0)); i0+=bounds.stride(0)) {
for (int i1 = bounds.lbound(1); i1 < (int) (bounds.lbound(1)+bounds.dim(1)*bounds.stride(1)); i1+=bounds.stride(1)) {
for (int i2 = bounds.lbound(2); i2 < (int) (bounds.lbound(2)+bounds.dim(2)*bounds.stride(2)); i2+=bounds.stride(2)) {
for (int i3 = bounds.lbound(3); i3 < (int) (bounds.lbound(3)+bounds.dim(3)*bounds.stride(3)); i3+=bounds.stride(3)) {
for (int i4 = bounds.lbound(4); i4 < (int) (bounds.lbound(4)+bounds.dim(4)*bounds.stride(4)); i4+=bounds.stride(4)) {
for (int i5 = bounds.lbound(5); i5 < (int) (bounds.lbound(5)+bounds.dim(5)*bounds.stride(5)); i5+=bounds.stride(5)) {
for (int i6 = bounds.lbound(6); i6 < (int) (bounds.lbound(6)+bounds.dim(6)*bounds.stride(6)); i6+=bounds.stride(6)) {
if constexpr (OUTER) { f(i0,i1,i2,i3,i4,i5,i6,InnerHandler() ); }
else { f(i0,i1,i2,i3,i4,i5,i6); }
} } } } } } }
} else if constexpr (N == 8) {
#ifdef YAKL_ARCH_OPENMP
#pragma omp parallel for collapse(8) if (omp_par)
#endif
for (int i0 = bounds.lbound(0); i0 < (int) (bounds.lbound(0)+bounds.dim(0)*bounds.stride(0)); i0+=bounds.stride(0)) {
for (int i1 = bounds.lbound(1); i1 < (int) (bounds.lbound(1)+bounds.dim(1)*bounds.stride(1)); i1+=bounds.stride(1)) {
for (int i2 = bounds.lbound(2); i2 < (int) (bounds.lbound(2)+bounds.dim(2)*bounds.stride(2)); i2+=bounds.stride(2)) {
for (int i3 = bounds.lbound(3); i3 < (int) (bounds.lbound(3)+bounds.dim(3)*bounds.stride(3)); i3+=bounds.stride(3)) {
for (int i4 = bounds.lbound(4); i4 < (int) (bounds.lbound(4)+bounds.dim(4)*bounds.stride(4)); i4+=bounds.stride(4)) {
for (int i5 = bounds.lbound(5); i5 < (int) (bounds.lbound(5)+bounds.dim(5)*bounds.stride(5)); i5+=bounds.stride(5)) {
for (int i6 = bounds.lbound(6); i6 < (int) (bounds.lbound(6)+bounds.dim(6)*bounds.stride(6)); i6+=bounds.stride(6)) {
for (int i7 = bounds.lbound(7); i7 < (int) (bounds.lbound(7)+bounds.dim(7)*bounds.stride(7)); i7+=bounds.stride(7)) {
if constexpr (OUTER) { f(i0,i1,i2,i3,i4,i5,i6,i7,InnerHandler() ); }
else { f(i0,i1,i2,i3,i4,i5,i6,i7); }
} } } } } } } }
}
}
////////////////////////////////////////////////////////////////////////////////////
// parallel_for
////////////////////////////////////////////////////////////////////////////////////
template <class F, int N, bool simple, int VecLen=YAKL_DEFAULT_VECTOR_LEN , bool B4B = false>
inline void parallel_for( char const * str , Bounds<N,simple> const &bounds , F const &f ,
LaunchConfig<VecLen,B4B> config = LaunchConfig<>() ) {
// Automatically time (if requested) and add nvtx ranges for easier nvprof / nsight profiling
#ifdef YAKL_AUTO_PROFILE
timer_start(str);
#endif
#ifdef YAKL_ARCH_CUDA
nvtxRangePushA(str);
#endif
#ifdef YAKL_ARCH_HIP
roctxRangePushA(str);
#endif
bool do_b4b = false;
#ifdef YAKL_B4B
if constexpr (B4B) do_b4b = true;
#endif
if (do_b4b) {
fence();
// The if-statement is redundant, but it shields cpu_serial from compilation if b4b isn't desired
// For instance, if the lambda is YAKL_DEVICE_LAMBDA, compiling this line will give an error because
// it isn't available on the host.
#ifdef YAKL_B4B
if constexpr (B4B) { parallel_for_cpu_serial( bounds , f ); }
#endif
} else {
#ifdef YAKL_ARCH_CUDA
parallel_for_cuda( bounds , f , config );
#elif defined(YAKL_ARCH_HIP)
parallel_for_hip ( bounds , f , config );
#elif defined(YAKL_ARCH_SYCL)
parallel_for_sycl( bounds , f , config );
#else
parallel_for_cpu_serial( bounds , f );
#endif
}
#if defined(YAKL_AUTO_FENCE)
fence();
#endif
#ifdef YAKL_ARCH_HIP
roctxRangePop();
#endif
#ifdef YAKL_ARCH_CUDA
nvtxRangePop();
#endif
#ifdef YAKL_AUTO_PROFILE
timer_stop(str);
#endif
}
template <class F, int N, bool simple, int VecLen=YAKL_DEFAULT_VECTOR_LEN, bool B4B=false>
inline void parallel_for( Bounds<N,simple> const &bounds , F const &f ,
LaunchConfig<VecLen,B4B> config = LaunchConfig<>() ) {
parallel_for( "Unlabeled" , bounds , f , config );
}
template <class F, int VecLen=YAKL_DEFAULT_VECTOR_LEN, bool B4B=false>
inline void parallel_for( LBnd bnd , F const &f ,
LaunchConfig<VecLen,B4B> config = LaunchConfig<>() ) {
if (bnd.l == bnd.default_lbound && bnd.s == 1) {
parallel_for( "Unlabeled" , Bounds<1,true>(bnd.to_scalar()) , f , config );
} else {
parallel_for( "Unlabeled" , Bounds<1,false>(bnd) , f , config );
}
}
template <class F, int VecLen=YAKL_DEFAULT_VECTOR_LEN, bool B4B=false>
inline void parallel_for( char const * str , LBnd bnd , F const &f ,
LaunchConfig<VecLen,B4B> config = LaunchConfig<>() ) {
if (bnd.l == bnd.default_lbound && bnd.s == 1) {
parallel_for( str , Bounds<1,true>(bnd.to_scalar()) , f , config );
} else {
parallel_for( str , Bounds<1,false>(bnd) , f , config );
}
}
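// Usage sketch (illustrative; assumes a device-capable lambda macro such as
// YAKL_LAMBDA and 2-D yakl::Arrays a and b):
//
// parallel_for( "stencil" , Bounds<2>(ny,nx) , YAKL_LAMBDA (int j, int i) {
//   b(j,i) = a(j,i) + 1;
// });
//
// The string label feeds the optional timers and the nvtx/roctx ranges pushed
// above.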
////////////////////////////////////////////////////////////////////////////////////
// parallel_outer
////////////////////////////////////////////////////////////////////////////////////
template <class F, int N, bool simple, int VecLen=YAKL_DEFAULT_VECTOR_LEN, bool B4B = false>
inline void parallel_outer( char const * str , Bounds<N,simple> const &bounds , F const &f ,
LaunchConfig<VecLen,B4B> config = LaunchConfig<>() ) {
// Automatically time (if requested) and add nvtx ranges for easier nvprof / nsight profiling
#ifdef YAKL_AUTO_PROFILE
timer_start(str);
#endif
#ifdef YAKL_ARCH_CUDA
nvtxRangePushA(str);
#endif
#ifdef YAKL_ARCH_HIP
roctxRangePushA(str);
#endif
bool do_b4b = false;
#ifdef YAKL_B4B
if constexpr (B4B) do_b4b = true;
#endif
if (do_b4b) {
fence();
// The if-statement is redundant, but it shields cpu_serial from compilation if b4b isn't desired
// For instance, if the lambda is YAKL_DEVICE_LAMBDA, compiling this line will give an error because
// it isn't available on the host.
#ifdef YAKL_B4B
if constexpr (B4B) parallel_for_cpu_serial( bounds , f , true , DoOuter<true>() );
#endif
} else {
#ifdef YAKL_ARCH_CUDA
parallel_outer_cuda( bounds , f , config );
#elif defined(YAKL_ARCH_HIP)
parallel_outer_hip ( bounds , f , config );
#elif defined(YAKL_ARCH_SYCL)
parallel_outer_sycl( bounds , f , config );
#else
parallel_for_cpu_serial( bounds , f , true , DoOuter<true>() );
#endif
}
#if defined(YAKL_AUTO_FENCE)
fence();
#endif
#ifdef YAKL_ARCH_HIP
roctxRangePop();
#endif
#ifdef YAKL_ARCH_CUDA
nvtxRangePop();
#endif
#ifdef YAKL_AUTO_PROFILE
timer_stop(str);
#endif
}
template <class F, int N, bool simple, int VecLen=YAKL_DEFAULT_VECTOR_LEN, bool B4B=false>
inline void parallel_outer( Bounds<N,simple> const &bounds , F const &f ,
LaunchConfig<VecLen,B4B> config = LaunchConfig<>() ) {
parallel_outer( "Unlabeled" , bounds , f , config );
}
template <class F, int VecLen=YAKL_DEFAULT_VECTOR_LEN, bool B4B=false>
inline void parallel_outer( LBnd bnd , F const &f ,
LaunchConfig<VecLen,B4B> config = LaunchConfig<>() ) {
if (bnd.l == bnd.default_lbound && bnd.s == 1) {
parallel_outer( "Unlabeled" , Bounds<1,true>(bnd.to_scalar()) , f , config );
} else {
parallel_outer( "Unlabeled" , Bounds<1,false>(bnd) , f , config );
}
}
template <class F, int VecLen=YAKL_DEFAULT_VECTOR_LEN, bool B4B=false>
inline void parallel_outer( char const * str , LBnd bnd , F const &f ,
LaunchConfig<VecLen,B4B> config = LaunchConfig<>() ) {
if (bnd.l == bnd.default_lbound && bnd.s == 1) {
parallel_outer( str , Bounds<1,true>(bnd.to_scalar()) , f , config );
} else {
parallel_outer( str , Bounds<1,false>(bnd) , f , config );
}
}
////////////////////////////////////////////////////////////////////////////////////
// parallel_inner
////////////////////////////////////////////////////////////////////////////////////
template <class F, int N, bool simple>
YAKL_INLINE void parallel_inner( Bounds<N,simple> const &bounds , F const &f , InnerHandler handler ) {
#if YAKL_CURRENTLY_ON_HOST()
parallel_for_cpu_serial( bounds , f , false );
#else
#ifdef YAKL_ARCH_CUDA
parallel_inner_cuda( bounds , f );
#elif defined(YAKL_ARCH_HIP)
parallel_inner_hip ( bounds , f );
#elif defined(YAKL_ARCH_SYCL)
parallel_inner_sycl( bounds , f , handler );
#else
parallel_for_cpu_serial( bounds , f , false );
#endif
#endif
#ifdef YAKL_AUTO_FENCE
fence_inner(handler);
#endif
}
template <class F>
YAKL_INLINE void parallel_inner( LBnd bnd , F const &f , InnerHandler handler ) {
if (bnd.l == bnd.default_lbound && bnd.s == 1) {
parallel_inner( Bounds<1,true>(bnd.to_scalar()) , f , handler );
} else {
parallel_inner( Bounds<1,false>(bnd) , f , handler );
}
}
template <class F>
YAKL_INLINE void single_inner( F const &f , InnerHandler handler ) {
#if YAKL_CURRENTLY_ON_HOST()
f();
#else
#ifdef YAKL_ARCH_CUDA
single_inner_cuda( f );
#elif defined(YAKL_ARCH_HIP)
single_inner_hip ( f );
#elif defined(YAKL_ARCH_SYCL)
single_inner_sycl( f , handler );
#else
f();
#endif
#endif
#ifdef YAKL_AUTO_FENCE
fence_inner(handler);
#endif
}
|
is.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISO/IEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - IS
This benchmark is an OpenMP C version of the NPB IS code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------
*/
/*
--------------------------------------------------------------------
Author: M. Yarrow
OpenMP C version: S. Satoh
--------------------------------------------------------------------
*/
#include "npbparams.h"
#include <stdlib.h>
#include <stdio.h>
/* */
/* For serial IS, buckets are not really req'd to solve NPB1 IS */
/* spec, but their use on some machines improves performance, on */
/* other machines the use of buckets compromises performance, */
/* probably because it is extra computation which is not req'd. */
/* (Note: Mechanism not understood, probably cache related) */
/* Example: SP2-66MhzWN: 50% speedup with buckets */
/* Example: SGI Indy5000: 50% slowdown with buckets */
/* Example: SGI O2000: 400% slowdown with buckets (Wow!) */
/* */
/* #define USE_BUCKETS */
/* buckets are not used in the OpenMP C version */
/* */
/* default values */
/* */
/* */
/* CLASS S */
/* */
/* */
/* CLASS W */
/* */
/* */
/* CLASS A */
/* */
/* */
/* CLASS B */
/* */
/* */
/* CLASS C */
/* */
/* */
/* Typedef: if necessary, change the */
/* size of int here by changing the */
/* int type to, say, long */
/* */
typedef int INT_TYPE;
/* */
/* Some global info */
/* */
INT_TYPE * key_buff_ptr_global;
/* used by full_verify to get */
/* copies of rank info */
int passed_verification;
/* */
/* These are the three main arrays. */
/* See SIZE_OF_BUFFERS def above */
/* */
INT_TYPE key_array[(1<<23)], key_buff1[(1<<23)], key_buff2[(1<<23)], partial_verify_vals[5];
/* */
/* Partial verif info */
/* */
INT_TYPE test_index_array[5], test_rank_array[5],
S_test_index_array[5] = {48427, 17148, 23627, 62548, 4431},
S_test_rank_array[5] = {0, 18, 346, 64917, 65463},
W_test_index_array[5] = {357773, 934767, 875723, 898999, 404505},
W_test_rank_array[5] = {1249, 11698, 1039987, 1043896, 1048018},
A_test_index_array[5] = {2112377, 662041, 5336171, 3642833, 4250760},
A_test_rank_array[5] = {104, 17523, 123928, 8288932, 8388264},
B_test_index_array[5] = {41869, 812306, 5102857, 18232239, 26860214},
B_test_rank_array[5] = {33422937, 10244, 59149, 33135281, 99},
C_test_index_array[5] = {44172927, 72999161, 74326391, 129606274, 21736814},
C_test_rank_array[5] = {61147, 882988, 266290, 133997595, 133525895};
/* */
/* function prototypes */
/* */
double randlc(double * X, double * A);
void full_verify(void );
/*
FUNCTION RANDLC (X, A)
*
* This routine returns a uniform pseudorandom double precision number in the
* range (0, 1) by using the linear congruential generator
*
* x_{k+1} = a x_k (mod 2^46)
*
* where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers
* before repeating. The argument A is the same as 'a' in the above formula,
* and X is the same as x_0. A and X must be odd double precision integers
* in the range (1, 2^46). The returned value RANDLC is normalized to be
* between 0 and 1, i.e. RANDLC = 2^(-46) * x_1. X is updated to contain
* the new seed x_1, so that subsequent calls to RANDLC using the same
* arguments will generate a continuous sequence.
*
* This routine should produce the same results on any computer with at least
* 48 mantissa bits in double precision floating point data. On Cray systems,
* double precision should be disabled.
*
* David H. Bailey October 26, 1990
*
* IMPLICIT DOUBLE PRECISION (A-H, O-Z)
* SAVE KS, R23, R46, T23, T46
* DATA KS/0/
*
* If this is the first call to RANDLC, compute R23 = 2 ^ -23, R46 = 2 ^ -46,
* T23 = 2 ^ 23, and T46 = 2 ^ 46. These are computed in loops, rather than
* by merely using the ** operator, in order to insure that the results are
* exact on all systems. This code assumes that 0.5D0 is represented exactly.
*/
/* */
/* R A N D L C */
/* */
/* portable random number generator */
/* */
double randlc(double * X, double * A)
{
static int KS = 0;
static double R23, R46, T23, T46;
double T1, T2, T3, T4;
double A1;
double A2;
double X1;
double X2;
double Z;
int i, j;
double _ret_val_0;
if (KS==0)
{
R23=1.0;
R46=1.0;
T23=1.0;
T46=1.0;
#pragma loop name randlc#0
#pragma cetus reduction(*: R23, T23)
#pragma cetus parallel
#pragma omp parallel for reduction(*: R23, T23)
for (i=1; i<=23; i ++ )
{
R23=(0.5*R23);
T23=(2.0*T23);
}
#pragma loop name randlc#1
#pragma cetus reduction(*: R46, T46)
#pragma cetus parallel
#pragma omp parallel for reduction(*: R46, T46)
for (i=1; i<=46; i ++ )
{
R46=(0.5*R46);
T46=(2.0*T46);
}
KS=1;
}
/* Break A into two parts such that A = 2^23 A1 + A2 and set X = N. */
T1=(R23*( * A));
j=T1;
A1=j;
A2=(( * A)-(T23*A1));
/*
Break X into two parts such that X = 2^23 X1 + X2, compute
Z = A1 * X2 + A2 * X1 (mod 2^23), and then
X = 2^23 * Z + A2 * X2 (mod 2^46).
*/
T1=(R23*( * X));
j=T1;
X1=j;
X2=(( * X)-(T23*X1));
T1=((A1*X2)+(A2*X1));
j=(R23*T1);
T2=j;
Z=(T1-(T23*T2));
T3=((T23*Z)+(A2*X2));
j=(R46*T3);
T4=j;
( * X)=(T3-(T46*T4));
_ret_val_0=(R46*( * X));
return _ret_val_0;
}
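/*
A minimal usage sketch (added; a hypothetical driver, not part of the
benchmark): draws a few numbers with the same seed and multiplier that
main() later passes to create_seq(). Guarded so the benchmark binary is
unchanged unless RANDLC_DEMO is defined.
*/
#ifdef RANDLC_DEMO
void randlc_demo(void)
{
double seed = 3.14159265E8;
/* x_0: an odd integer in (1, 2^46) */
double a = 1.220703125E9;
/* multiplier 'a': an odd integer in (1, 2^46) */
int n;
for (n=0; n<4; n ++ )
{
/* each call returns a value in (0, 1) and advances seed to x_{k+1} */
printf("%.17g\n", randlc( & seed, & a));
}
return ;
}
#endif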
/* */
/* C R E A T E _ S E Q */
/* */
void create_seq(double seed, double a)
{
double x;
int i, j, k;
k=((1<<19)/4);
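/* note (added): each randlc() draw lies in (0,1), so x below lies in (0,4)
and k*x truncates into [0, 2^19), the valid key range */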
#pragma loop name create_seq#0
for (i=0; i<(1<<23); i ++ )
{
x=randlc( & seed, & a);
x+=randlc( & seed, & a);
x+=randlc( & seed, & a);
x+=randlc( & seed, & a);
key_array[i]=(k*x);
}
return ;
}
/* */
/* F U L L _ V E R I F Y */
/* */
void full_verify()
{
INT_TYPE i, j;
INT_TYPE k;
INT_TYPE m, unique_keys;
/* Now, finally, sort the keys: */
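/* note (added): key_buff_ptr_global[v] holds the cumulative count of keys
<= v computed by rank(); pre-decrementing it yields the final position of
the next occurrence of key value v (a standard counting-sort placement) */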
#pragma loop name full_verify#0
for (i=0; i<(1<<23); i ++ )
{
key_array[ -- key_buff_ptr_global[key_buff2[i]]]=key_buff2[i];
}
/* Confirm keys correctly sorted: count incorrectly sorted keys, if any */
j=0;
#pragma loop name full_verify#1
#pragma cetus reduction(+: j)
#pragma cetus parallel
#pragma omp parallel for reduction(+: j)
for (i=1; i<(1<<23); i ++ )
{
if (key_array[i-1]>key_array[i])
{
j ++ ;
}
}
if (j!=0)
{
printf("Full_verify: number of keys out of sort: %d\n", j);
}
else
{
passed_verification ++ ;
}
return ;
}
/* */
/* R A N K */
/* */
void rank(int iteration)
{
INT_TYPE i, j, k;
INT_TYPE l, m;
INT_TYPE shift = 19-10;
INT_TYPE key;
INT_TYPE min_key_val, max_key_val;
INT_TYPE prv_buff1[(1<<19)];
key_array[iteration]=iteration;
key_array[iteration+10]=((1<<19)-iteration);
/* Determine where the partial verify test keys are, load into */
/* top of array bucket_size */
#pragma loop name rank#0
for (i=0; i<5; i ++ )
{
partial_verify_vals[i]=key_array[test_index_array[i]];
}
/* Clear the work array */
#pragma loop name rank#1
#pragma cetus parallel
#pragma omp parallel for
for (i=0; i<(1<<19); i ++ )
{
key_buff1[i]=0;
}
#pragma loop name rank#2
#pragma cetus parallel
#pragma omp parallel for
for (i=0; i<(1<<19); i ++ )
{
prv_buff1[i]=0;
}
/* Copy keys into work array; keys in key_array will be reused each iter. */
#pragma loop name rank#3
#pragma cetus reduction(+: prv_buff1[key_buff2[i]])
for (i=0; i<(1<<23); i ++ )
{
key_buff2[i]=key_array[i];
/* Ranking of all keys occurs in this section: */
/*
In this section, the keys themselves are used as their
own indexes to determine how many of each there are: their
individual population
*/
prv_buff1[key_buff2[i]] ++ ;
/* Now they have individual key */
}
/* population */
#pragma loop name rank#4
for (i=0; i<((1<<19)-1); i ++ )
{
prv_buff1[i+1]+=prv_buff1[i];
}
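/* prv_buff1[i] now holds the number of keys with value <= i */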
#pragma loop name rank#5
for (i=0; i<(1<<19); i ++ )
{
key_buff1[i]+=prv_buff1[i];
}
/*
To obtain ranks of each key, successively add the individual key
population, not forgetting to add m, the total of lesser keys,
to the first key population
*/
/* This is the partial verify test section */
/* Observe that test_rank_array vals are */
/* shifted differently for different cases */
#pragma loop name rank#6
#pragma cetus reduction(+: passed_verification)
for (i=0; i<5; i ++ )
{
k=partial_verify_vals[i];
/* test vals were put here */
if ((0<=k)&&(k<=((1<<23)-1)))
{
switch ('A')
{
case 'S':
if (i<=2)
{
if (key_buff1[k-1]!=(test_rank_array[i]+iteration))
{
printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i);
}
else
{
passed_verification ++ ;
}
}
else
{
if (key_buff1[k-1]!=(test_rank_array[i]-iteration))
{
printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i);
}
else
{
passed_verification ++ ;
}
}
break;
case 'W':
if (i<2)
{
if (key_buff1[k-1]!=(test_rank_array[i]+(iteration-2)))
{
printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i);
}
else
{
passed_verification ++ ;
}
}
else
{
if (key_buff1[k-1]!=(test_rank_array[i]-iteration))
{
printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i);
}
else
{
passed_verification ++ ;
}
}
break;
case 'A':
if (i<=2)
{
if (key_buff1[k-1]!=(test_rank_array[i]+(iteration-1)))
{
printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i);
}
else
{
passed_verification ++ ;
}
}
else
{
if (key_buff1[k-1]!=(test_rank_array[i]-(iteration-1)))
{
printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i);
}
else
{
passed_verification ++ ;
}
}
break;
case 'B':
if (((i==1)||(i==2))||(i==4))
{
if (key_buff1[k-1]!=(test_rank_array[i]+iteration))
{
printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i);
}
else
{
passed_verification ++ ;
}
}
else
{
if (key_buff1[k-1]!=(test_rank_array[i]-iteration))
{
printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i);
}
else
{
passed_verification ++ ;
}
}
break;
case 'C':
if (i<=2)
{
if (key_buff1[k-1]!=(test_rank_array[i]+iteration))
{
printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i);
}
else
{
passed_verification ++ ;
}
}
else
{
if (key_buff1[k-1]!=(test_rank_array[i]-iteration))
{
printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i);
}
else
{
passed_verification ++ ;
}
}
break;
}
}
}
/*
Make copies of rank info for use by full_verify: these variables
in rank are local; making them global slows down the code, probably
because the compiler can no longer keep them in registers
*/
if (iteration==10)
{
key_buff_ptr_global=key_buff1;
}
/* end master */
return ;
}
/* */
/* M A I N */
/* */
int main(int argc, char * * argv)
{
int i, iteration, itemp;
int nthreads = 1;
double timecounter, maxtime;
/* Initialize the verification arrays if a valid class */
int _ret_val_0 = 0;
#pragma loop name main#0
for (i=0; i<5; i ++ )
{
switch ('A')
{
case 'S':
test_index_array[i]=S_test_index_array[i];
test_rank_array[i]=S_test_rank_array[i];
break;
case 'A':
test_index_array[i]=A_test_index_array[i];
test_rank_array[i]=A_test_rank_array[i];
break;
case 'W':
test_index_array[i]=W_test_index_array[i];
test_rank_array[i]=W_test_rank_array[i];
break;
case 'B':
test_index_array[i]=B_test_index_array[i];
test_rank_array[i]=B_test_rank_array[i];
break;
case 'C':
test_index_array[i]=C_test_index_array[i];
test_rank_array[i]=C_test_rank_array[i];
break;
}
}
/* Printout initial NPB info */
printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"" - IS Benchmark\n\n");
printf(" Size: %d (class %c)\n", 1<<23, 'A');
printf(" Iterations: %d\n", 10);
/* Initialize timer */
timer_clear(0);
/* Generate random number sequence and subsequent keys on all procs */
create_seq(3.14159265E8, /* random number gen seed */
1.220703125E9); /* random number gen mult */
/*
Do one iteration for free (i.e., untimed) to guarantee initialization of
all data and code pages and respective tables
*/
rank(1);
/* Start verification counter */
passed_verification=0;
printf("\n iteration\n");
/* Start timer */
timer_start(0);
/* This is the main iteration */
#pragma loop name main#1
for (iteration=1; iteration<=10; iteration ++ )
{
printf(" %d\n", iteration);
rank(iteration);
}
/* End of timing, obtain maximum time of all processors */
timer_stop(0);
timecounter=timer_read(0);
/*
This tests that keys are in sequence: sorting of last ranked key seq
occurs here, but is an untimed operation
*/
full_verify();
/* The final printout */
if (passed_verification!=((5*10)+1))
{
passed_verification=0;
}
c_print_results("IS", 'A', 1<<23, 0, 0, 10, nthreads, timecounter, (((double)(10*(1<<23)))/timecounter)/1000000.0, "keys ranked", passed_verification, "3.0 structured", "01 Dec 2019", "(none)", "(none)", "-lm", "(none)", "(none)", "(none)", "randlc");
/* */
return _ret_val_0;
}
|
updater_basemaker-inl.h | /*!
* Copyright 2014 by Contributors
* \file updater_basemaker-inl.h
* \brief implement a common tree constructor
* \author Tianqi Chen
*/
#ifndef XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#define XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#include <rabit/rabit.h>
#include <vector>
#include <algorithm>
#include <string>
#include <limits>
#include <utility>
#include "xgboost/base.h"
#include "xgboost/tree_updater.h"
#include "param.h"
#include "constraints.h"
#include "../common/io.h"
#include "../common/random.h"
#include "../common/quantile.h"
namespace xgboost {
namespace tree {
/*!
* \brief base tree maker class that defines common operation
* needed in tree making
*/
class BaseMaker: public TreeUpdater {
public:
void Configure(const Args& args) override {
param_.UpdateAllowUnknown(args);
}
protected:
// helper to collect and query feature meta information
struct FMetaHelper {
public:
/*! \brief find type of each feature, use column format */
inline void InitByCol(DMatrix* p_fmat,
const RegTree& tree) {
fminmax_.resize(tree.param.num_feature * 2);
std::fill(fminmax_.begin(), fminmax_.end(),
-std::numeric_limits<bst_float>::max());
// start accumulating statistics
for (const auto &batch : p_fmat->GetBatches<SortedCSCPage>()) {
for (bst_uint fid = 0; fid < batch.Size(); ++fid) {
auto c = batch[fid];
if (c.size() != 0) {
CHECK_LT(fid * 2, fminmax_.size());
fminmax_[fid * 2 + 0] =
std::max(-c[0].fvalue, fminmax_[fid * 2 + 0]);
fminmax_[fid * 2 + 1] =
std::max(c[c.size() - 1].fvalue, fminmax_[fid * 2 + 1]);
}
}
}
}
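// Note (added): the minimum is stored negated as -min in fminmax_[fid*2],
// so the single Allreduce<Max> in SyncInfo() synchronizes both the lower
// and the upper bound at once.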
/*! \brief synchronize the information */
inline void SyncInfo() {
rabit::Allreduce<rabit::op::Max>(dmlc::BeginPtr(fminmax_), fminmax_.size());
}
// get feature type, 0:empty 1:binary 2:real
inline int Type(bst_uint fid) const {
CHECK_LT(fid * 2 + 1, fminmax_.size())
<< "FeatHelper fid exceeds query bound";
bst_float a = fminmax_[fid * 2];
bst_float b = fminmax_[fid * 2 + 1];
if (a == -std::numeric_limits<bst_float>::max()) return 0;
if (-a == b) {
return 1;
} else {
return 2;
}
}
bst_float MaxValue(bst_uint fid) const {
return fminmax_[fid *2 + 1];
}
void SampleCol(float p, std::vector<bst_feature_t> *p_findex) const {
std::vector<bst_feature_t> &findex = *p_findex;
findex.clear();
for (size_t i = 0; i < fminmax_.size(); i += 2) {
const auto fid = static_cast<bst_uint>(i / 2);
if (this->Type(fid) != 0) findex.push_back(fid);
}
auto n = static_cast<unsigned>(p * findex.size());
std::shuffle(findex.begin(), findex.end(), common::GlobalRandom());
findex.resize(n);
// synchronize findex across workers when subsampling
std::string s_cache;
common::MemoryBufferStream fc(&s_cache);
dmlc::Stream& fs = fc;
if (rabit::GetRank() == 0) {
fs.Write(findex);
}
rabit::Broadcast(&s_cache, 0);
fs.Read(&findex);
}
private:
std::vector<bst_float> fminmax_;
};
// ------static helper functions ------
// helper function to get to next level of the tree
/*! \brief helper function for row-based data */
inline static int NextLevel(const SparsePage::Inst &inst, const RegTree &tree, int nid) {
const RegTree::Node &n = tree[nid];
bst_uint findex = n.SplitIndex();
for (const auto& ins : inst) {
if (findex == ins.index) {
if (ins.fvalue < n.SplitCond()) {
return n.LeftChild();
} else {
return n.RightChild();
}
}
}
return n.DefaultChild();
}
// ------class member helpers---------
/*! \brief initialize temp data structure */
inline void InitData(const std::vector<GradientPair> &gpair,
const DMatrix &fmat,
const RegTree &tree) {
CHECK_EQ(tree.param.num_nodes, tree.param.num_roots)
<< "TreeMaker: can only grow new tree";
const std::vector<unsigned> &root_index = fmat.Info().root_index_;
{
// setup position
position_.resize(gpair.size());
if (root_index.size() == 0) {
std::fill(position_.begin(), position_.end(), 0);
} else {
for (size_t i = 0; i < position_.size(); ++i) {
position_[i] = root_index[i];
CHECK_LT(root_index[i], (unsigned)tree.param.num_roots)
<< "root index exceeds setting";
}
}
// mark data points with negative hessian as deleted
for (size_t i = 0; i < position_.size(); ++i) {
if (gpair[i].GetHess() < 0.0f) position_[i] = ~position_[i];
}
// mark subsample
if (param_.subsample < 1.0f) {
std::bernoulli_distribution coin_flip(param_.subsample);
auto& rnd = common::GlobalRandom();
for (size_t i = 0; i < position_.size(); ++i) {
if (gpair[i].GetHess() < 0.0f) continue;
if (!coin_flip(rnd)) position_[i] = ~position_[i];
}
}
}
{
// expand query
qexpand_.reserve(256); qexpand_.clear();
for (int i = 0; i < tree.param.num_roots; ++i) {
qexpand_.push_back(i);
}
this->UpdateNode2WorkIndex(tree);
}
this->interaction_constraints_.Configure(param_, fmat.Info().num_col_);
}
/*! \brief update the expand queue, adding in the new leaves */
inline void UpdateQueueExpand(const RegTree &tree) {
std::vector<int> newnodes;
for (int nid : qexpand_) {
if (!tree[nid].IsLeaf()) {
newnodes.push_back(tree[nid].LeftChild());
newnodes.push_back(tree[nid].RightChild());
}
}
// use new nodes for qexpand
qexpand_ = newnodes;
this->UpdateNode2WorkIndex(tree);
}
// return decoded position
inline int DecodePosition(bst_uint ridx) const {
const int pid = position_[ridx];
return pid < 0 ? ~pid : pid;
}
// encode the encoded position value for ridx
inline void SetEncodePosition(bst_uint ridx, int nid) {
if (position_[ridx] < 0) {
position_[ridx] = ~nid;
} else {
position_[ridx] = nid;
}
}
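// Example of the encoding (added note): an instance at node 5 stores 5 while
// active and ~5 == -6 once it stops expanding; DecodePosition() recovers 5
// from both.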
/*!
* \brief helper function that uses the column-based data structure to
* reset the positions to the latest ones
* \param nodes the set of nodes that contains the split to be used
* \param p_fmat feature matrix needed for tree construction
* \param tree the regression tree structure
*/
inline void ResetPositionCol(const std::vector<int> &nodes,
DMatrix *p_fmat,
const RegTree &tree) {
// set the positions in the nondefault
this->SetNonDefaultPositionCol(nodes, p_fmat, tree);
this->SetDefaultPostion(p_fmat, tree);
}
/*!
* \brief helper function to set the non-leaf positions to default direction.
* This function is idempotent: applying it multiple times gives the same result.
* \param p_fmat feature matrix needed for tree construction
* \param tree the regression tree structure
*/
inline void SetDefaultPostion(DMatrix *p_fmat,
const RegTree &tree) {
// set default direct nodes to default
// for leaf nodes that are not fresh, mark them as ~nid,
// so that they are ignored in future statistics collection
const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_);
#pragma omp parallel for schedule(static)
for (bst_omp_uint ridx = 0; ridx < ndata; ++ridx) {
const int nid = this->DecodePosition(ridx);
if (tree[nid].IsLeaf()) {
// mark finish when it is not a fresh leaf
if (tree[nid].RightChild() == -1) {
position_[ridx] = ~nid;
}
} else {
// push to default branch
if (tree[nid].DefaultLeft()) {
this->SetEncodePosition(ridx, tree[nid].LeftChild());
} else {
this->SetEncodePosition(ridx, tree[nid].RightChild());
}
}
}
}
/*!
* \brief helper function that uses the column-based data structure
* to CORRECT the positions of non-default directions that WERE set to default
* before calling this function.
* \param batch The column batch
* \param sorted_split_set The set of index that contains split solutions.
* \param tree the regression tree structure
*/
inline void CorrectNonDefaultPositionByBatch(
const SparsePage &batch, const std::vector<bst_uint> &sorted_split_set,
const RegTree &tree) {
for (size_t fid = 0; fid < batch.Size(); ++fid) {
auto col = batch[fid];
auto it = std::lower_bound(sorted_split_set.begin(), sorted_split_set.end(), fid);
if (it != sorted_split_set.end() && *it == fid) {
const auto ndata = static_cast<bst_omp_uint>(col.size());
#pragma omp parallel for schedule(static)
for (bst_omp_uint j = 0; j < ndata; ++j) {
const bst_uint ridx = col[j].index;
const bst_float fvalue = col[j].fvalue;
const int nid = this->DecodePosition(ridx);
CHECK(tree[nid].IsLeaf());
int pid = tree[nid].Parent();
// go back to parent, correct those who are not default
if (!tree[nid].IsRoot() && tree[pid].SplitIndex() == fid) {
if (fvalue < tree[pid].SplitCond()) {
this->SetEncodePosition(ridx, tree[pid].LeftChild());
} else {
this->SetEncodePosition(ridx, tree[pid].RightChild());
}
}
}
}
}
}
/*!
* \brief helper function that collects the sorted, de-duplicated set of split feature indices used by the given nodes
* \param nodes the set of nodes that contains the split to be used
* \param tree the regression tree structure
* \param out_split_set The split index set
*/
inline void GetSplitSet(const std::vector<int> &nodes,
const RegTree &tree,
std::vector<unsigned>* out_split_set) {
std::vector<unsigned>& fsplits = *out_split_set;
fsplits.clear();
// step 1, classify the non-default data into right places
for (int nid : nodes) {
if (!tree[nid].IsLeaf()) {
fsplits.push_back(tree[nid].SplitIndex());
}
}
std::sort(fsplits.begin(), fsplits.end());
fsplits.resize(std::unique(fsplits.begin(), fsplits.end()) - fsplits.begin());
}
/*!
* \brief helper function that uses the column-based data structure to
* update all positions into the non-default branch, if any; the default branch is left untouched
* \param nodes the set of nodes that contains the split to be used
* \param p_fmat feature matrix needed for tree construction
* \param tree the regression tree structure
*/
virtual void SetNonDefaultPositionCol(const std::vector<int> &nodes,
DMatrix *p_fmat,
const RegTree &tree) {
std::vector<unsigned> fsplits;
this->GetSplitSet(nodes, tree, &fsplits);
for (const auto &batch : p_fmat->GetBatches<SortedCSCPage>()) {
for (auto fid : fsplits) {
auto col = batch[fid];
const auto ndata = static_cast<bst_omp_uint>(col.size());
#pragma omp parallel for schedule(static)
for (bst_omp_uint j = 0; j < ndata; ++j) {
const bst_uint ridx = col[j].index;
const bst_float fvalue = col[j].fvalue;
const int nid = this->DecodePosition(ridx);
// go back to parent, correct those who are not default
if (!tree[nid].IsLeaf() && tree[nid].SplitIndex() == fid) {
if (fvalue < tree[nid].SplitCond()) {
this->SetEncodePosition(ridx, tree[nid].LeftChild());
} else {
this->SetEncodePosition(ridx, tree[nid].RightChild());
}
}
}
}
}
}
/*! \brief helper function to get statistics from a tree */
template<typename TStats>
inline void GetNodeStats(const std::vector<GradientPair> &gpair,
const DMatrix &fmat,
const RegTree &tree,
std::vector< std::vector<TStats> > *p_thread_temp,
std::vector<TStats> *p_node_stats) {
std::vector< std::vector<TStats> > &thread_temp = *p_thread_temp;
thread_temp.resize(omp_get_max_threads());
p_node_stats->resize(tree.param.num_nodes);
#pragma omp parallel
{
const int tid = omp_get_thread_num();
thread_temp[tid].resize(tree.param.num_nodes, TStats());
for (unsigned int nid : qexpand_) {
thread_temp[tid][nid] = TStats();
}
}
// setup position
const auto ndata = static_cast<bst_omp_uint>(fmat.Info().num_row_);
#pragma omp parallel for schedule(static)
for (bst_omp_uint ridx = 0; ridx < ndata; ++ridx) {
const int nid = position_[ridx];
const int tid = omp_get_thread_num();
if (nid >= 0) {
thread_temp[tid][nid].Add(gpair[ridx]);
}
}
// sum the per thread statistics together
for (int nid : qexpand_) {
TStats &s = (*p_node_stats)[nid];
s = TStats();
for (size_t tid = 0; tid < thread_temp.size(); ++tid) {
s.Add(thread_temp[tid][nid]);
}
}
}
/*! \brief common helper data structure to build sketch */
struct SketchEntry {
/*! \brief total sum of weights to be met */
double sum_total;
/*! \brief statistics used in the sketch */
double rmin, wmin;
/*! \brief last seen feature value */
bst_float last_fvalue;
/*! \brief next rank goal at which an entry is pushed into the sketch */
double next_goal;
// pointer to the sketch to put things in
common::WXQuantileSketch<bst_float, bst_float> *sketch;
// initialize the space
inline void Init(unsigned max_size) {
next_goal = -1.0f;
rmin = wmin = 0.0f;
sketch->temp.Reserve(max_size + 1);
sketch->temp.size = 0;
}
/*!
* \brief push a new element to sketch
* \param fvalue feature value, comes in sorted ascending order
* \param w weight
* \param max_size maximum number of entries kept in the sketch
*/
inline void Push(bst_float fvalue, bst_float w, unsigned max_size) {
if (next_goal == -1.0f) {
next_goal = 0.0f;
last_fvalue = fvalue;
wmin = w;
return;
}
if (last_fvalue != fvalue) {
double rmax = rmin + wmin;
if (rmax >= next_goal && sketch->temp.size != max_size) {
if (sketch->temp.size == 0 ||
last_fvalue > sketch->temp.data[sketch->temp.size-1].value) {
// push to sketch
sketch->temp.data[sketch->temp.size] =
common::WXQuantileSketch<bst_float, bst_float>::
Entry(static_cast<bst_float>(rmin),
static_cast<bst_float>(rmax),
static_cast<bst_float>(wmin), last_fvalue);
CHECK_LT(sketch->temp.size, max_size)
<< "invalid maximum size max_size=" << max_size
<< ", stemp.size=" << sketch->temp.size;
++sketch->temp.size;
}
if (sketch->temp.size == max_size) {
next_goal = sum_total * 2.0f + 1e-5f;
} else {
next_goal = static_cast<bst_float>(sketch->temp.size * sum_total / max_size);
}
} else {
if (rmax >= next_goal) {
LOG(TRACKER) << "INFO: rmax=" << rmax
<< ", sum_total=" << sum_total
<< ", next_goal=" << next_goal
<< ", size=" << sketch->temp.size;
}
}
rmin = rmax;
wmin = w;
last_fvalue = fvalue;
} else {
wmin += w;
}
}
/*! \brief push final unfinished value to the sketch */
inline void Finalize(unsigned max_size) {
double rmax = rmin + wmin;
if (sketch->temp.size == 0 || last_fvalue > sketch->temp.data[sketch->temp.size-1].value) {
CHECK_LE(sketch->temp.size, max_size)
<< "Finalize: invalid maximum size, max_size=" << max_size
<< ", stemp.size=" << sketch->temp.size;
// push to sketch
sketch->temp.data[sketch->temp.size] =
common::WXQuantileSketch<bst_float, bst_float>::
Entry(static_cast<bst_float>(rmin),
static_cast<bst_float>(rmax),
static_cast<bst_float>(wmin), last_fvalue);
++sketch->temp.size;
}
sketch->PushTemp();
}
};
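// Sketch intuition (added note): Push() receives (fvalue, w) pairs in
// ascending fvalue order and emits an entry whenever the running rank
// rmin + wmin crosses next_goal = temp.size * sum_total / max_size, so the
// retained points are roughly evenly spaced in rank, i.e. they approximate
// the weighted quantiles of the feature.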
/*! \brief training parameter of tree grower */
TrainParam param_;
/*! \brief queue of nodes to be expanded */
std::vector<int> qexpand_;
/*!
* \brief map active node to its working index offset in qexpand;
* can be -1, which means the node is not actively expanding
*/
std::vector<int> node2workindex_;
/*!
* \brief position of each instance in the tree
* can be negative, which means this position is no longer expanding
* see also Decode/EncodePosition
*/
std::vector<int> position_;
FeatureInteractionConstraintHost interaction_constraints_;
private:
inline void UpdateNode2WorkIndex(const RegTree &tree) {
// update the node2workindex
std::fill(node2workindex_.begin(), node2workindex_.end(), -1);
node2workindex_.resize(tree.param.num_nodes);
for (size_t i = 0; i < qexpand_.size(); ++i) {
node2workindex_[qexpand_[i]] = static_cast<int>(i);
}
}
};
} // namespace tree
} // namespace xgboost
#endif // XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
|
loop-5.c | #include <omp.h>
#include <stdlib.h>
#include <string.h>
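/* These tests exercise pointer-typed induction variables in OpenMP canonical
loop form: <, <=, > and >= conditions with positive and negative strides
(including an unsigned long long stride), under the default, static, dynamic
and runtime schedules. Each parallel loop writes a pattern into buf[] and the
serial loop that follows verifies that exactly the expected elements were
written. */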
int
test1 (void)
{
short int buf[64], *p;
int i;
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for
for (p = &buf[10]; p < &buf[54]; p++)
*p = 5;
for (i = 0; i < 64; i++)
if (buf[i] != 5 * (i >= 10 && i < 54))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for
for (p = &buf[3]; p <= &buf[63]; p += 2)
p[-2] = 6;
for (i = 0; i < 64; i++)
if (buf[i] != 6 * ((i & 1) && i <= 61))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for
for (p = &buf[16]; p < &buf[51]; p = 4 + p)
p[2] = 7;
for (i = 0; i < 64; i++)
if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for
for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)
p[2] = -7;
for (i = 0; i < 64; i++)
if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for
for (p = &buf[53]; p > &buf[9]; --p)
*p = 5;
for (i = 0; i < 64; i++)
if (buf[i] != 5 * (i >= 10 && i < 54))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for
for (p = &buf[63]; p >= &buf[3]; p -= 2)
p[-2] = 6;
for (i = 0; i < 64; i++)
if (buf[i] != 6 * ((i & 1) && i <= 61))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for
for (p = &buf[48]; p > &buf[15]; p = -4 + p)
p[2] = 7;
for (i = 0; i < 64; i++)
if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for
for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)
p[2] = -7;
for (i = 0; i < 64; i++)
if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
abort ();
return 0;
}
int
test2 (void)
{
int buf[64], *p;
int i;
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (static, 3)
for (p = &buf[10]; p < &buf[54]; p++)
*p = 5;
for (i = 0; i < 64; i++)
if (buf[i] != 5 * (i >= 10 && i < 54))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (static, 3)
for (p = &buf[3]; p <= &buf[63]; p += 2)
p[-2] = 6;
for (i = 0; i < 64; i++)
if (buf[i] != 6 * ((i & 1) && i <= 61))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (static, 3)
for (p = &buf[16]; p < &buf[51]; p = 4 + p)
p[2] = 7;
for (i = 0; i < 64; i++)
if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (static, 3)
for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)
p[2] = -7;
for (i = 0; i < 64; i++)
if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (static, 3)
for (p = &buf[53]; p > &buf[9]; --p)
*p = 5;
for (i = 0; i < 64; i++)
if (buf[i] != 5 * (i >= 10 && i < 54))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (static, 3)
for (p = &buf[63]; p >= &buf[3]; p -= 2)
p[-2] = 6;
for (i = 0; i < 64; i++)
if (buf[i] != 6 * ((i & 1) && i <= 61))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (static, 3)
for (p = &buf[48]; p > &buf[15]; p = -4 + p)
p[2] = 7;
for (i = 0; i < 64; i++)
if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (static, 3)
for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)
p[2] = -7;
for (i = 0; i < 64; i++)
if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
abort ();
return 0;
}
int
test3 (void)
{
int buf[64], *p;
int i;
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (dynamic, 3)
for (p = &buf[10]; p < &buf[54]; p++)
*p = 5;
for (i = 0; i < 64; i++)
if (buf[i] != 5 * (i >= 10 && i < 54))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (dynamic, 3)
for (p = &buf[3]; p <= &buf[63]; p += 2)
p[-2] = 6;
for (i = 0; i < 64; i++)
if (buf[i] != 6 * ((i & 1) && i <= 61))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (dynamic, 3)
for (p = &buf[16]; p < &buf[51]; p = 4 + p)
p[2] = 7;
for (i = 0; i < 64; i++)
if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (dynamic, 3)
for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)
p[2] = -7;
for (i = 0; i < 64; i++)
if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (dynamic, 3)
for (p = &buf[53]; p > &buf[9]; --p)
*p = 5;
for (i = 0; i < 64; i++)
if (buf[i] != 5 * (i >= 10 && i < 54))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (dynamic, 3)
for (p = &buf[63]; p >= &buf[3]; p -= 2)
p[-2] = 6;
for (i = 0; i < 64; i++)
if (buf[i] != 6 * ((i & 1) && i <= 61))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (dynamic, 3)
for (p = &buf[48]; p > &buf[15]; p = -4 + p)
p[2] = 7;
for (i = 0; i < 64; i++)
if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (dynamic, 3)
for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)
p[2] = -7;
for (i = 0; i < 64; i++)
if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
abort ();
return 0;
}
int
test4 (void)
{
int buf[64], *p;
int i;
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (runtime)
for (p = &buf[10]; p < &buf[54]; p++)
*p = 5;
for (i = 0; i < 64; i++)
if (buf[i] != 5 * (i >= 10 && i < 54))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (runtime)
for (p = &buf[3]; p <= &buf[63]; p += 2)
p[-2] = 6;
for (i = 0; i < 64; i++)
if (buf[i] != 6 * ((i & 1) && i <= 61))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (runtime)
for (p = &buf[16]; p < &buf[51]; p = 4 + p)
p[2] = 7;
for (i = 0; i < 64; i++)
if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (runtime)
for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)
p[2] = -7;
for (i = 0; i < 64; i++)
if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (runtime)
for (p = &buf[53]; p > &buf[9]; --p)
*p = 5;
for (i = 0; i < 64; i++)
if (buf[i] != 5 * (i >= 10 && i < 54))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (runtime)
for (p = &buf[63]; p >= &buf[3]; p -= 2)
p[-2] = 6;
for (i = 0; i < 64; i++)
if (buf[i] != 6 * ((i & 1) && i <= 61))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (runtime)
for (p = &buf[48]; p > &buf[15]; p = -4 + p)
p[2] = 7;
for (i = 0; i < 64; i++)
if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (runtime)
for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)
p[2] = -7;
for (i = 0; i < 64; i++)
if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
abort ();
return 0;
}
int
main (void)
{
test1 ();
test2 ();
test3 ();
omp_set_schedule (omp_sched_static, 0);
test4 ();
omp_set_schedule (omp_sched_static, 3);
test4 ();
omp_set_schedule (omp_sched_dynamic, 5);
test4 ();
omp_set_schedule (omp_sched_guided, 2);
test4 ();
return 0;
}
|
ConstraintsContainerDms-impl.h | /**********************************************************************************************************************
This file is part of the Control Toolbox (https://github.com/ethz-adrl/control-toolbox), copyright by ETH Zurich.
Licensed under the BSD-2 license (see LICENSE file in main directory)
**********************************************************************************************************************/
template <size_t STATE_DIM, size_t CONTROL_DIM, typename SCALAR>
ConstraintsContainerDms<STATE_DIM, CONTROL_DIM, SCALAR>::ConstraintsContainerDms(
std::shared_ptr<OptVectorDms<STATE_DIM, CONTROL_DIM, SCALAR>> w,
std::shared_ptr<tpl::TimeGrid<SCALAR>> timeGrid,
std::vector<std::shared_ptr<ShotContainer<STATE_DIM, CONTROL_DIM, SCALAR>>> shotContainers,
std::shared_ptr<ConstraintDiscretizer<STATE_DIM, CONTROL_DIM, SCALAR>> discretizedConstraints,
const state_vector_t& x0,
const DmsSettings settings)
: settings_(settings), shotContainers_(shotContainers)
{
c_init_ = std::shared_ptr<InitStateConstraint<STATE_DIM, CONTROL_DIM, SCALAR>>(
new InitStateConstraint<STATE_DIM, CONTROL_DIM, SCALAR>(x0, w));
this->constraints_.push_back(c_init_);
for (size_t shotNr = 0; shotNr < settings_.N_; shotNr++)
{
std::shared_ptr<ContinuityConstraint<STATE_DIM, CONTROL_DIM, SCALAR>> c_i =
std::shared_ptr<ContinuityConstraint<STATE_DIM, CONTROL_DIM, SCALAR>>(
new ContinuityConstraint<STATE_DIM, CONTROL_DIM, SCALAR>(shotContainers[shotNr], w, shotNr, settings));
this->constraints_.push_back(c_i);
}
if (discretizedConstraints)
{
std::cout << "Adding discretized constraints" << std::endl;
this->constraints_.push_back(discretizedConstraints);
}
}
template <size_t STATE_DIM, size_t CONTROL_DIM, typename SCALAR>
void ConstraintsContainerDms<STATE_DIM, CONTROL_DIM, SCALAR>::prepareEvaluation()
{
#pragma omp parallel for num_threads(settings_.nThreads_)
for (auto shotContainer = shotContainers_.begin(); shotContainer < shotContainers_.end(); ++shotContainer)
{
(*shotContainer)->integrateShot();
}
}
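// Note (added): the iterator-based "#pragma omp parallel for" loops here and
// below rely on the random-access-iterator canonical loop form introduced in
// OpenMP 3.0, which std::vector iterators satisfy.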
template <size_t STATE_DIM, size_t CONTROL_DIM, typename SCALAR>
void ConstraintsContainerDms<STATE_DIM, CONTROL_DIM, SCALAR>::prepareJacobianEvaluation()
{
#pragma omp parallel for num_threads(settings_.nThreads_)
for (auto shotContainer = shotContainers_.begin(); shotContainer < shotContainers_.end(); ++shotContainer)
{
(*shotContainer)->integrateSensitivities();
}
}
template <size_t STATE_DIM, size_t CONTROL_DIM, typename SCALAR>
void ConstraintsContainerDms<STATE_DIM, CONTROL_DIM, SCALAR>::changeInitialConstraint(const state_vector_t& x0)
{
c_init_->updateConstraint(x0);
}
|
crc32_fmt_plug.c | /*
* This file is part of John the Ripper password cracker,
*
* Written by Jim Fougeron <jfoug at cox.net> in 2011. No copyright
* is claimed, and the software is hereby placed in the public domain.
* In case this attempt to disclaim copyright and place the software in the
* public domain is deemed null and void, then the software is
* Copyright (c) 2011 Jim Fougeron and it is hereby released to the
* general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* There's ABSOLUTELY NO WARRANTY, express or implied.
*
* This format is: 8hex.8hex  The first 8 hex is the 'starting' crc value
* So, if you have a file and its CRC is XYZ, then you would put that value
* here, then when the password(s) are found, append them to the file, and get
* the final CRC value. If you want to find a password with the 'proper' CRC
* value, then put 0 into the first field.
*
* The 2nd 8 hex value is what we are looking for.
*
*/
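/*
 * Worked example (added), using the self-tests below: the CRC-32 of "dummy"
 * is 4ff4f23f, so the line "$crc32$4ff4f23f.ce6eb863" asks for a string
 * which, appended to data whose running CRC is 4ff4f23f, yields ce6eb863;
 * the answer found is "password".
 */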
#if FMT_EXTERNS_H
extern struct fmt_main fmt_crc32;
#elif FMT_REGISTERS_H
john_register_one(&fmt_crc32);
#else
#include <string.h>
#include "common.h"
#include "formats.h"
#include "pkzip.h" // includes the 'inline' crc table.
#include "loader.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "memdbg.h"
#define FORMAT_LABEL "CRC32"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "CRC32 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 31
#define BINARY_SIZE 4
#define BINARY_ALIGN 4
#define SALT_SIZE 4
#define SALT_ALIGN 4
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 8192 // per thread
static struct fmt_tests tests[] = {
{"$crc32$00000000.fa455f6b", "ripper"},
{"$crc32$00000000.4ff4f23f", "dummy"},
// {"$crc32$00000000.00000000", ""}, // this one ends up skewing the benchmark time, WAY too much.
{"$crc32$4ff4f23f.ce6eb863", "password"}, // this would be for file with contents: 'dummy' and we want to find a password to append that is 'password'
{"$crc32$fa455f6b.c59b2aeb", "123456"}, // ripper123456
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 *crcs;
static ARCH_WORD_32 crcsalt;
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int n = omp_get_max_threads();
if (n > 4) {
n = 4; // it just won't scale further
omp_set_num_threads(n);
}
self->params.max_keys_per_crypt = MAX_KEYS_PER_CRYPT * n;
#endif
//printf("Using %u x %u = %u keys per crypt\n", MAX_KEYS_PER_CRYPT, n, self->params.max_keys_per_crypt);
saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
crcs = mem_calloc_tiny(sizeof(*crcs) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p, *q;
int i;
if (strncmp(ciphertext, "$crc32$", 7))
return 0;
p = strrchr(ciphertext, '$');
q = strchr(p, '.');
if (!q || q-p != 9)
return 0;
for (i = 0; i < 8; ++i) {
int c1 = ARCH_INDEX(ciphertext[7+i]);
int c2 = ARCH_INDEX(ciphertext[16+i]);
if (atoi16[c1] == 0x7F || atoi16[c2] == 0x7F)
return 0;
/* We don't support uppercase hex digits here, or else we'd need to implement
* split() and set FMT_SPLIT_UNIFIES_CASE. */
if (c1 >= 'A' && c1 <= 'F')
return 0;
if (c2 >= 'A' && c2 <= 'F')
return 0;
}
return 1;
}
static int get_hash_0(int index) { return crcs[index] & 0xf; }
static int get_hash_1(int index) { return crcs[index] & 0xff; }
static int get_hash_2(int index) { return crcs[index] & 0xfff; }
static int get_hash_3(int index) { return crcs[index] & 0xffff; }
static int get_hash_4(int index) { return crcs[index] & 0xfffff; }
static int get_hash_5(int index) { return crcs[index] & 0xffffff; }
static int get_hash_6(int index) { return crcs[index] & 0x7ffffff; }
static void *binary(char *ciphertext)
{
static ARCH_WORD_32 *out;
if (!out)
out = mem_alloc_tiny(sizeof(ARCH_WORD_32), MEM_ALIGN_WORD);
sscanf(&ciphertext[16], "%x", out);
	// Performing the complement here allows us to avoid complementing
	// at the end of each crypt_all call.
*out = ~(*out);
return out;
}
static void *salt(char *ciphertext)
{
static ARCH_WORD_32 *out;
if (!out)
out = mem_alloc_tiny(sizeof(ARCH_WORD_32), MEM_ALIGN_WORD);
sscanf(&ciphertext[7], "%x", out);
// since we ask for the crc of a file, or zero, we need to complement here,
// to get it into 'proper' working order.
*out = ~(*out);
return out;
}
#if 0 // Not possible with current interface
static char *source(struct db_password *pw, char Buf[LINE_BUFFER_SIZE] )
{
ARCH_WORD_32 s = *(ARCH_WORD_32*)(pw->source);
ARCH_WORD_32 b = *(ARCH_WORD_32*)(pw->binary);
s = ~s;
b = ~b;
sprintf(Buf, "$crc32$%08x.%08x", s,b);
return Buf;
}
#endif
static void set_salt(void *salt)
{
crcsalt = *((ARCH_WORD_32 *)salt);
}
static void set_key(char *key, int index)
{
char *p = saved_key[index];
while ( (*p++ = *key++) )
;
}
static char *get_key(int index)
{
return saved_key[index];
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int i;
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
for (i = 0; i < count; ++i) {
ARCH_WORD_32 crc = crcsalt;
unsigned char *p = (unsigned char*)saved_key[i];
while (*p)
crc = pkzip_crc32(crc, *p++);
//crcs[i] = ~crc;
crcs[i] = crc;
}
return count;
}
static int cmp_all(void *binary, int count)
{
ARCH_WORD_32 crc=*((ARCH_WORD_32*)binary), i;
for (i = 0; i < count; ++i)
if (crc == crcs[i]) return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return *((ARCH_WORD_32*)binary) == crcs[index];
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static int salt_hash(void *salt)
{
return *(ARCH_WORD_32*)salt & (SALT_HASH_SIZE - 1);
}
struct fmt_main fmt_crc32 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_NOT_EXACT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
tests
}, {
init,
fmt_default_done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
binary,
salt,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
par-simple.c |
int main() {
int x = 0;
#pragma omp parallel num_threads(42)
{
x++;
}
return x;
}
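/*
 * Note (added): the unsynchronized x++ above is a data race, so the value
 * returned is unspecified (anything from 1 up to the team size in practice).
 * A race-free sketch (not part of the original test) would protect the
 * update with an atomic:
 */
#ifdef PAR_SIMPLE_ATOMIC
int main_atomic() {
  int x = 0;
  #pragma omp parallel num_threads(42)
  {
    #pragma omp atomic
    x++;
  }
  return x; /* the number of threads actually granted; 42 if available */
}
#endif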
|
moments.c | /* Generated by Cython 0.25.1 */
/* BEGIN: Cython Metadata
{
"distutils": {
"depends": [],
"extra_compile_args": [
"-O3",
"-ffast-math",
"-fopenmp"
],
"extra_link_args": [
"-fopenmp"
]
},
"module_name": "bmtools.exact.moments"
}
END: Cython Metadata */
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03020000)
#error Cython requires Python 2.6+ or Python 3.2+.
#else
#define CYTHON_ABI "0_25_1"
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#ifndef HAVE_LONG_LONG
#if PY_VERSION_HEX >= 0x03030000 || (PY_MAJOR_VERSION == 2 && PY_VERSION_HEX >= 0x02070000)
#define HAVE_LONG_LONG
#endif
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 0
#undef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 0
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#undef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#undef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 1
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#elif defined(PYSTON_VERSION)
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#if PY_MAJOR_VERSION < 3
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#elif !defined(CYTHON_USE_PYLONG_INTERNALS)
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#ifndef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 1
#endif
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#if PY_VERSION_HEX < 0x030300F0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#elif !defined(CYTHON_USE_UNICODE_WRITER)
#define CYTHON_USE_UNICODE_WRITER 1
#endif
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#ifndef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 1
#endif
#ifndef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 1
#endif
#endif
#if !defined(CYTHON_FAST_PYCCALL)
#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#undef SHIFT
#undef BASE
#undef MASK
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#ifndef METH_FASTCALL
#define METH_FASTCALL 0x80
typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject **args,
Py_ssize_t nargs, PyObject *kwnames);
#else
#define __Pyx_PyCFunctionFast _PyCFunctionFast
#endif
#if CYTHON_FAST_PYCCALL
#define __Pyx_PyFastCFunction_Check(func)\
 ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST)))))
#else
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
0 : _PyUnicode_Ready((PyObject *)(op)))
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#else
#define CYTHON_PEP393_ENABLED 0
#define PyUnicode_1BYTE_KIND 1
#define PyUnicode_2BYTE_KIND 2
#define PyUnicode_4BYTE_KIND 4
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
#define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_PYSTON
#define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
#else
#define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#if CYTHON_USE_ASYNC_SLOTS
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#else
typedef struct {
unaryfunc am_await;
unaryfunc am_aiter;
unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#endif
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifndef CYTHON_INLINE
#if defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
float value;
memset(&value, 0xFF, sizeof(value));
return value;
}
#endif
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ \
__pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \
}
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__bmtools__exact__moments
#define __PYX_HAVE_API__bmtools__exact__moments
#include "math.h"
#include "pythread.h"
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include "pystate.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#ifdef PYREX_WITHOUT_ASSERTIONS
#define CYTHON_WITHOUT_ASSERTIONS
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER) && defined (_M_X64)
#define __Pyx_sst_abs(value) _abs64(value)
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
#if PY_MAJOR_VERSION < 3
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u)
{
const Py_UNICODE *u_end = u;
while (*u_end++) ;
return (size_t)(u_end - u - 1);
}
#else
#define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen
#endif
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
#define __Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False))
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
PyObject* ascii_chars_u = NULL;
PyObject* ascii_chars_b = NULL;
const char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
if (strcmp(default_encoding_c, "ascii") == 0) {
__Pyx_sys_getdefaultencoding_not_ascii = 0;
} else {
char ascii_chars[128];
int c;
for (c = 0; c < 128; c++) {
ascii_chars[c] = c;
}
__Pyx_sys_getdefaultencoding_not_ascii = 1;
ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
if (!ascii_chars_u) goto bad;
ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
PyErr_Format(
PyExc_ValueError,
"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
default_encoding_c);
goto bad;
}
Py_DECREF(ascii_chars_u);
Py_DECREF(ascii_chars_b);
}
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
__PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c));
if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static PyObject *__pyx_m;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
static const char *__pyx_f[] = {
"bmtools/exact/moments.pyx",
"stringsource",
};
/* MemviewSliceStruct.proto */
struct __pyx_memoryview_obj;
typedef struct {
struct __pyx_memoryview_obj *memview;
char *data;
Py_ssize_t shape[8];
Py_ssize_t strides[8];
Py_ssize_t suboffsets[8];
} __Pyx_memviewslice;
/* BufferFormatStructs.proto */
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
const char* name;
struct __Pyx_StructField_* fields;
size_t size;
size_t arraysize[8];
int ndim;
char typegroup;
char is_unsigned;
int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
__Pyx_TypeInfo* type;
const char* name;
size_t offset;
} __Pyx_StructField;
typedef struct {
__Pyx_StructField* field;
size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
__Pyx_StructField root;
__Pyx_BufFmt_StackElem* head;
size_t fmt_offset;
size_t new_count, enc_count;
size_t struct_alignment;
int is_complex;
char enc_type;
char new_packmode;
char enc_packmode;
char is_valid_array;
} __Pyx_BufFmt_Context;
/* Atomics.proto */
#include <pythread.h>
#ifndef CYTHON_ATOMICS
#define CYTHON_ATOMICS 1
#endif
#define __pyx_atomic_int_type int
#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\
(__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\
!defined(__i386__)
#define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
#define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using GNU atomics"
#endif
#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0
#include <Windows.h>
#undef __pyx_atomic_int_type
#define __pyx_atomic_int_type LONG
#define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#pragma message ("Using MSVC atomics")
#endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
#define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using Intel atomics"
#endif
#else
#undef CYTHON_ATOMICS
#define CYTHON_ATOMICS 0
#ifdef __PYX_DEBUG_ATOMICS
#warning "Not using atomics"
#endif
#endif
typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
#if CYTHON_ATOMICS
#define __pyx_add_acquisition_count(memview)\
__pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
__pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#else
#define __pyx_add_acquisition_count(memview)\
__pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
__pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif
/* "bmtools/exact/helpers.pxd":7
* """
*
* ctypedef signed char state_t # <<<<<<<<<<<<<<
* cdef char* state_t_code
*
*/
typedef signed char __pyx_t_7bmtools_5exact_7helpers_state_t;
/*--- Type declarations ---*/
struct __pyx_array_obj;
struct __pyx_MemviewEnum_obj;
struct __pyx_memoryview_obj;
struct __pyx_memoryviewslice_obj;
struct __pyx_opt_args_7bmtools_5exact_7moments_calc_norm_const;
/* "bmtools/exact/moments.pxd":31
*
*
* cdef double calc_norm_const(double[:,:] weights, double[:] biases, # <<<<<<<<<<<<<<
* state_t[:] state, long start_state_index=?,
* long end_state_index=?) nogil
*/
struct __pyx_opt_args_7bmtools_5exact_7moments_calc_norm_const {
int __pyx_n;
long start_state_index;
long end_state_index;
};
/* "View.MemoryView":103
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
struct __pyx_array_obj {
PyObject_HEAD
struct __pyx_vtabstruct_array *__pyx_vtab;
char *data;
Py_ssize_t len;
char *format;
int ndim;
Py_ssize_t *_shape;
Py_ssize_t *_strides;
Py_ssize_t itemsize;
PyObject *mode;
PyObject *_format;
void (*callback_free_data)(void *);
int free_data;
int dtype_is_object;
};
/* "View.MemoryView":275
*
* @cname('__pyx_MemviewEnum')
* cdef class Enum(object): # <<<<<<<<<<<<<<
* cdef object name
* def __init__(self, name):
*/
struct __pyx_MemviewEnum_obj {
PyObject_HEAD
PyObject *name;
};
/* "View.MemoryView":326
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
struct __pyx_memoryview_obj {
PyObject_HEAD
struct __pyx_vtabstruct_memoryview *__pyx_vtab;
PyObject *obj;
PyObject *_size;
PyObject *_array_interface;
PyThread_type_lock lock;
__pyx_atomic_int acquisition_count[2];
__pyx_atomic_int *acquisition_count_aligned_p;
Py_buffer view;
int flags;
int dtype_is_object;
__Pyx_TypeInfo *typeinfo;
};
/* "View.MemoryView":951
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
struct __pyx_memoryviewslice_obj {
struct __pyx_memoryview_obj __pyx_base;
__Pyx_memviewslice from_slice;
PyObject *from_object;
PyObject *(*to_object_func)(char *);
int (*to_dtype_func)(char *, PyObject *);
};
/* "View.MemoryView":103
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
struct __pyx_vtabstruct_array {
PyObject *(*get_memview)(struct __pyx_array_obj *);
};
static struct __pyx_vtabstruct_array *__pyx_vtabptr_array;
/* "View.MemoryView":326
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
struct __pyx_vtabstruct_memoryview {
char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *);
PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *);
};
static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview;
/* "View.MemoryView":951
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
struct __pyx_vtabstruct__memoryviewslice {
struct __pyx_vtabstruct_memoryview __pyx_base;
};
static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice;
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
/* PyObjectGetAttrStr.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro))
return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
if (likely(tp->tp_getattr))
return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
return PyObject_GetAttr(obj, attr_name);
}
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
const char* function_name);
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* BufferFormatCheck.proto */
static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj,
__Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type); // PROTO
/* MemviewSliceInit.proto */
#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d
#define __Pyx_MEMVIEW_DIRECT 1
#define __Pyx_MEMVIEW_PTR 2
#define __Pyx_MEMVIEW_FULL 4
#define __Pyx_MEMVIEW_CONTIG 8
#define __Pyx_MEMVIEW_STRIDED 16
#define __Pyx_MEMVIEW_FOLLOW 32
#define __Pyx_IS_C_CONTIG 1
#define __Pyx_IS_F_CONTIG 2
static int __Pyx_init_memviewslice(
struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference);
static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p)
#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview))
#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__)
static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int);
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int);
/* PyThreadStateGet.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = PyThreadState_GET();
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* ForceInitThreads.proto */
#ifndef __PYX_FORCE_INIT_THREADS
#define __PYX_FORCE_INIT_THREADS 0
#endif
/* ArgTypeTest.proto */
static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
const char *name, int exact);
/* IncludeStringH.proto */
#include <string.h>
/* BytesEquals.proto */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);
/* UnicodeEquals.proto */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);
/* StrEquals.proto */
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
#else
#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
#endif
/* UnaryNegOverflows.proto */
#define UNARY_NEG_WOULD_OVERFLOW(x)\
(((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/
/* GetAttr.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
/* decode_c_string.proto */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors));
/* RaiseTooManyValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
/* RaiseNeedMoreValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
/* RaiseNoneIterError.proto */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
/* ExtTypeTest.proto */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
/* SaveResetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
#else
#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
#endif
/* PyErrExceptionMatches.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
#else
#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
#endif
/* GetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* SwapException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* Import.proto */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* PyFunctionFastCall.proto */
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs)\
__Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
#endif
/* PyCFunctionFastCall.proto */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
#endif
/* GetItemInt.proto */
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
(is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
__Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound, int boundscheck);
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
/* ListCompAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {
PyListObject* L = (PyListObject*) list;
Py_ssize_t len = Py_SIZE(list);
if (likely(L->allocated > len)) {
Py_INCREF(x);
PyList_SET_ITEM(list, len, x);
Py_SIZE(list) = len+1;
return 0;
}
return PyList_Append(list, x);
}
#else
#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x)
#endif
/* PyIntBinop.proto */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace);
#else
#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace)\
(inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2))
#endif
/* ListExtend.proto */
static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) {
#if CYTHON_COMPILING_IN_CPYTHON
PyObject* none = _PyList_Extend((PyListObject*)L, v);
if (unlikely(!none))
return -1;
Py_DECREF(none);
return 0;
#else
return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v);
#endif
}
/* ListAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
PyListObject* L = (PyListObject*) list;
Py_ssize_t len = Py_SIZE(list);
if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {
Py_INCREF(x);
PyList_SET_ITEM(list, len, x);
Py_SIZE(list) = len+1;
return 0;
}
return PyList_Append(list, x);
}
#else
#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
#endif
/* None.proto */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
/* WriteUnraisableException.proto */
static void __Pyx_WriteUnraisable(const char *name, int clineno,
int lineno, const char *filename,
int full_traceback, int nogil);
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
/* SetVTable.proto */
static int __Pyx_SetVtable(PyObject *dict, void *vtable);
/* CodeObjectCache.proto */
typedef struct {
PyCodeObject* code_object;
int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
/* BufferStructDeclare.proto */
typedef struct {
Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
typedef struct {
size_t refcount;
Py_buffer pybuffer;
} __Pyx_Buffer;
typedef struct {
__Pyx_Buffer *rcbuffer;
char *data;
__Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
/* None.proto */
static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0};
static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1};
/* MemviewSliceIsContig.proto */
static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs,
char order, int ndim);
/* OverlappingSlices.proto */
static int __pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize);
/* Capsule.proto */
static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig);
/* TypeInfoCompare.proto */
static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b);
/* MemviewSliceValidateAndInit.proto */
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_double(PyObject *);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(PyObject *);
/* None.proto */
static CYTHON_INLINE long __Pyx_pow_long(long, long);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
/* MemviewDtypeToObject.proto */
static CYTHON_INLINE PyObject *__pyx_memview_get_double(const char *itemp);
static CYTHON_INLINE int __pyx_memview_set_double(const char *itemp, PyObject *obj);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* None.proto */
static CYTHON_INLINE Py_ssize_t __Pyx_pow_Py_ssize_t(Py_ssize_t, Py_ssize_t);
/* MemviewSliceCopyTemplate.proto */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_nn___pyx_t_7bmtools_5exact_7helpers_state_t(PyObject *);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_nn___pyx_t_7bmtools_5exact_7helpers_state_t(PyObject *);
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* FunctionExport.proto */
static int __Pyx_ExportFunction(const char *name, void (*f)(void), const char *sig);
/* PyIdentifierFromString.proto */
#if !defined(__Pyx_PyIdentifier_FromString)
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s)
#else
#define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s)
#endif
#endif
/* ModuleImport.proto */
static PyObject *__Pyx_ImportModule(const char *name);
/* VoidPtrImport.proto */
static int __Pyx_ImportVoidPtr(PyObject *module, const char *name, void **p, const char *sig);
/* FunctionImport.proto */
static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
/* Module declarations from 'bmtools.exact.helpers' */
static char **__pyx_vp_7bmtools_5exact_7helpers_state_t_code = 0;
#define __pyx_v_7bmtools_5exact_7helpers_state_t_code (*__pyx_vp_7bmtools_5exact_7helpers_state_t_code)
static double (*__pyx_f_7bmtools_5exact_7helpers_neg_energy)(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice); /*proto*/
static void (*__pyx_f_7bmtools_5exact_7helpers_check_state_space_size)(int, int, int __pyx_skip_dispatch); /*proto*/
static __Pyx_memviewslice (*__pyx_f_7bmtools_5exact_7helpers_partition_state_space)(long, int); /*proto*/
static void (*__pyx_f_7bmtools_5exact_7helpers_index_to_state)(long, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/
static void (*__pyx_f_7bmtools_5exact_7helpers_next_state)(__Pyx_memviewslice, long); /*proto*/
/* Module declarations from 'cython.view' */
/* Module declarations from 'bmtools.exact.moments' */
static PyTypeObject *__pyx_array_type = 0;
static PyTypeObject *__pyx_MemviewEnum_type = 0;
static PyTypeObject *__pyx_memoryview_type = 0;
static PyTypeObject *__pyx_memoryviewslice_type = 0;
static PyObject *generic = 0;
static PyObject *strided = 0;
static PyObject *indirect = 0;
static PyObject *contiguous = 0;
static PyObject *indirect_contiguous = 0;
static int __pyx_memoryview_thread_locks_used;
static PyThread_type_lock __pyx_memoryview_thread_locks[8];
static void __pyx_f_7bmtools_5exact_7moments_calc_unnormed_probs_for_state_range(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, double *, __Pyx_memviewslice, long, long); /*proto*/
static void __pyx_f_7bmtools_5exact_7moments_normalise_probabilities(__Pyx_memviewslice, double); /*proto*/
static void __pyx_f_7bmtools_5exact_7moments_accum_moments_for_state_range(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, double *, __Pyx_memviewslice, __Pyx_memviewslice, long, long); /*proto*/
static void __pyx_f_7bmtools_5exact_7moments_normalise_first_moment(__Pyx_memviewslice, double); /*proto*/
static void __pyx_f_7bmtools_5exact_7moments_combine_and_normalise_first_moments(__Pyx_memviewslice, double); /*proto*/
static void __pyx_f_7bmtools_5exact_7moments_normalise_and_reflect_second_moment(__Pyx_memviewslice, double); /*proto*/
static void __pyx_f_7bmtools_5exact_7moments_combine_normalise_and_reflect_second_moments(__Pyx_memviewslice, double); /*proto*/
static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/
static void *__pyx_align_pointer(void *, size_t); /*proto*/
static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/
static PyObject *_unellipsify(PyObject *, int); /*proto*/
static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/
static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/
static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/
static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/
static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/
static int __pyx_memoryview_err(PyObject *, char *); /*proto*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/
static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/
static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 };
static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_7bmtools_5exact_7helpers_state_t = { "state_t", NULL, sizeof(__pyx_t_7bmtools_5exact_7helpers_state_t), { 0 }, 0, IS_UNSIGNED(__pyx_t_7bmtools_5exact_7helpers_state_t) ? 'U' : 'I', IS_UNSIGNED(__pyx_t_7bmtools_5exact_7helpers_state_t), 0 };
#define __Pyx_MODULE_NAME "bmtools.exact.moments"
int __pyx_module_is_main_bmtools__exact__moments = 0;
/* Implementation of 'bmtools.exact.moments' */
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_MemoryError;
static PyObject *__pyx_builtin_enumerate;
static PyObject *__pyx_builtin_Ellipsis;
static PyObject *__pyx_builtin_TypeError;
static PyObject *__pyx_builtin_id;
static PyObject *__pyx_builtin_IndexError;
static const char __pyx_k_O[] = "O";
static const char __pyx_k_c[] = "c";
static const char __pyx_k_d[] = "d";
static const char __pyx_k_i[] = "i";
static const char __pyx_k_j[] = "j";
static const char __pyx_k_t[] = "t";
static const char __pyx_k_id[] = "id";
static const char __pyx_k_obj[] = "obj";
static const char __pyx_k_base[] = "base";
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_mode[] = "mode";
static const char __pyx_k_name[] = "name";
static const char __pyx_k_ndim[] = "ndim";
static const char __pyx_k_pack[] = "pack";
static const char __pyx_k_prob[] = "prob";
static const char __pyx_k_size[] = "size";
static const char __pyx_k_step[] = "step";
static const char __pyx_k_stop[] = "stop";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_ASCII[] = "ASCII";
static const char __pyx_k_class[] = "__class__";
static const char __pyx_k_error[] = "error";
static const char __pyx_k_flags[] = "flags";
static const char __pyx_k_force[] = "force";
static const char __pyx_k_probs[] = "probs";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_shape[] = "shape";
static const char __pyx_k_start[] = "start";
static const char __pyx_k_state[] = "state";
static const char __pyx_k_biases[] = "biases";
static const char __pyx_k_encode[] = "encode";
static const char __pyx_k_format[] = "format";
static const char __pyx_k_import[] = "__import__";
static const char __pyx_k_name_2[] = "__name__";
static const char __pyx_k_states[] = "states";
static const char __pyx_k_struct[] = "struct";
static const char __pyx_k_unpack[] = "unpack";
static const char __pyx_k_fortran[] = "fortran";
static const char __pyx_k_memview[] = "memview";
static const char __pyx_k_weights[] = "weights";
static const char __pyx_k_Ellipsis[] = "Ellipsis";
static const char __pyx_k_itemsize[] = "itemsize";
static const char __pyx_k_TypeError[] = "TypeError";
static const char __pyx_k_enumerate[] = "enumerate";
static const char __pyx_k_first_mom[] = "first_mom";
static const char __pyx_k_intervals[] = "intervals";
static const char __pyx_k_num_units[] = "num_units";
static const char __pyx_k_IndexError[] = "IndexError";
static const char __pyx_k_ValueError[] = "ValueError";
static const char __pyx_k_first_moms[] = "first_moms";
static const char __pyx_k_norm_const[] = "norm_const";
static const char __pyx_k_num_states[] = "num_states";
static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__";
static const char __pyx_k_second_mom[] = "second_mom";
static const char __pyx_k_MemoryError[] = "MemoryError";
static const char __pyx_k_norm_consts[] = "norm_consts";
static const char __pyx_k_num_threads[] = "num_threads";
static const char __pyx_k_second_moms[] = "second_moms";
static const char __pyx_k_state_index[] = "state_index";
static const char __pyx_k_all_in_place[] = "all_in_place";
static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer";
static const char __pyx_k_allocate_buffer[] = "allocate_buffer";
static const char __pyx_k_dtype_is_object[] = "dtype_is_object";
static const char __pyx_k_strided_and_direct[] = "<strided and direct>";
static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>";
static const char __pyx_k_bmtools_exact_moments[] = "bmtools.exact.moments";
static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>";
static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>";
static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>";
static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>";
static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'";
static const char __pyx_k_calculate_probs_parallel[] = "calculate_probs_parallel";
static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d.";
static const char __pyx_k_calculate_moments_parallel[] = "calculate_moments_parallel";
static const char __pyx_k_Number_of_threads_must_be_0[] = "Number of threads must be > 0";
static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array";
static const char __pyx_k_calculate_moments_sequential[] = "calculate_moments_sequential";
static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data.";
static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>";
static const char __pyx_k_home_matt_Projects_boltzmann_ma[] = "/home/matt/Projects/boltzmann-machine-tools/bmtools/exact/moments.pyx";
static const char __pyx_k_Boltzmann_machine_moment_calcula[] = "Boltzmann machine moment calculation.\n\nFunctions for calculating first and second moments of Boltzmann machine\ninvariant distribution exactly given weight and bias parameters. Intended\nfor use only with small toy systems due to exponential scaling of\nsummations over state space with number of units. As well as a single\nthreaded sequential implementation a parallel implementation which can\nbe distributed over multiple compute units using OpenMP in a shared memory\narchitecture is provided.\n";
static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides";
static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory.";
static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array";
static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported";
static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s";
static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)";
static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object";
static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)";
static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides.";
static PyObject *__pyx_n_s_ASCII;
static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri;
static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is;
static PyObject *__pyx_kp_s_Cannot_index_with_type_s;
static PyObject *__pyx_n_s_Ellipsis;
static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr;
static PyObject *__pyx_n_s_IndexError;
static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte;
static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr;
static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d;
static PyObject *__pyx_n_s_MemoryError;
static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x;
static PyObject *__pyx_kp_s_MemoryView_of_r_object;
static PyObject *__pyx_kp_s_Number_of_threads_must_be_0;
static PyObject *__pyx_n_b_O;
static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a;
static PyObject *__pyx_n_s_TypeError;
static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_n_s_all_in_place;
static PyObject *__pyx_n_s_allocate_buffer;
static PyObject *__pyx_n_s_base;
static PyObject *__pyx_n_s_biases;
static PyObject *__pyx_n_s_bmtools_exact_moments;
static PyObject *__pyx_n_s_c;
static PyObject *__pyx_n_u_c;
static PyObject *__pyx_n_s_calculate_moments_parallel;
static PyObject *__pyx_n_s_calculate_moments_sequential;
static PyObject *__pyx_n_s_calculate_probs_parallel;
static PyObject *__pyx_n_s_class;
static PyObject *__pyx_kp_s_contiguous_and_direct;
static PyObject *__pyx_kp_s_contiguous_and_indirect;
static PyObject *__pyx_n_s_d;
static PyObject *__pyx_n_s_dtype_is_object;
static PyObject *__pyx_n_s_encode;
static PyObject *__pyx_n_s_enumerate;
static PyObject *__pyx_n_s_error;
static PyObject *__pyx_n_s_first_mom;
static PyObject *__pyx_n_s_first_moms;
static PyObject *__pyx_n_s_flags;
static PyObject *__pyx_n_s_force;
static PyObject *__pyx_n_s_format;
static PyObject *__pyx_n_s_fortran;
static PyObject *__pyx_n_u_fortran;
static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi;
static PyObject *__pyx_kp_s_home_matt_Projects_boltzmann_ma;
static PyObject *__pyx_n_s_i;
static PyObject *__pyx_n_s_id;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_intervals;
static PyObject *__pyx_n_s_itemsize;
static PyObject *__pyx_kp_s_itemsize_0_for_cython_array;
static PyObject *__pyx_n_s_j;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_memview;
static PyObject *__pyx_n_s_mode;
static PyObject *__pyx_n_s_name;
static PyObject *__pyx_n_s_name_2;
static PyObject *__pyx_n_s_ndim;
static PyObject *__pyx_n_s_norm_const;
static PyObject *__pyx_n_s_norm_consts;
static PyObject *__pyx_n_s_num_states;
static PyObject *__pyx_n_s_num_threads;
static PyObject *__pyx_n_s_num_units;
static PyObject *__pyx_n_s_obj;
static PyObject *__pyx_n_s_pack;
static PyObject *__pyx_n_s_prob;
static PyObject *__pyx_n_s_probs;
static PyObject *__pyx_n_s_pyx_getbuffer;
static PyObject *__pyx_n_s_pyx_vtable;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_second_mom;
static PyObject *__pyx_n_s_second_moms;
static PyObject *__pyx_n_s_shape;
static PyObject *__pyx_n_s_size;
static PyObject *__pyx_n_s_start;
static PyObject *__pyx_n_s_state;
static PyObject *__pyx_n_s_state_index;
static PyObject *__pyx_n_s_states;
static PyObject *__pyx_n_s_step;
static PyObject *__pyx_n_s_stop;
static PyObject *__pyx_kp_s_strided_and_direct;
static PyObject *__pyx_kp_s_strided_and_direct_or_indirect;
static PyObject *__pyx_kp_s_strided_and_indirect;
static PyObject *__pyx_n_s_struct;
static PyObject *__pyx_n_s_t;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_kp_s_unable_to_allocate_array_data;
static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str;
static PyObject *__pyx_n_s_unpack;
static PyObject *__pyx_n_s_weights;
static PyObject *__pyx_pf_7bmtools_5exact_7moments_calculate_moments_sequential(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_weights, __Pyx_memviewslice __pyx_v_biases, int __pyx_v_force); /* proto */
static PyObject *__pyx_pf_7bmtools_5exact_7moments_2calculate_moments_parallel(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_weights, __Pyx_memviewslice __pyx_v_biases, int __pyx_v_force, int __pyx_v_num_threads, __Pyx_memviewslice __pyx_v_norm_consts, __Pyx_memviewslice __pyx_v_first_moms, __Pyx_memviewslice __pyx_v_second_moms); /* proto */
static PyObject *__pyx_pf_7bmtools_5exact_7moments_4calculate_probs_parallel(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_weights, __Pyx_memviewslice __pyx_v_biases, int __pyx_v_force, int __pyx_v_num_threads); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_int_0;
static PyObject *__pyx_int_1;
static PyObject *__pyx_int_neg_1;
static __Pyx_memviewslice __pyx_k_;
static __Pyx_memviewslice __pyx_k__2;
static __Pyx_memviewslice __pyx_k__3;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__7;
static PyObject *__pyx_tuple__8;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_slice__15;
static PyObject *__pyx_slice__16;
static PyObject *__pyx_slice__17;
static PyObject *__pyx_tuple__10;
static PyObject *__pyx_tuple__11;
static PyObject *__pyx_tuple__12;
static PyObject *__pyx_tuple__13;
static PyObject *__pyx_tuple__14;
static PyObject *__pyx_tuple__18;
static PyObject *__pyx_tuple__19;
static PyObject *__pyx_tuple__21;
static PyObject *__pyx_tuple__23;
static PyObject *__pyx_tuple__25;
static PyObject *__pyx_tuple__26;
static PyObject *__pyx_tuple__27;
static PyObject *__pyx_tuple__28;
static PyObject *__pyx_tuple__29;
static PyObject *__pyx_codeobj__20;
static PyObject *__pyx_codeobj__22;
static PyObject *__pyx_codeobj__24;
/* "bmtools/exact/moments.pyx":24
* double log(double x) nogil
*
* def calculate_moments_sequential(double[:, :] weights, double[:] biases, # <<<<<<<<<<<<<<
* bint force=False):
* """Calculate Boltzmann machine distribution moments.
*/
/* Python wrapper */
static PyObject *__pyx_pw_7bmtools_5exact_7moments_1calculate_moments_sequential(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_7bmtools_5exact_7moments_calculate_moments_sequential[] = "calculate_moments_sequential(__Pyx_memviewslice weights, __Pyx_memviewslice biases, bool force=False)\nCalculate Boltzmann machine distribution moments.\n\n Calculates normalisation constant (~zeroth moment), first and second\n moments of Boltzmann machine invariant distribution specified by the\n provided weight and bias parameters. Moment calculations are done\n using a single thread iterating across all possible state configurations\n sequentially.\n\n Parameters\n ----------\n weights : double[:, :]\n Matrix of weight parameters. Should be symmetric and zero-diagonal.\n biases : double[:]\n Vector of bias parameters.\n force : bool (bint)\n Flag to override forced exit when number of units is more than 20\n due to large size of state space.\n\n Returns\n -------\n norm_const : double\n Sum of unnormalised probability terms across all states.\n first_mom : double[:]\n Expectation of state vector with respect to distribution.\n second_mom : double[:, :]\n Expectation of outer product of state vectors with respect to\n distribution.\n ";
static PyMethodDef __pyx_mdef_7bmtools_5exact_7moments_1calculate_moments_sequential = {"calculate_moments_sequential", (PyCFunction)__pyx_pw_7bmtools_5exact_7moments_1calculate_moments_sequential, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7bmtools_5exact_7moments_calculate_moments_sequential};
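/* Usage sketch (illustrative addition, not part of the generated module):
* the module name bmtools.exact.moments below matches the traceback strings
* in this file, and Cython typed memoryview parameters accept NumPy arrays
* of matching dtype, so a minimal call might look like:
*
*     import numpy as np
*     from bmtools.exact import moments
*
*     num_units = 3
*     weights = np.zeros((num_units, num_units))  # symmetric, zero-diagonal
*     biases = np.zeros(num_units)
*     norm_const, first_mom, second_mom = \
*         moments.calculate_moments_sequential(weights, biases)
*/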
static PyObject *__pyx_pw_7bmtools_5exact_7moments_1calculate_moments_sequential(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_weights = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_biases = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_v_force;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("calculate_moments_sequential (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_weights,&__pyx_n_s_biases,&__pyx_n_s_force,0};
PyObject* values[3] = {0,0,0};
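/* Note: the argument-unpacking switch statements below (here and in the
* analogous calculate_moments_parallel wrapper) fall through intentionally:
* starting from the number of positional arguments received, each case
* copies one argument into values[] and control continues into the next
* lower case. */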
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_weights)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_biases)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("calculate_moments_sequential", 0, 2, 3, 1); __PYX_ERR(0, 24, __pyx_L3_error)
}
case 2:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_force);
if (value) { values[2] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "calculate_moments_sequential") < 0)) __PYX_ERR(0, 24, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_weights = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[0]); if (unlikely(!__pyx_v_weights.memview)) __PYX_ERR(0, 24, __pyx_L3_error)
__pyx_v_biases = __Pyx_PyObject_to_MemoryviewSlice_ds_double(values[1]); if (unlikely(!__pyx_v_biases.memview)) __PYX_ERR(0, 24, __pyx_L3_error)
if (values[2]) {
__pyx_v_force = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_force == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 25, __pyx_L3_error)
} else {
/* "bmtools/exact/moments.pyx":25
*
* def calculate_moments_sequential(double[:, :] weights, double[:] biases,
* bint force=False): # <<<<<<<<<<<<<<
* """Calculate Boltzmann machine distribution moments.
*
*/
__pyx_v_force = ((int)0);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("calculate_moments_sequential", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 24, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("bmtools.exact.moments.calculate_moments_sequential", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_7bmtools_5exact_7moments_calculate_moments_sequential(__pyx_self, __pyx_v_weights, __pyx_v_biases, __pyx_v_force);
/* "bmtools/exact/moments.pyx":24
* double log(double x) nogil
*
* def calculate_moments_sequential(double[:, :] weights, double[:] biases, # <<<<<<<<<<<<<<
* bint force=False):
* """Calculate Boltzmann machine distribution moments.
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_7bmtools_5exact_7moments_calculate_moments_sequential(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_weights, __Pyx_memviewslice __pyx_v_biases, int __pyx_v_force) {
int __pyx_v_num_units;
CYTHON_UNUSED double __pyx_v_prob;
double __pyx_v_norm_const;
long __pyx_v_num_states;
__Pyx_memviewslice __pyx_v_state = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_first_mom = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_second_mom = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_memviewslice __pyx_t_4 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_t_6 = NULL;
__Pyx_memviewslice __pyx_t_7 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_RefNannySetupContext("calculate_moments_sequential", 0);
/* "bmtools/exact/moments.pyx":56
* cdef long state_index
* cdef int i, j
* cdef int num_units = weights.shape[0] # <<<<<<<<<<<<<<
* cdef double prob = 0.
* cdef double norm_const = 0.
*/
__pyx_v_num_units = (__pyx_v_weights.shape[0]);
/* "bmtools/exact/moments.pyx":57
* cdef int i, j
* cdef int num_units = weights.shape[0]
* cdef double prob = 0. # <<<<<<<<<<<<<<
* cdef double norm_const = 0.
* cdef long num_states = 2**num_units
*/
__pyx_v_prob = 0.;
/* "bmtools/exact/moments.pyx":58
* cdef int num_units = weights.shape[0]
* cdef double prob = 0.
* cdef double norm_const = 0. # <<<<<<<<<<<<<<
* cdef long num_states = 2**num_units
* check_state_space_size(num_units, force)
*/
__pyx_v_norm_const = 0.;
/* "bmtools/exact/moments.pyx":59
* cdef double prob = 0.
* cdef double norm_const = 0.
* cdef long num_states = 2**num_units # <<<<<<<<<<<<<<
* check_state_space_size(num_units, force)
* cdef state_t[:] state = array(
*/
__pyx_v_num_states = __Pyx_pow_long(2, ((long)__pyx_v_num_units));
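/* The enumeration below visits 2**num_units states, doubling with every
* additional unit (2**20 = 1,048,576 states); this exponential growth is why
* check_state_space_size forces an exit above 20 units unless the force flag
* is set (see the function docstring). */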
/* "bmtools/exact/moments.pyx":60
* cdef double norm_const = 0.
* cdef long num_states = 2**num_units
* check_state_space_size(num_units, force) # <<<<<<<<<<<<<<
* cdef state_t[:] state = array(
* shape=(num_units,), itemsize=sizeof(state_t), format=state_t_code)
*/
__pyx_f_7bmtools_5exact_7helpers_check_state_space_size(__pyx_v_num_units, __pyx_v_force, 0);
/* "bmtools/exact/moments.pyx":62
* check_state_space_size(num_units, force)
* cdef state_t[:] state = array(
* shape=(num_units,), itemsize=sizeof(state_t), format=state_t_code) # <<<<<<<<<<<<<<
* cdef double[:] first_mom = array(
* shape=(num_units,), itemsize=sizeof(double), format='d')
*/
__pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_num_units); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2);
__pyx_t_2 = 0;
if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_shape, __pyx_t_3) < 0) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyInt_FromSize_t((sizeof(__pyx_t_7bmtools_5exact_7helpers_state_t))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_itemsize, __pyx_t_3) < 0) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_7bmtools_5exact_7helpers_state_t_code); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_format, __pyx_t_3) < 0) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "bmtools/exact/moments.pyx":61
* cdef long num_states = 2**num_units
* check_state_space_size(num_units, force)
* cdef state_t[:] state = array( # <<<<<<<<<<<<<<
* shape=(num_units,), itemsize=sizeof(state_t), format=state_t_code)
* cdef double[:] first_mom = array(
*/
__pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_empty_tuple, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_4 = __Pyx_PyObject_to_MemoryviewSlice_ds_nn___pyx_t_7bmtools_5exact_7helpers_state_t(__pyx_t_3);
if (unlikely(!__pyx_t_4.memview)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_state = __pyx_t_4;
__pyx_t_4.memview = NULL;
__pyx_t_4.data = NULL;
/* "bmtools/exact/moments.pyx":64
* shape=(num_units,), itemsize=sizeof(state_t), format=state_t_code)
* cdef double[:] first_mom = array(
* shape=(num_units,), itemsize=sizeof(double), format='d') # <<<<<<<<<<<<<<
* cdef double[:, :] second_mom = array(
* shape=(num_units, num_units), itemsize=sizeof(double), format='d')
*/
__pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 64, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_num_units); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 64, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 64, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__pyx_t_1 = 0;
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_shape, __pyx_t_2) < 0) __PYX_ERR(0, 64, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyInt_FromSize_t((sizeof(double))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 64, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_itemsize, __pyx_t_2) < 0) __PYX_ERR(0, 64, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_format, __pyx_n_s_d) < 0) __PYX_ERR(0, 64, __pyx_L1_error)
/* "bmtools/exact/moments.pyx":63
* cdef state_t[:] state = array(
* shape=(num_units,), itemsize=sizeof(state_t), format=state_t_code)
* cdef double[:] first_mom = array( # <<<<<<<<<<<<<<
* shape=(num_units,), itemsize=sizeof(double), format='d')
* cdef double[:, :] second_mom = array(
*/
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_empty_tuple, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 63, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_5 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_2);
if (unlikely(!__pyx_t_5.memview)) __PYX_ERR(0, 63, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_first_mom = __pyx_t_5;
__pyx_t_5.memview = NULL;
__pyx_t_5.data = NULL;
/* "bmtools/exact/moments.pyx":66
* shape=(num_units,), itemsize=sizeof(double), format='d')
* cdef double[:, :] second_mom = array(
* shape=(num_units, num_units), itemsize=sizeof(double), format='d') # <<<<<<<<<<<<<<
* accum_moments_for_state_range(weights, biases, state, &norm_const,
* first_mom, second_mom, 0, num_states)
*/
__pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 66, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_num_units); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 66, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_num_units); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 66, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 66, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_1);
__pyx_t_3 = 0;
__pyx_t_1 = 0;
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_shape, __pyx_t_6) < 0) __PYX_ERR(0, 66, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyInt_FromSize_t((sizeof(double))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 66, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_itemsize, __pyx_t_6) < 0) __PYX_ERR(0, 66, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_format, __pyx_n_s_d) < 0) __PYX_ERR(0, 66, __pyx_L1_error)
/* "bmtools/exact/moments.pyx":65
* cdef double[:] first_mom = array(
* shape=(num_units,), itemsize=sizeof(double), format='d')
* cdef double[:, :] second_mom = array( # <<<<<<<<<<<<<<
* shape=(num_units, num_units), itemsize=sizeof(double), format='d')
* accum_moments_for_state_range(weights, biases, state, &norm_const,
*/
__pyx_t_6 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_empty_tuple, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 65, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_7 = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(__pyx_t_6);
if (unlikely(!__pyx_t_7.memview)) __PYX_ERR(0, 65, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_v_second_mom = __pyx_t_7;
__pyx_t_7.memview = NULL;
__pyx_t_7.data = NULL;
/* "bmtools/exact/moments.pyx":67
* cdef double[:, :] second_mom = array(
* shape=(num_units, num_units), itemsize=sizeof(double), format='d')
* accum_moments_for_state_range(weights, biases, state, &norm_const, # <<<<<<<<<<<<<<
* first_mom, second_mom, 0, num_states)
* normalise_first_moment(first_mom, norm_const)
*/
__pyx_f_7bmtools_5exact_7moments_accum_moments_for_state_range(__pyx_v_weights, __pyx_v_biases, __pyx_v_state, (&__pyx_v_norm_const), __pyx_v_first_mom, __pyx_v_second_mom, 0, __pyx_v_num_states);
/* "bmtools/exact/moments.pyx":69
* accum_moments_for_state_range(weights, biases, state, &norm_const,
* first_mom, second_mom, 0, num_states)
* normalise_first_moment(first_mom, norm_const) # <<<<<<<<<<<<<<
* normalise_and_reflect_second_moment(second_mom, norm_const)
* return norm_const, first_mom, second_mom
*/
__pyx_f_7bmtools_5exact_7moments_normalise_first_moment(__pyx_v_first_mom, __pyx_v_norm_const);
/* "bmtools/exact/moments.pyx":70
* first_mom, second_mom, 0, num_states)
* normalise_first_moment(first_mom, norm_const)
* normalise_and_reflect_second_moment(second_mom, norm_const) # <<<<<<<<<<<<<<
* return norm_const, first_mom, second_mom
*
*/
__pyx_f_7bmtools_5exact_7moments_normalise_and_reflect_second_moment(__pyx_v_second_mom, __pyx_v_norm_const);
/* "bmtools/exact/moments.pyx":71
* normalise_first_moment(first_mom, norm_const)
* normalise_and_reflect_second_moment(second_mom, norm_const)
* return norm_const, first_mom, second_mom # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_6 = PyFloat_FromDouble(__pyx_v_norm_const); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_first_mom, 1, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_second_mom, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_1);
__pyx_t_6 = 0;
__pyx_t_2 = 0;
__pyx_t_1 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "bmtools/exact/moments.pyx":24
* double log(double x) nogil
*
* def calculate_moments_sequential(double[:, :] weights, double[:] biases, # <<<<<<<<<<<<<<
* bint force=False):
* """Calculate Boltzmann machine distribution moments.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__PYX_XDEC_MEMVIEW(&__pyx_t_4, 1);
__PYX_XDEC_MEMVIEW(&__pyx_t_5, 1);
__Pyx_XDECREF(__pyx_t_6);
__PYX_XDEC_MEMVIEW(&__pyx_t_7, 1);
__Pyx_AddTraceback("bmtools.exact.moments.calculate_moments_sequential", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__PYX_XDEC_MEMVIEW(&__pyx_v_state, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_first_mom, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_second_mom, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_weights, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_biases, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "bmtools/exact/moments.pyx":74
*
*
* def calculate_moments_parallel(double[:, :] weights, double[:] biases, # <<<<<<<<<<<<<<
* bint force=False, int num_threads=2,
* double[:] norm_consts=None,
*/
/* Python wrapper */
static PyObject *__pyx_pw_7bmtools_5exact_7moments_3calculate_moments_parallel(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_7bmtools_5exact_7moments_2calculate_moments_parallel[] = "calculate_moments_parallel(__Pyx_memviewslice weights, __Pyx_memviewslice biases, bool force=False, int num_threads=2, __Pyx_memviewslice norm_consts=None, __Pyx_memviewslice first_moms=None, __Pyx_memviewslice second_moms=None)\nCalculate Boltzmann machine distribution moments.\n\n    Calculates the normalisation constant (~zeroth moment) and the first and\n    second moments of the Boltzmann machine invariant distribution specified\n    by the provided weight and bias parameters. Moment calculations are done\n    in parallel using a specified number of threads, each iterating across an\n    equal partition of the state space.\n\n    Parameters\n    ----------\n    weights : double[:, :]\n        Matrix of weight parameters. Should be symmetric and zero-diagonal.\n    biases : double[:]\n        Vector of bias parameters.\n    force : bool (bint)\n        Flag to override the forced exit when the number of units is more\n        than 20, due to the large size of the state space.\n    num_threads : int (default=2)\n        Number of parallel threads to use.\n    norm_consts : double[:], optional\n        Allocated array to use in the parallel normalisation constant\n        calculation. Should have shape (num_threads,). The final value\n        accumulated over all threads will be written to the first entry. If\n        not specified (or either of first_moms or second_moms not specified)\n        a new array is allocated and the accumulated value returned.\n    first_moms : double[:, :], optional\n        Allocated array to use in the parallel first moment calculation.\n        Should have shape (num_threads, num_units). The final values\n        accumulated over all threads will be written to the first entry. If\n        not specified (or either of norm_consts or second_moms not specified)\n        a new array is allocated and its first entry returned.\n    second_moms : double[:, :, :], optional\n        Allocated array to use in the parallel second moment calculation.\n        Should have shape (num_threads, num_units, num_units). The final\n        values accum""ulated over all threads will be written to the first\n        entry. If not specified (or either of norm_consts or first_moms not\n        specified) a new array is allocated and its first entry returned.\n\n    Returns\n    -------\n    **Only returned if not all arrays were specified for in-place\n    calculation; otherwise nothing is returned.**\n    norm_const : double\n        Sum of unnormalised probability terms across all states.\n    first_mom : double[:]\n        Expectation of the state vector with respect to the distribution.\n    second_mom : double[:, :]\n        Expectation of the outer product of state vectors with respect to\n        the distribution.\n    ";
static PyMethodDef __pyx_mdef_7bmtools_5exact_7moments_3calculate_moments_parallel = {"calculate_moments_parallel", (PyCFunction)__pyx_pw_7bmtools_5exact_7moments_3calculate_moments_parallel, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7bmtools_5exact_7moments_2calculate_moments_parallel};
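/* Usage sketch (illustrative addition, not part of the generated module),
* showing the two calling modes described in the docstring; array shapes
* follow the docstring's per-thread layout:
*
*     import numpy as np
*     from bmtools.exact import moments
*
*     num_units, num_threads = 3, 2
*     weights = np.zeros((num_units, num_units))  # symmetric, zero-diagonal
*     biases = np.zeros(num_units)
*
*     # Mode 1: let the function allocate arrays and return the results.
*     norm_const, first_mom, second_mom = moments.calculate_moments_parallel(
*         weights, biases, num_threads=num_threads)
*
*     # Mode 2: supply all three per-thread arrays; the results are written
*     # in place (into the first entries) and nothing is returned.
*     norm_consts = np.empty(num_threads)
*     first_moms = np.empty((num_threads, num_units))
*     second_moms = np.empty((num_threads, num_units, num_units))
*     moments.calculate_moments_parallel(
*         weights, biases, num_threads=num_threads, norm_consts=norm_consts,
*         first_moms=first_moms, second_moms=second_moms)
*/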
static PyObject *__pyx_pw_7bmtools_5exact_7moments_3calculate_moments_parallel(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_weights = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_biases = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_v_force;
int __pyx_v_num_threads;
__Pyx_memviewslice __pyx_v_norm_consts = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_first_moms = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_second_moms = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("calculate_moments_parallel (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_weights,&__pyx_n_s_biases,&__pyx_n_s_force,&__pyx_n_s_num_threads,&__pyx_n_s_norm_consts,&__pyx_n_s_first_moms,&__pyx_n_s_second_moms,0};
PyObject* values[7] = {0,0,0,0,0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_weights)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_biases)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("calculate_moments_parallel", 0, 2, 7, 1); __PYX_ERR(0, 74, __pyx_L3_error)
}
case 2:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_force);
if (value) { values[2] = value; kw_args--; }
}
case 3:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_num_threads);
if (value) { values[3] = value; kw_args--; }
}
case 4:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_norm_consts);
if (value) { values[4] = value; kw_args--; }
}
case 5:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_first_moms);
if (value) { values[5] = value; kw_args--; }
}
case 6:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_second_moms);
if (value) { values[6] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "calculate_moments_parallel") < 0)) __PYX_ERR(0, 74, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_weights = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[0]); if (unlikely(!__pyx_v_weights.memview)) __PYX_ERR(0, 74, __pyx_L3_error)
__pyx_v_biases = __Pyx_PyObject_to_MemoryviewSlice_ds_double(values[1]); if (unlikely(!__pyx_v_biases.memview)) __PYX_ERR(0, 74, __pyx_L3_error)
if (values[2]) {
__pyx_v_force = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_force == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 75, __pyx_L3_error)
} else {
/* "bmtools/exact/moments.pyx":75
*
* def calculate_moments_parallel(double[:, :] weights, double[:] biases,
* bint force=False, int num_threads=2, # <<<<<<<<<<<<<<
* double[:] norm_consts=None,
* double[:, :] first_moms=None,
*/
__pyx_v_force = ((int)0);
}
if (values[3]) {
__pyx_v_num_threads = __Pyx_PyInt_As_int(values[3]); if (unlikely((__pyx_v_num_threads == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 75, __pyx_L3_error)
} else {
__pyx_v_num_threads = ((int)2);
}
if (values[4]) {
__pyx_v_norm_consts = __Pyx_PyObject_to_MemoryviewSlice_ds_double(values[4]); if (unlikely(!__pyx_v_norm_consts.memview)) __PYX_ERR(0, 76, __pyx_L3_error)
} else {
__pyx_v_norm_consts = __pyx_k_;
__PYX_INC_MEMVIEW(&__pyx_v_norm_consts, 1);
}
if (values[5]) {
__pyx_v_first_moms = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[5]); if (unlikely(!__pyx_v_first_moms.memview)) __PYX_ERR(0, 77, __pyx_L3_error)
} else {
__pyx_v_first_moms = __pyx_k__2;
__PYX_INC_MEMVIEW(&__pyx_v_first_moms, 1);
}
if (values[6]) {
__pyx_v_second_moms = __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(values[6]); if (unlikely(!__pyx_v_second_moms.memview)) __PYX_ERR(0, 78, __pyx_L3_error)
} else {
__pyx_v_second_moms = __pyx_k__3;
__PYX_INC_MEMVIEW(&__pyx_v_second_moms, 1);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("calculate_moments_parallel", 0, 2, 7, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 74, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("bmtools.exact.moments.calculate_moments_parallel", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_7bmtools_5exact_7moments_2calculate_moments_parallel(__pyx_self, __pyx_v_weights, __pyx_v_biases, __pyx_v_force, __pyx_v_num_threads, __pyx_v_norm_consts, __pyx_v_first_moms, __pyx_v_second_moms);
/* "bmtools/exact/moments.pyx":74
*
*
* def calculate_moments_parallel(double[:, :] weights, double[:] biases, # <<<<<<<<<<<<<<
* bint force=False, int num_threads=2,
* double[:] norm_consts=None,
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_7bmtools_5exact_7moments_2calculate_moments_parallel(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_weights, __Pyx_memviewslice __pyx_v_biases, int __pyx_v_force, int __pyx_v_num_threads, __Pyx_memviewslice __pyx_v_norm_consts, __Pyx_memviewslice __pyx_v_first_moms, __Pyx_memviewslice __pyx_v_second_moms) {
int __pyx_v_t;
int __pyx_v_num_units;
CYTHON_UNUSED double __pyx_v_prob;
long __pyx_v_num_states;
__Pyx_memviewslice __pyx_v_states = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_v_all_in_place;
__Pyx_memviewslice __pyx_v_intervals = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
__Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_t_7;
__Pyx_memviewslice __pyx_t_8 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_t_9 = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_t_10 = NULL;
__Pyx_memviewslice __pyx_t_11 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_t_12 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_t_13;
int __pyx_t_14;
int __pyx_t_15;
int __pyx_t_16;
Py_ssize_t __pyx_t_17;
__Pyx_memviewslice __pyx_t_18 = { 0, 0, { 0 }, { 0 }, { 0 } };
Py_ssize_t __pyx_t_19;
Py_ssize_t __pyx_t_20;
Py_ssize_t __pyx_t_21;
Py_ssize_t __pyx_t_22;
Py_ssize_t __pyx_t_23;
Py_ssize_t __pyx_t_24;
Py_ssize_t __pyx_t_25;
Py_ssize_t __pyx_t_26;
Py_ssize_t __pyx_t_27;
Py_ssize_t __pyx_t_28;
__Pyx_RefNannySetupContext("calculate_moments_parallel", 0);
/* "bmtools/exact/moments.pyx":129
* """
* cdef int t
* cdef int num_units = weights.shape[0] # <<<<<<<<<<<<<<
* cdef double prob = 0.
* cdef long num_states = 2**num_units
*/
__pyx_v_num_units = (__pyx_v_weights.shape[0]);
/* "bmtools/exact/moments.pyx":130
* cdef int t
* cdef int num_units = weights.shape[0]
* cdef double prob = 0. # <<<<<<<<<<<<<<
* cdef long num_states = 2**num_units
* if num_threads <= 0:
*/
__pyx_v_prob = 0.;
/* "bmtools/exact/moments.pyx":131
* cdef int num_units = weights.shape[0]
* cdef double prob = 0.
* cdef long num_states = 2**num_units # <<<<<<<<<<<<<<
* if num_threads <= 0:
* raise ValueError('Number of threads must be > 0')
*/
__pyx_v_num_states = __Pyx_pow_long(2, ((long)__pyx_v_num_units));
/* "bmtools/exact/moments.pyx":132
* cdef double prob = 0.
* cdef long num_states = 2**num_units
* if num_threads <= 0: # <<<<<<<<<<<<<<
* raise ValueError('Number of threads must be > 0')
* check_state_space_size(num_units, force)
*/
__pyx_t_1 = ((__pyx_v_num_threads <= 0) != 0);
if (__pyx_t_1) {
/* "bmtools/exact/moments.pyx":133
* cdef long num_states = 2**num_units
* if num_threads <= 0:
* raise ValueError('Number of threads must be > 0') # <<<<<<<<<<<<<<
* check_state_space_size(num_units, force)
* cdef state_t[:, :] states = array(
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(0, 133, __pyx_L1_error)
/* "bmtools/exact/moments.pyx":132
* cdef double prob = 0.
* cdef long num_states = 2**num_units
* if num_threads <= 0: # <<<<<<<<<<<<<<
* raise ValueError('Number of threads must be > 0')
* check_state_space_size(num_units, force)
*/
}
/* "bmtools/exact/moments.pyx":134
* if num_threads <= 0:
* raise ValueError('Number of threads must be > 0')
* check_state_space_size(num_units, force) # <<<<<<<<<<<<<<
* cdef state_t[:, :] states = array(
* shape=(num_threads, num_units), itemsize=sizeof(state_t),
*/
__pyx_f_7bmtools_5exact_7helpers_check_state_space_size(__pyx_v_num_units, __pyx_v_force, 0);
/* "bmtools/exact/moments.pyx":136
* check_state_space_size(num_units, force)
* cdef state_t[:, :] states = array(
* shape=(num_threads, num_units), itemsize=sizeof(state_t), # <<<<<<<<<<<<<<
* format=state_t_code)
* # check if any arrays for in-place updates were not specified and if so
*/
__pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 136, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_num_threads); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 136, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_num_units); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 136, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 136, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4);
__pyx_t_3 = 0;
__pyx_t_4 = 0;
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_shape, __pyx_t_5) < 0) __PYX_ERR(0, 136, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyInt_FromSize_t((sizeof(__pyx_t_7bmtools_5exact_7helpers_state_t))); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 136, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_itemsize, __pyx_t_5) < 0) __PYX_ERR(0, 136, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
/* "bmtools/exact/moments.pyx":137
* cdef state_t[:, :] states = array(
* shape=(num_threads, num_units), itemsize=sizeof(state_t),
* format=state_t_code) # <<<<<<<<<<<<<<
* # check if any arrays for in-place updates were not specified and if so
* # initialise
*/
__pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_7bmtools_5exact_7helpers_state_t_code); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 137, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_format, __pyx_t_5) < 0) __PYX_ERR(0, 136, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
/* "bmtools/exact/moments.pyx":135
* raise ValueError('Number of threads must be > 0')
* check_state_space_size(num_units, force)
* cdef state_t[:, :] states = array( # <<<<<<<<<<<<<<
* shape=(num_threads, num_units), itemsize=sizeof(state_t),
* format=state_t_code)
*/
__pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_empty_tuple, __pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 135, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_dsds_nn___pyx_t_7bmtools_5exact_7helpers_state_t(__pyx_t_5);
if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 135, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_states = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "bmtools/exact/moments.pyx":140
* # check if any arrays for in-place updates were not specified and if so
* # initialise
* cdef bint all_in_place = True # <<<<<<<<<<<<<<
* if norm_consts is None or first_moms is None or second_moms is None:
* all_in_place = False
*/
__pyx_v_all_in_place = 1;
/* "bmtools/exact/moments.pyx":141
* # initialise
* cdef bint all_in_place = True
* if norm_consts is None or first_moms is None or second_moms is None: # <<<<<<<<<<<<<<
* all_in_place = False
* norm_consts = array(shape=(num_threads,),
*/
__pyx_t_7 = ((((PyObject *) __pyx_v_norm_consts.memview) == Py_None) != 0);
if (!__pyx_t_7) {
} else {
__pyx_t_1 = __pyx_t_7;
goto __pyx_L5_bool_binop_done;
}
__pyx_t_7 = ((((PyObject *) __pyx_v_first_moms.memview) == Py_None) != 0);
if (!__pyx_t_7) {
} else {
__pyx_t_1 = __pyx_t_7;
goto __pyx_L5_bool_binop_done;
}
__pyx_t_7 = ((((PyObject *) __pyx_v_second_moms.memview) == Py_None) != 0);
__pyx_t_1 = __pyx_t_7;
__pyx_L5_bool_binop_done:;
if (__pyx_t_1) {
/* "bmtools/exact/moments.pyx":142
* cdef bint all_in_place = True
* if norm_consts is None or first_moms is None or second_moms is None:
* all_in_place = False # <<<<<<<<<<<<<<
* norm_consts = array(shape=(num_threads,),
* itemsize=sizeof(double), format='d')
*/
__pyx_v_all_in_place = 0;
/* "bmtools/exact/moments.pyx":143
* if norm_consts is None or first_moms is None or second_moms is None:
* all_in_place = False
* norm_consts = array(shape=(num_threads,), # <<<<<<<<<<<<<<
* itemsize=sizeof(double), format='d')
* if first_moms is None:
*/
__pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 143, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_num_threads); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 143, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 143, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
__pyx_t_2 = 0;
if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_shape, __pyx_t_4) < 0) __PYX_ERR(0, 143, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
/* "bmtools/exact/moments.pyx":144
* all_in_place = False
* norm_consts = array(shape=(num_threads,),
* itemsize=sizeof(double), format='d') # <<<<<<<<<<<<<<
* if first_moms is None:
* all_in_place = False
*/
__pyx_t_4 = __Pyx_PyInt_FromSize_t((sizeof(double))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 144, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_itemsize, __pyx_t_4) < 0) __PYX_ERR(0, 143, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_format, __pyx_n_s_d) < 0) __PYX_ERR(0, 143, __pyx_L1_error)
/* "bmtools/exact/moments.pyx":143
* if norm_consts is None or first_moms is None or second_moms is None:
* all_in_place = False
* norm_consts = array(shape=(num_threads,), # <<<<<<<<<<<<<<
* itemsize=sizeof(double), format='d')
* if first_moms is None:
*/
__pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_empty_tuple, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 143, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_8 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_4);
if (unlikely(!__pyx_t_8.memview)) __PYX_ERR(0, 143, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__PYX_XDEC_MEMVIEW(&__pyx_v_norm_consts, 1);
__pyx_v_norm_consts = __pyx_t_8;
__pyx_t_8.memview = NULL;
__pyx_t_8.data = NULL;
/* "bmtools/exact/moments.pyx":141
* # initialise
* cdef bint all_in_place = True
* if norm_consts is None or first_moms is None or second_moms is None: # <<<<<<<<<<<<<<
* all_in_place = False
* norm_consts = array(shape=(num_threads,),
*/
}
/* "bmtools/exact/moments.pyx":145
* norm_consts = array(shape=(num_threads,),
* itemsize=sizeof(double), format='d')
* if first_moms is None: # <<<<<<<<<<<<<<
* all_in_place = False
* first_moms = array(shape=(num_threads, num_units),
*/
__pyx_t_1 = ((((PyObject *) __pyx_v_first_moms.memview) == Py_None) != 0);
if (__pyx_t_1) {
/* "bmtools/exact/moments.pyx":146
* itemsize=sizeof(double), format='d')
* if first_moms is None:
* all_in_place = False # <<<<<<<<<<<<<<
* first_moms = array(shape=(num_threads, num_units),
* itemsize=sizeof(double), format='d')
*/
__pyx_v_all_in_place = 0;
/* "bmtools/exact/moments.pyx":147
* if first_moms is None:
* all_in_place = False
* first_moms = array(shape=(num_threads, num_units), # <<<<<<<<<<<<<<
* itemsize=sizeof(double), format='d')
* if second_moms is None:
*/
__pyx_t_4 = PyDict_New(); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 147, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_num_threads); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 147, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_num_units); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 147, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 147, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2);
__pyx_t_5 = 0;
__pyx_t_2 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_shape, __pyx_t_3) < 0) __PYX_ERR(0, 147, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "bmtools/exact/moments.pyx":148
* all_in_place = False
* first_moms = array(shape=(num_threads, num_units),
* itemsize=sizeof(double), format='d') # <<<<<<<<<<<<<<
* if second_moms is None:
* all_in_place = False
*/
__pyx_t_3 = __Pyx_PyInt_FromSize_t((sizeof(double))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_itemsize, __pyx_t_3) < 0) __PYX_ERR(0, 147, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_format, __pyx_n_s_d) < 0) __PYX_ERR(0, 147, __pyx_L1_error)
/* "bmtools/exact/moments.pyx":147
* if first_moms is None:
* all_in_place = False
* first_moms = array(shape=(num_threads, num_units), # <<<<<<<<<<<<<<
* itemsize=sizeof(double), format='d')
* if second_moms is None:
*/
__pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_empty_tuple, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 147, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_9 = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(__pyx_t_3);
if (unlikely(!__pyx_t_9.memview)) __PYX_ERR(0, 147, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_XDEC_MEMVIEW(&__pyx_v_first_moms, 1);
__pyx_v_first_moms = __pyx_t_9;
__pyx_t_9.memview = NULL;
__pyx_t_9.data = NULL;
/* "bmtools/exact/moments.pyx":145
* norm_consts = array(shape=(num_threads,),
* itemsize=sizeof(double), format='d')
* if first_moms is None: # <<<<<<<<<<<<<<
* all_in_place = False
* first_moms = array(shape=(num_threads, num_units),
*/
}
/* "bmtools/exact/moments.pyx":149
* first_moms = array(shape=(num_threads, num_units),
* itemsize=sizeof(double), format='d')
* if second_moms is None: # <<<<<<<<<<<<<<
* all_in_place = False
* second_moms = array(shape=(num_threads, num_units, num_units),
*/
__pyx_t_1 = ((((PyObject *) __pyx_v_second_moms.memview) == Py_None) != 0);
if (__pyx_t_1) {
/* "bmtools/exact/moments.pyx":150
* itemsize=sizeof(double), format='d')
* if second_moms is None:
* all_in_place = False # <<<<<<<<<<<<<<
* second_moms = array(shape=(num_threads, num_units, num_units),
* itemsize=sizeof(double), format='d')
*/
__pyx_v_all_in_place = 0;
/* "bmtools/exact/moments.pyx":151
* if second_moms is None:
* all_in_place = False
* second_moms = array(shape=(num_threads, num_units, num_units), # <<<<<<<<<<<<<<
* itemsize=sizeof(double), format='d')
* # partition state space into equal-sized sections to allocate to
*/
__pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_num_threads); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_num_units); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_num_units); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_10 = PyTuple_New(3); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_10, 2, __pyx_t_5);
__pyx_t_4 = 0;
__pyx_t_2 = 0;
__pyx_t_5 = 0;
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_shape, __pyx_t_10) < 0) __PYX_ERR(0, 151, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
/* "bmtools/exact/moments.pyx":152
* all_in_place = False
* second_moms = array(shape=(num_threads, num_units, num_units),
* itemsize=sizeof(double), format='d') # <<<<<<<<<<<<<<
* # partition state space into equal-sized sections to allocate to
* # different parallel threads
*/
__pyx_t_10 = __Pyx_PyInt_FromSize_t((sizeof(double))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 152, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_itemsize, __pyx_t_10) < 0) __PYX_ERR(0, 151, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_format, __pyx_n_s_d) < 0) __PYX_ERR(0, 151, __pyx_L1_error)
/* "bmtools/exact/moments.pyx":151
* if second_moms is None:
* all_in_place = False
* second_moms = array(shape=(num_threads, num_units, num_units), # <<<<<<<<<<<<<<
* itemsize=sizeof(double), format='d')
* # partition state space into equal-sized sections to allocate to
*/
__pyx_t_10 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_empty_tuple, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_11 = __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(__pyx_t_10);
if (unlikely(!__pyx_t_11.memview)) __PYX_ERR(0, 151, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_XDEC_MEMVIEW(&__pyx_v_second_moms, 1);
__pyx_v_second_moms = __pyx_t_11;
__pyx_t_11.memview = NULL;
__pyx_t_11.data = NULL;
/* "bmtools/exact/moments.pyx":149
* first_moms = array(shape=(num_threads, num_units),
* itemsize=sizeof(double), format='d')
* if second_moms is None: # <<<<<<<<<<<<<<
* all_in_place = False
* second_moms = array(shape=(num_threads, num_units, num_units),
*/
}
/* "bmtools/exact/moments.pyx":155
* # partition state space into equal-sized sections to allocate to
* # different parallel threads
* cdef long[:] intervals = partition_state_space(num_states, num_threads) # <<<<<<<<<<<<<<
* # parallel loop over partitions of state space, with each thread
* # accumulating moments for its assigned states into thread-specific
*/
__pyx_t_12 = __pyx_f_7bmtools_5exact_7helpers_partition_state_space(__pyx_v_num_states, __pyx_v_num_threads); if (unlikely(!__pyx_t_12.memview)) __PYX_ERR(0, 155, __pyx_L1_error)
__pyx_v_intervals = __pyx_t_12;
__pyx_t_12.memview = NULL;
__pyx_t_12.data = NULL;
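/* Given how intervals is indexed in the parallel loop below (thread t covers
* states from intervals[t] to intervals[t+1]), partition_state_space
* presumably returns num_threads + 1 boundary indices spanning
* [0, num_states]; e.g. for num_states = 16 and num_threads = 4 an equal
* partition would be [0, 4, 8, 12, 16]. */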
/* "bmtools/exact/moments.pyx":159
* # accumulating moments for its assigned states into thread-specific
* # arrays
* for t in prange(num_threads, nogil=True, schedule='static', chunksize=1, # <<<<<<<<<<<<<<
* num_threads=num_threads):
* norm_consts[t] = 0.
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
#endif
/*try:*/ {
__pyx_t_13 = __pyx_v_num_threads;
if (1 == 0) abort();
{
int __pyx_parallel_temp0 = 0xbad0bad0;
const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0;
PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL;
int __pyx_parallel_why;
__pyx_parallel_why = 0;
__pyx_t_16 = 1;
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
__pyx_t_15 = (__pyx_t_13 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_15 > 0)
{
#ifdef _OPENMP
#pragma omp parallel num_threads(__pyx_v_num_threads) private(__pyx_t_17, __pyx_t_19, __pyx_t_20, __pyx_t_21) firstprivate(__pyx_t_18, __pyx_t_8, __pyx_t_9) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
Py_BEGIN_ALLOW_THREADS
#endif /* _OPENMP */
#ifdef _OPENMP
#pragma omp for firstprivate(__pyx_v_t) lastprivate(__pyx_v_t) schedule(static, __pyx_t_16)
#endif /* _OPENMP */
for (__pyx_t_14 = 0; __pyx_t_14 < __pyx_t_15; __pyx_t_14++){
if (__pyx_parallel_why < 2)
{
__pyx_v_t = (int)(0 + 1 * __pyx_t_14);
/* "bmtools/exact/moments.pyx":161
* for t in prange(num_threads, nogil=True, schedule='static', chunksize=1,
* num_threads=num_threads):
* norm_consts[t] = 0. # <<<<<<<<<<<<<<
* accum_moments_for_state_range(
* weights, biases, states[t], &norm_consts[t], first_moms[t],
*/
__pyx_t_17 = __pyx_v_t;
*((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data + __pyx_t_17 * __pyx_v_norm_consts.strides[0]) )) = 0.;
/* "bmtools/exact/moments.pyx":163
* norm_consts[t] = 0.
* accum_moments_for_state_range(
* weights, biases, states[t], &norm_consts[t], first_moms[t], # <<<<<<<<<<<<<<
* second_moms[t], intervals[t], intervals[t+1])
* # accumulate normalisation constant terms calculated by each individual
*/
__pyx_t_18.data = __pyx_v_states.data;
__pyx_t_18.memview = __pyx_v_states.memview;
__PYX_INC_MEMVIEW(&__pyx_t_18, 0);
{
Py_ssize_t __pyx_tmp_idx = __pyx_v_t;
Py_ssize_t __pyx_tmp_shape = __pyx_v_states.shape[0];
Py_ssize_t __pyx_tmp_stride = __pyx_v_states.strides[0];
if (0 && (__pyx_tmp_idx < 0))
__pyx_tmp_idx += __pyx_tmp_shape;
if (0 && (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 0)");
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
__PYX_ERR(0, 163, __pyx_L15_error)
}
__pyx_t_18.data += __pyx_tmp_idx * __pyx_tmp_stride;
}
__pyx_t_18.shape[0] = __pyx_v_states.shape[1];
__pyx_t_18.strides[0] = __pyx_v_states.strides[1];
__pyx_t_18.suboffsets[0] = -1;
__pyx_t_19 = __pyx_v_t;
__pyx_t_8.data = __pyx_v_first_moms.data;
__pyx_t_8.memview = __pyx_v_first_moms.memview;
__PYX_INC_MEMVIEW(&__pyx_t_8, 0);
{
Py_ssize_t __pyx_tmp_idx = __pyx_v_t;
Py_ssize_t __pyx_tmp_shape = __pyx_v_first_moms.shape[0];
Py_ssize_t __pyx_tmp_stride = __pyx_v_first_moms.strides[0];
if (0 && (__pyx_tmp_idx < 0))
__pyx_tmp_idx += __pyx_tmp_shape;
if (0 && (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 0)");
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
__PYX_ERR(0, 163, __pyx_L15_error)
}
__pyx_t_8.data += __pyx_tmp_idx * __pyx_tmp_stride;
}
__pyx_t_8.shape[0] = __pyx_v_first_moms.shape[1];
__pyx_t_8.strides[0] = __pyx_v_first_moms.strides[1];
__pyx_t_8.suboffsets[0] = -1;
__pyx_t_9.data = __pyx_v_second_moms.data;
/* "bmtools/exact/moments.pyx":164
* accum_moments_for_state_range(
* weights, biases, states[t], &norm_consts[t], first_moms[t],
* second_moms[t], intervals[t], intervals[t+1]) # <<<<<<<<<<<<<<
* # accumulate normalisation constant terms calculated by each individual
* # thread to get overall value
*/
__pyx_t_9.memview = __pyx_v_second_moms.memview;
__PYX_INC_MEMVIEW(&__pyx_t_9, 0);
{
Py_ssize_t __pyx_tmp_idx = __pyx_v_t;
Py_ssize_t __pyx_tmp_shape = __pyx_v_second_moms.shape[0];
Py_ssize_t __pyx_tmp_stride = __pyx_v_second_moms.strides[0];
if (0 && (__pyx_tmp_idx < 0))
__pyx_tmp_idx += __pyx_tmp_shape;
if (0 && (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 0)");
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
__PYX_ERR(0, 164, __pyx_L15_error)
}
__pyx_t_9.data += __pyx_tmp_idx * __pyx_tmp_stride;
}
__pyx_t_9.shape[0] = __pyx_v_second_moms.shape[1];
__pyx_t_9.strides[0] = __pyx_v_second_moms.strides[1];
__pyx_t_9.suboffsets[0] = -1;
__pyx_t_9.shape[1] = __pyx_v_second_moms.shape[2];
__pyx_t_9.strides[1] = __pyx_v_second_moms.strides[2];
__pyx_t_9.suboffsets[1] = -1;
__pyx_t_20 = __pyx_v_t;
__pyx_t_21 = (__pyx_v_t + 1);
/* "bmtools/exact/moments.pyx":162
* num_threads=num_threads):
* norm_consts[t] = 0.
* accum_moments_for_state_range( # <<<<<<<<<<<<<<
* weights, biases, states[t], &norm_consts[t], first_moms[t],
* second_moms[t], intervals[t], intervals[t+1])
*/
__pyx_f_7bmtools_5exact_7moments_accum_moments_for_state_range(__pyx_v_weights, __pyx_v_biases, __pyx_t_18, (&(*((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data + __pyx_t_19 * __pyx_v_norm_consts.strides[0]) )))), __pyx_t_8, __pyx_t_9, (*((long *) ( /* dim=0 */ (__pyx_v_intervals.data + __pyx_t_20 * __pyx_v_intervals.strides[0]) ))), (*((long *) ( /* dim=0 */ (__pyx_v_intervals.data + __pyx_t_21 * __pyx_v_intervals.strides[0]) ))));
__PYX_XDEC_MEMVIEW(&__pyx_t_18, 0);
__pyx_t_18.memview = NULL;
__pyx_t_18.data = NULL;
__PYX_XDEC_MEMVIEW(&__pyx_t_8, 0);
__pyx_t_8.memview = NULL;
__pyx_t_8.data = NULL;
__PYX_XDEC_MEMVIEW(&__pyx_t_9, 0);
__pyx_t_9.memview = NULL;
__pyx_t_9.data = NULL;
goto __pyx_L18;
__pyx_L15_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_exc_type)
#endif /* _OPENMP */
if (!__pyx_parallel_exc_type) {
__Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb);
__pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno;
__Pyx_GOTREF(__pyx_parallel_exc_type);
}
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_parallel_why = 4;
goto __pyx_L17;
__pyx_L17:;
#ifdef _OPENMP
#pragma omp critical(__pyx_parallel_lastprivates0)
#endif /* _OPENMP */
{
__pyx_parallel_temp0 = __pyx_v_t;
}
__pyx_L18:;
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_why)
#endif /* _OPENMP */
}
}
#ifdef _OPENMP
Py_END_ALLOW_THREADS
#else
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
#endif /* _OPENMP */
/* Clean up any temporaries */
__PYX_XDEC_MEMVIEW(&__pyx_t_18, 0);
__PYX_XDEC_MEMVIEW(&__pyx_t_8, 0);
__PYX_XDEC_MEMVIEW(&__pyx_t_9, 0);
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
#ifndef _OPENMP
}
#endif /* _OPENMP */
}
}
if (__pyx_parallel_exc_type) {
/* This may have been overridden by a continue, break or return in another thread. Prefer the error. */
__pyx_parallel_why = 4;
}
if (__pyx_parallel_why) {
__pyx_v_t = __pyx_parallel_temp0;
switch (__pyx_parallel_why) {
case 4:
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_GIVEREF(__pyx_parallel_exc_type);
__Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb);
__pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno;
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
goto __pyx_L11_error;
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "bmtools/exact/moments.pyx":159
* # accumulating moments for its assigned states into thread-specific
* # arrays
* for t in prange(num_threads, nogil=True, schedule='static', chunksize=1, # <<<<<<<<<<<<<<
* num_threads=num_threads):
* norm_consts[t] = 0.
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L12;
}
__pyx_L11_error: {
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L1_error;
}
__pyx_L12:;
}
}
/* "bmtools/exact/moments.pyx":167
* # accumulate normalisation constant terms calculated by each individual
* # thread to get overall value
* for t in range(1, num_threads): # <<<<<<<<<<<<<<
* norm_consts[0] += norm_consts[t]
* # if multiple threads were used, accumulate moment values calculated by each
*/
__pyx_t_15 = __pyx_v_num_threads;
for (__pyx_t_14 = 1; __pyx_t_14 < __pyx_t_15; __pyx_t_14+=1) {
__pyx_v_t = __pyx_t_14;
/* "bmtools/exact/moments.pyx":168
* # thread to get overall value
* for t in range(1, num_threads):
* norm_consts[0] += norm_consts[t] # <<<<<<<<<<<<<<
* # if multiple threads were used, accumulate moment values calculated by each
* # thread before normalising (and for second moments filling in
*/
__pyx_t_22 = __pyx_v_t;
__pyx_t_23 = 0;
*((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data + __pyx_t_23 * __pyx_v_norm_consts.strides[0]) )) += (*((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data + __pyx_t_22 * __pyx_v_norm_consts.strides[0]) )));
}
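/* Each thread accumulated its partial normalisation constant into its own
* norm_consts[t] slot inside the parallel region above; the loop just
* completed reduces those partial sums sequentially into norm_consts[0]. */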
/* "bmtools/exact/moments.pyx":172
* # thread before normalising (and for second moments filling in
* # diagonal and upper triangle values using symmetry)
* if num_threads > 1: # <<<<<<<<<<<<<<
* combine_and_normalise_first_moments(first_moms, norm_consts[0])
* combine_normalise_and_reflect_second_moments(second_moms,
*/
__pyx_t_1 = ((__pyx_v_num_threads > 1) != 0);
if (__pyx_t_1) {
/* "bmtools/exact/moments.pyx":173
* # diagonal and upper triangle values using symmetry)
* if num_threads > 1:
* combine_and_normalise_first_moments(first_moms, norm_consts[0]) # <<<<<<<<<<<<<<
* combine_normalise_and_reflect_second_moments(second_moms,
* norm_consts[0])
*/
__pyx_t_24 = 0;
__pyx_f_7bmtools_5exact_7moments_combine_and_normalise_first_moments(__pyx_v_first_moms, (*((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data + __pyx_t_24 * __pyx_v_norm_consts.strides[0]) ))));
/* "bmtools/exact/moments.pyx":175
* combine_and_normalise_first_moments(first_moms, norm_consts[0])
* combine_normalise_and_reflect_second_moments(second_moms,
* norm_consts[0]) # <<<<<<<<<<<<<<
* else:
* normalise_first_moment(first_moms[0], norm_consts[0])
*/
__pyx_t_25 = 0;
/* "bmtools/exact/moments.pyx":174
* if num_threads > 1:
* combine_and_normalise_first_moments(first_moms, norm_consts[0])
* combine_normalise_and_reflect_second_moments(second_moms, # <<<<<<<<<<<<<<
* norm_consts[0])
* else:
*/
__pyx_f_7bmtools_5exact_7moments_combine_normalise_and_reflect_second_moments(__pyx_v_second_moms, (*((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data + __pyx_t_25 * __pyx_v_norm_consts.strides[0]) ))));
/* "bmtools/exact/moments.pyx":172
* # thread before normalising (and for second moments filling in
* # diagonal and upper triangle values using symmetry)
* if num_threads > 1: # <<<<<<<<<<<<<<
* combine_and_normalise_first_moments(first_moms, norm_consts[0])
* combine_normalise_and_reflect_second_moments(second_moms,
*/
goto __pyx_L21;
}
/* "bmtools/exact/moments.pyx":177
* norm_consts[0])
* else:
* normalise_first_moment(first_moms[0], norm_consts[0]) # <<<<<<<<<<<<<<
* normalise_and_reflect_second_moment(second_moms[0], norm_consts[0])
* # only return values if not all arrays updated in place
*/
/*else*/ {
__pyx_t_8.data = __pyx_v_first_moms.data;
__pyx_t_8.memview = __pyx_v_first_moms.memview;
__PYX_INC_MEMVIEW(&__pyx_t_8, 0);
{
Py_ssize_t __pyx_tmp_idx = 0;
Py_ssize_t __pyx_tmp_shape = __pyx_v_first_moms.shape[0];
Py_ssize_t __pyx_tmp_stride = __pyx_v_first_moms.strides[0];
if (0 && (__pyx_tmp_idx < 0))
__pyx_tmp_idx += __pyx_tmp_shape;
if (0 && (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape)) {
PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 0)");
__PYX_ERR(0, 177, __pyx_L1_error)
}
__pyx_t_8.data += __pyx_tmp_idx * __pyx_tmp_stride;
}
__pyx_t_8.shape[0] = __pyx_v_first_moms.shape[1];
__pyx_t_8.strides[0] = __pyx_v_first_moms.strides[1];
__pyx_t_8.suboffsets[0] = -1;
__pyx_t_26 = 0;
__pyx_f_7bmtools_5exact_7moments_normalise_first_moment(__pyx_t_8, (*((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data + __pyx_t_26 * __pyx_v_norm_consts.strides[0]) ))));
__PYX_XDEC_MEMVIEW(&__pyx_t_8, 1);
__pyx_t_8.memview = NULL;
__pyx_t_8.data = NULL;
/* "bmtools/exact/moments.pyx":178
* else:
* normalise_first_moment(first_moms[0], norm_consts[0])
* normalise_and_reflect_second_moment(second_moms[0], norm_consts[0]) # <<<<<<<<<<<<<<
* # only return values if not all arrays updated in place
* if not all_in_place:
*/
__pyx_t_9.data = __pyx_v_second_moms.data;
__pyx_t_9.memview = __pyx_v_second_moms.memview;
__PYX_INC_MEMVIEW(&__pyx_t_9, 0);
{
Py_ssize_t __pyx_tmp_idx = 0;
Py_ssize_t __pyx_tmp_shape = __pyx_v_second_moms.shape[0];
Py_ssize_t __pyx_tmp_stride = __pyx_v_second_moms.strides[0];
if (0 && (__pyx_tmp_idx < 0))
__pyx_tmp_idx += __pyx_tmp_shape;
if (0 && (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape)) {
PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 0)");
__PYX_ERR(0, 178, __pyx_L1_error)
}
__pyx_t_9.data += __pyx_tmp_idx * __pyx_tmp_stride;
}
__pyx_t_9.shape[0] = __pyx_v_second_moms.shape[1];
__pyx_t_9.strides[0] = __pyx_v_second_moms.strides[1];
__pyx_t_9.suboffsets[0] = -1;
__pyx_t_9.shape[1] = __pyx_v_second_moms.shape[2];
__pyx_t_9.strides[1] = __pyx_v_second_moms.strides[2];
__pyx_t_9.suboffsets[1] = -1;
__pyx_t_27 = 0;
__pyx_f_7bmtools_5exact_7moments_normalise_and_reflect_second_moment(__pyx_t_9, (*((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data + __pyx_t_27 * __pyx_v_norm_consts.strides[0]) ))));
__PYX_XDEC_MEMVIEW(&__pyx_t_9, 1);
__pyx_t_9.memview = NULL;
__pyx_t_9.data = NULL;
}
__pyx_L21:;
/* "bmtools/exact/moments.pyx":180
* normalise_and_reflect_second_moment(second_moms[0], norm_consts[0])
* # only return values if not all arrays are updated in place
* if not all_in_place: # <<<<<<<<<<<<<<
* return norm_consts[0], first_moms[0], second_moms[0]
*
*/
__pyx_t_1 = ((!(__pyx_v_all_in_place != 0)) != 0);
if (__pyx_t_1) {
/* "bmtools/exact/moments.pyx":181
* # only return values if not all arrays are updated in place
* if not all_in_place:
* return norm_consts[0], first_moms[0], second_moms[0] # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_28 = 0;
__pyx_t_10 = PyFloat_FromDouble((*((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data + __pyx_t_28 * __pyx_v_norm_consts.strides[0]) )))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 181, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__pyx_t_8.data = __pyx_v_first_moms.data;
__pyx_t_8.memview = __pyx_v_first_moms.memview;
__PYX_INC_MEMVIEW(&__pyx_t_8, 0);
{
Py_ssize_t __pyx_tmp_idx = 0;
Py_ssize_t __pyx_tmp_shape = __pyx_v_first_moms.shape[0];
Py_ssize_t __pyx_tmp_stride = __pyx_v_first_moms.strides[0];
if (0 && (__pyx_tmp_idx < 0))
__pyx_tmp_idx += __pyx_tmp_shape;
if (0 && (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape)) {
PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 0)");
__PYX_ERR(0, 181, __pyx_L1_error)
}
__pyx_t_8.data += __pyx_tmp_idx * __pyx_tmp_stride;
}
__pyx_t_8.shape[0] = __pyx_v_first_moms.shape[1];
__pyx_t_8.strides[0] = __pyx_v_first_moms.strides[1];
__pyx_t_8.suboffsets[0] = -1;
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_t_8, 1, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 181, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__PYX_XDEC_MEMVIEW(&__pyx_t_8, 1);
__pyx_t_8.memview = NULL;
__pyx_t_8.data = NULL;
__pyx_t_9.data = __pyx_v_second_moms.data;
__pyx_t_9.memview = __pyx_v_second_moms.memview;
__PYX_INC_MEMVIEW(&__pyx_t_9, 0);
{
Py_ssize_t __pyx_tmp_idx = 0;
Py_ssize_t __pyx_tmp_shape = __pyx_v_second_moms.shape[0];
Py_ssize_t __pyx_tmp_stride = __pyx_v_second_moms.strides[0];
if (0 && (__pyx_tmp_idx < 0))
__pyx_tmp_idx += __pyx_tmp_shape;
if (0 && (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape)) {
PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 0)");
__PYX_ERR(0, 181, __pyx_L1_error)
}
__pyx_t_9.data += __pyx_tmp_idx * __pyx_tmp_stride;
}
__pyx_t_9.shape[0] = __pyx_v_second_moms.shape[1];
__pyx_t_9.strides[0] = __pyx_v_second_moms.strides[1];
__pyx_t_9.suboffsets[0] = -1;
__pyx_t_9.shape[1] = __pyx_v_second_moms.shape[2];
__pyx_t_9.strides[1] = __pyx_v_second_moms.strides[2];
__pyx_t_9.suboffsets[1] = -1;
__pyx_t_5 = __pyx_memoryview_fromslice(__pyx_t_9, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 181, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__PYX_XDEC_MEMVIEW(&__pyx_t_9, 1);
__pyx_t_9.memview = NULL;
__pyx_t_9.data = NULL;
__pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 181, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_10);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_10);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5);
__pyx_t_10 = 0;
__pyx_t_3 = 0;
__pyx_t_5 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "bmtools/exact/moments.pyx":180
* normalise_and_reflect_second_moment(second_moms[0], norm_consts[0])
* # only return values if not all arrays are updated in place
* if not all_in_place: # <<<<<<<<<<<<<<
* return norm_consts[0], first_moms[0], second_moms[0]
*
*/
}
/* "bmtools/exact/moments.pyx":74
*
*
* def calculate_moments_parallel(double[:, :] weights, double[:] biases, # <<<<<<<<<<<<<<
* bint force=False, int num_threads=2,
* double[:] norm_consts=None,
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__PYX_XDEC_MEMVIEW(&__pyx_t_6, 1);
__PYX_XDEC_MEMVIEW(&__pyx_t_8, 1);
__PYX_XDEC_MEMVIEW(&__pyx_t_9, 1);
__Pyx_XDECREF(__pyx_t_10);
__PYX_XDEC_MEMVIEW(&__pyx_t_11, 1);
__PYX_XDEC_MEMVIEW(&__pyx_t_12, 1);
__PYX_XDEC_MEMVIEW(&__pyx_t_18, 1);
__Pyx_AddTraceback("bmtools.exact.moments.calculate_moments_parallel", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__PYX_XDEC_MEMVIEW(&__pyx_v_states, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_intervals, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_weights, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_biases, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_norm_consts, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_first_moms, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_second_moms, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
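/* Editor's note (added comment, not generated code): in the function above,
 * the num_threads > 1 branch first combines per-thread partial moment sums
 * into row 0 of each buffer and then normalises, while the single-thread
 * branch normalises row 0 directly; either way norm_consts[0], first_moms[0]
 * and second_moms[0] end up holding the final values, which are returned as
 * a tuple only when the caller did not supply all output arrays in place. */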
/* "bmtools/exact/moments.pyx":184
*
*
* def calculate_probs_parallel( # <<<<<<<<<<<<<<
* double[:, :] weights, double[:] biases, bint force=False,
* int num_threads=2,):
*/
/* Python wrapper */
static PyObject *__pyx_pw_7bmtools_5exact_7moments_5calculate_probs_parallel(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_7bmtools_5exact_7moments_4calculate_probs_parallel[] = "calculate_probs_parallel(__Pyx_memviewslice weights, __Pyx_memviewslice biases, bool force=False, int num_threads=2)\nCalculate BM distribution probabilities using parallel implementation.\n\n Calculates the probabilities of all signed binary states according to a\n Boltzmann machine invariant distribution specified by the provided weight\n and bias parameters. Calculation is done by exhaustive iteration over the\n 2**num_units state space so should only be attempted for moderate\n dimensionalities. Calculations are done in parallel using a specified number\n of threads, each iterating across an equal partition of the state space.\n\n Parameters\n ----------\n weights : double[:, :]\n Matrix of weight parameters. Should be symmetric and zero-diagonal.\n biases : double[:]\n Vector of bias parameters.\n force : bool (bint)\n Flag to override forced exit when the number of units is more than 20\n due to the large size of the state space.\n num_threads : int\n Number of parallel threads to use.\n\n Returns\n -------\n probs : double[:]\n Array of normalised probabilities for all states.\n norm_const : double\n Sum of unnormalised probability terms across all states.\n ";
static PyMethodDef __pyx_mdef_7bmtools_5exact_7moments_5calculate_probs_parallel = {"calculate_probs_parallel", (PyCFunction)__pyx_pw_7bmtools_5exact_7moments_5calculate_probs_parallel, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7bmtools_5exact_7moments_4calculate_probs_parallel};
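/* Editor's note (illustrative sketch, not generated code): the docstring
 * above refers to the Boltzmann machine invariant distribution. For a
 * signed binary state s in {-1,+1}^n with symmetric zero-diagonal weights
 * W and biases b,
 *
 *     p(s) = exp(-E(s)) / Z,  -E(s) = 0.5 * sum_ij W[i][j]*s_i*s_j + sum_i b_i*s_i,
 *
 * where Z sums exp(-E(s)) over all 2**n states. The helper below is a
 * minimal plain-C version of the unnormalised term, assuming this is the
 * quantity the helpers module's neg_energy computes; the name and the
 * signed char state type are hypothetical. */
#include <math.h>
static double example_unnormed_prob(const double *weights, /* n*n, row-major */
                                    const double *biases,
                                    const signed char *state, int n) {
    double neg_energy = 0.0;
    for (int i = 0; i < n; ++i) {
        neg_energy += biases[i] * state[i];
        /* lower triangle only: W is symmetric with a zero diagonal, so this
         * equals 0.5 * the full double sum */
        for (int j = 0; j < i; ++j)
            neg_energy += weights[i * n + j] * state[i] * state[j];
    }
    return exp(neg_energy);
}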
static PyObject *__pyx_pw_7bmtools_5exact_7moments_5calculate_probs_parallel(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_weights = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_biases = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_v_force;
int __pyx_v_num_threads;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("calculate_probs_parallel (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_weights,&__pyx_n_s_biases,&__pyx_n_s_force,&__pyx_n_s_num_threads,0};
PyObject* values[4] = {0,0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_weights)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_biases)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("calculate_probs_parallel", 0, 2, 4, 1); __PYX_ERR(0, 184, __pyx_L3_error)
}
case 2:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_force);
if (value) { values[2] = value; kw_args--; }
}
case 3:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_num_threads);
if (value) { values[3] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "calculate_probs_parallel") < 0)) __PYX_ERR(0, 184, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_weights = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[0]); if (unlikely(!__pyx_v_weights.memview)) __PYX_ERR(0, 185, __pyx_L3_error)
__pyx_v_biases = __Pyx_PyObject_to_MemoryviewSlice_ds_double(values[1]); if (unlikely(!__pyx_v_biases.memview)) __PYX_ERR(0, 185, __pyx_L3_error)
if (values[2]) {
__pyx_v_force = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_force == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 185, __pyx_L3_error)
} else {
/* "bmtools/exact/moments.pyx":185
*
* def calculate_probs_parallel(
* double[:, :] weights, double[:] biases, bint force=False, # <<<<<<<<<<<<<<
* int num_threads=2,):
* """Calculate BM distribution probabilities using parallel implementation.
*/
__pyx_v_force = ((int)0);
}
if (values[3]) {
__pyx_v_num_threads = __Pyx_PyInt_As_int(values[3]); if (unlikely((__pyx_v_num_threads == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 186, __pyx_L3_error)
} else {
__pyx_v_num_threads = ((int)2);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("calculate_probs_parallel", 0, 2, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 184, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("bmtools.exact.moments.calculate_probs_parallel", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_7bmtools_5exact_7moments_4calculate_probs_parallel(__pyx_self, __pyx_v_weights, __pyx_v_biases, __pyx_v_force, __pyx_v_num_threads);
/* "bmtools/exact/moments.pyx":184
*
*
* def calculate_probs_parallel( # <<<<<<<<<<<<<<
* double[:, :] weights, double[:] biases, bint force=False,
* int num_threads=2,):
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_7bmtools_5exact_7moments_4calculate_probs_parallel(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_weights, __Pyx_memviewslice __pyx_v_biases, int __pyx_v_force, int __pyx_v_num_threads) {
int __pyx_v_t;
int __pyx_v_num_units;
long __pyx_v_num_states;
__Pyx_memviewslice __pyx_v_states = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_norm_consts = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_probs = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_intervals = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
__Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_t_7 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_t_8 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_t_9;
int __pyx_t_10;
int __pyx_t_11;
int __pyx_t_12;
Py_ssize_t __pyx_t_13;
__Pyx_memviewslice __pyx_t_14 = { 0, 0, { 0 }, { 0 }, { 0 } };
Py_ssize_t __pyx_t_15;
Py_ssize_t __pyx_t_16;
Py_ssize_t __pyx_t_17;
int __pyx_t_18;
Py_ssize_t __pyx_t_19;
Py_ssize_t __pyx_t_20;
Py_ssize_t __pyx_t_21;
Py_ssize_t __pyx_t_22;
Py_ssize_t __pyx_t_23;
Py_ssize_t __pyx_t_24;
Py_ssize_t __pyx_t_25;
Py_ssize_t __pyx_t_26;
__Pyx_RefNannySetupContext("calculate_probs_parallel", 0);
/* "bmtools/exact/moments.pyx":216
* """
* cdef int t
* cdef int num_units = weights.shape[0] # <<<<<<<<<<<<<<
* cdef long num_states = 2 ** num_units
* if num_threads <= 0:
*/
__pyx_v_num_units = (__pyx_v_weights.shape[0]);
/* "bmtools/exact/moments.pyx":217
* cdef int t
* cdef int num_units = weights.shape[0]
* cdef long num_states = 2 ** num_units # <<<<<<<<<<<<<<
* if num_threads <= 0:
* raise ValueError('Number of threads must be > 0')
*/
__pyx_v_num_states = __Pyx_pow_long(2, ((long)__pyx_v_num_units));
/* "bmtools/exact/moments.pyx":218
* cdef int num_units = weights.shape[0]
* cdef long num_states = 2 ** num_units
* if num_threads <= 0: # <<<<<<<<<<<<<<
* raise ValueError('Number of threads must be > 0')
* check_state_space_size(num_units, force)
*/
__pyx_t_1 = ((__pyx_v_num_threads <= 0) != 0);
if (__pyx_t_1) {
/* "bmtools/exact/moments.pyx":219
* cdef long num_states = 2 ** num_units
* if num_threads <= 0:
* raise ValueError('Number of threads must be > 0') # <<<<<<<<<<<<<<
* check_state_space_size(num_units, force)
* cdef state_t[:, :] states = array(
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 219, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(0, 219, __pyx_L1_error)
/* "bmtools/exact/moments.pyx":218
* cdef int num_units = weights.shape[0]
* cdef long num_states = 2 ** num_units
* if num_threads <= 0: # <<<<<<<<<<<<<<
* raise ValueError('Number of threads must be > 0')
* check_state_space_size(num_units, force)
*/
}
/* "bmtools/exact/moments.pyx":220
* if num_threads <= 0:
* raise ValueError('Number of threads must be > 0')
* check_state_space_size(num_units, force) # <<<<<<<<<<<<<<
* cdef state_t[:, :] states = array(
* shape=(num_threads, num_units), itemsize=sizeof(state_t), format=state_t_code)
*/
__pyx_f_7bmtools_5exact_7helpers_check_state_space_size(__pyx_v_num_units, __pyx_v_force, 0);
/* "bmtools/exact/moments.pyx":222
* check_state_space_size(num_units, force)
* cdef state_t[:, :] states = array(
* shape=(num_threads, num_units), itemsize=sizeof(state_t), format=state_t_code) # <<<<<<<<<<<<<<
* cdef double[:] norm_consts = array(shape=(num_threads,),
* itemsize=sizeof(double), format='d')
*/
__pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 222, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_num_threads); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 222, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_num_units); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 222, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 222, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4);
__pyx_t_3 = 0;
__pyx_t_4 = 0;
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_shape, __pyx_t_5) < 0) __PYX_ERR(0, 222, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyInt_FromSize_t((sizeof(__pyx_t_7bmtools_5exact_7helpers_state_t))); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 222, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_itemsize, __pyx_t_5) < 0) __PYX_ERR(0, 222, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_7bmtools_5exact_7helpers_state_t_code); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 222, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_format, __pyx_t_5) < 0) __PYX_ERR(0, 222, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
/* "bmtools/exact/moments.pyx":221
* raise ValueError('Number of threads must be > 0')
* check_state_space_size(num_units, force)
* cdef state_t[:, :] states = array( # <<<<<<<<<<<<<<
* shape=(num_threads, num_units), itemsize=sizeof(state_t), format=state_t_code)
* cdef double[:] norm_consts = array(shape=(num_threads,),
*/
__pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_empty_tuple, __pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 221, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_dsds_nn___pyx_t_7bmtools_5exact_7helpers_state_t(__pyx_t_5);
if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 221, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_states = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "bmtools/exact/moments.pyx":223
* cdef state_t[:, :] states = array(
* shape=(num_threads, num_units), itemsize=sizeof(state_t), format=state_t_code)
* cdef double[:] norm_consts = array(shape=(num_threads,), # <<<<<<<<<<<<<<
* itemsize=sizeof(double), format='d')
* cdef double[:] probs = array(
*/
__pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 223, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_num_threads); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 223, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 223, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
__pyx_t_2 = 0;
if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_shape, __pyx_t_4) < 0) __PYX_ERR(0, 223, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
/* "bmtools/exact/moments.pyx":224
* shape=(num_threads, num_units), itemsize=sizeof(state_t), format=state_t_code)
* cdef double[:] norm_consts = array(shape=(num_threads,),
* itemsize=sizeof(double), format='d') # <<<<<<<<<<<<<<
* cdef double[:] probs = array(
* shape=(num_states,), itemsize=sizeof(double), format='d'
*/
__pyx_t_4 = __Pyx_PyInt_FromSize_t((sizeof(double))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 224, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_itemsize, __pyx_t_4) < 0) __PYX_ERR(0, 223, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_format, __pyx_n_s_d) < 0) __PYX_ERR(0, 223, __pyx_L1_error)
/* "bmtools/exact/moments.pyx":223
* cdef state_t[:, :] states = array(
* shape=(num_threads, num_units), itemsize=sizeof(state_t), format=state_t_code)
* cdef double[:] norm_consts = array(shape=(num_threads,), # <<<<<<<<<<<<<<
* itemsize=sizeof(double), format='d')
* cdef double[:] probs = array(
*/
__pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_empty_tuple, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 223, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_7 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_4);
if (unlikely(!__pyx_t_7.memview)) __PYX_ERR(0, 223, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_v_norm_consts = __pyx_t_7;
__pyx_t_7.memview = NULL;
__pyx_t_7.data = NULL;
/* "bmtools/exact/moments.pyx":226
* itemsize=sizeof(double), format='d')
* cdef double[:] probs = array(
* shape=(num_states,), itemsize=sizeof(double), format='d' # <<<<<<<<<<<<<<
* )
* # partition state space into equal-sized sections to allocate to
*/
__pyx_t_4 = PyDict_New(); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 226, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = __Pyx_PyInt_From_long(__pyx_v_num_states); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 226, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 226, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_5);
__pyx_t_5 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_shape, __pyx_t_2) < 0) __PYX_ERR(0, 226, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyInt_FromSize_t((sizeof(double))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 226, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_itemsize, __pyx_t_2) < 0) __PYX_ERR(0, 226, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_format, __pyx_n_s_d) < 0) __PYX_ERR(0, 226, __pyx_L1_error)
/* "bmtools/exact/moments.pyx":225
* cdef double[:] norm_consts = array(shape=(num_threads,),
* itemsize=sizeof(double), format='d')
* cdef double[:] probs = array( # <<<<<<<<<<<<<<
* shape=(num_states,), itemsize=sizeof(double), format='d'
* )
*/
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_empty_tuple, __pyx_t_4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 225, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_7 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_2);
if (unlikely(!__pyx_t_7.memview)) __PYX_ERR(0, 225, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_probs = __pyx_t_7;
__pyx_t_7.memview = NULL;
__pyx_t_7.data = NULL;
/* "bmtools/exact/moments.pyx":230
* # partition state space into equal-sized sections to allocate to
* # different parallel threads
* cdef long[:] intervals = partition_state_space(num_states, num_threads) # <<<<<<<<<<<<<<
* # parallel loop over partitions of state space, with each thread
* # calculating probabilities for its assigned states into thread-specific
*/
__pyx_t_8 = __pyx_f_7bmtools_5exact_7helpers_partition_state_space(__pyx_v_num_states, __pyx_v_num_threads); if (unlikely(!__pyx_t_8.memview)) __PYX_ERR(0, 230, __pyx_L1_error)
__pyx_v_intervals = __pyx_t_8;
__pyx_t_8.memview = NULL;
__pyx_t_8.data = NULL;
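/* Editor's note (added comment): partition_state_space lives in the helpers
 * module and its exact rounding is not shown in this file. The comments
 * describe an equal contiguous partitioning, i.e. something like
 *
 *     intervals[0] = 0;
 *     intervals[t + 1] = intervals[t] + num_states / num_threads
 *                        + (t < num_states % num_threads ? 1 : 0);
 *
 * so the half-open slices [intervals[t], intervals[t+1]) cover all
 * num_states indices exactly once (the rounding here is an assumption). */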
/* "bmtools/exact/moments.pyx":234
* # calculating probabilities for its assigned states into thread-specific
* # slice of probs array
* for t in prange(num_threads, nogil=True, schedule='static', chunksize=1, # <<<<<<<<<<<<<<
* num_threads=num_threads):
* norm_consts[t] = 0.
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
#endif
/*try:*/ {
__pyx_t_9 = __pyx_v_num_threads;
if (1 == 0) abort();
{
int __pyx_parallel_temp0 = 0xbad0bad0;
const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0;
PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL;
int __pyx_parallel_why;
__pyx_parallel_why = 0;
__pyx_t_12 = 1;
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
__pyx_t_11 = (__pyx_t_9 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_11 > 0)
{
#ifdef _OPENMP
#pragma omp parallel num_threads(__pyx_v_num_threads) private(__pyx_t_13, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19, __pyx_t_20) firstprivate(__pyx_t_14, __pyx_t_7) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
Py_BEGIN_ALLOW_THREADS
#endif /* _OPENMP */
#ifdef _OPENMP
#pragma omp for firstprivate(__pyx_v_t) lastprivate(__pyx_v_t) schedule(static, __pyx_t_12)
#endif /* _OPENMP */
for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_11; __pyx_t_10++){
if (__pyx_parallel_why < 2)
{
__pyx_v_t = (int)(0 + 1 * __pyx_t_10);
/* "bmtools/exact/moments.pyx":236
* for t in prange(num_threads, nogil=True, schedule='static', chunksize=1,
* num_threads=num_threads):
* norm_consts[t] = 0. # <<<<<<<<<<<<<<
* calc_unnormed_probs_for_state_range(
* weights, biases, states[t], &norm_consts[t],
*/
__pyx_t_13 = __pyx_v_t;
*((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data + __pyx_t_13 * __pyx_v_norm_consts.strides[0]) )) = 0.;
/* "bmtools/exact/moments.pyx":238
* norm_consts[t] = 0.
* calc_unnormed_probs_for_state_range(
* weights, biases, states[t], &norm_consts[t], # <<<<<<<<<<<<<<
* probs[intervals[t]:intervals[t+1]],
* intervals[t], intervals[t+1])
*/
__pyx_t_14.data = __pyx_v_states.data;
__pyx_t_14.memview = __pyx_v_states.memview;
__PYX_INC_MEMVIEW(&__pyx_t_14, 0);
{
Py_ssize_t __pyx_tmp_idx = __pyx_v_t;
Py_ssize_t __pyx_tmp_shape = __pyx_v_states.shape[0];
Py_ssize_t __pyx_tmp_stride = __pyx_v_states.strides[0];
if (0 && (__pyx_tmp_idx < 0))
__pyx_tmp_idx += __pyx_tmp_shape;
if (0 && (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 0)");
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
__PYX_ERR(0, 238, __pyx_L9_error)
}
__pyx_t_14.data += __pyx_tmp_idx * __pyx_tmp_stride;
}
__pyx_t_14.shape[0] = __pyx_v_states.shape[1];
__pyx_t_14.strides[0] = __pyx_v_states.strides[1];
__pyx_t_14.suboffsets[0] = -1;
__pyx_t_15 = __pyx_v_t;
/* "bmtools/exact/moments.pyx":239
* calc_unnormed_probs_for_state_range(
* weights, biases, states[t], &norm_consts[t],
* probs[intervals[t]:intervals[t+1]], # <<<<<<<<<<<<<<
* intervals[t], intervals[t+1])
* # accumulate normalisation constant terms calculated by each individual
*/
__pyx_t_16 = __pyx_v_t;
__pyx_t_17 = (__pyx_v_t + 1);
__pyx_t_7.data = __pyx_v_probs.data;
__pyx_t_7.memview = __pyx_v_probs.memview;
__PYX_INC_MEMVIEW(&__pyx_t_7, 0);
__pyx_t_18 = -1;
if (unlikely(__pyx_memoryview_slice_memviewslice(
&__pyx_t_7,
__pyx_v_probs.shape[0], __pyx_v_probs.strides[0], __pyx_v_probs.suboffsets[0],
0,
0,
&__pyx_t_18,
(*((long *) ( /* dim=0 */ (__pyx_v_intervals.data + __pyx_t_16 * __pyx_v_intervals.strides[0]) ))),
(*((long *) ( /* dim=0 */ (__pyx_v_intervals.data + __pyx_t_17 * __pyx_v_intervals.strides[0]) ))),
0,
1,
1,
0,
1) < 0))
{
__PYX_ERR(0, 239, __pyx_L9_error)
}
__pyx_t_19 = __pyx_v_t;
/* "bmtools/exact/moments.pyx":240
* weights, biases, states[t], &norm_consts[t],
* probs[intervals[t]:intervals[t+1]],
* intervals[t], intervals[t+1]) # <<<<<<<<<<<<<<
* # accumulate normalisation constant terms calculated by each individual
* # thread to get overall value
*/
__pyx_t_20 = (__pyx_v_t + 1);
/* "bmtools/exact/moments.pyx":237
* num_threads=num_threads):
* norm_consts[t] = 0.
* calc_unnormed_probs_for_state_range( # <<<<<<<<<<<<<<
* weights, biases, states[t], &norm_consts[t],
* probs[intervals[t]:intervals[t+1]],
*/
__pyx_f_7bmtools_5exact_7moments_calc_unnormed_probs_for_state_range(__pyx_v_weights, __pyx_v_biases, __pyx_t_14, (&(*((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data + __pyx_t_15 * __pyx_v_norm_consts.strides[0]) )))), __pyx_t_7, (*((long *) ( /* dim=0 */ (__pyx_v_intervals.data + __pyx_t_19 * __pyx_v_intervals.strides[0]) ))), (*((long *) ( /* dim=0 */ (__pyx_v_intervals.data + __pyx_t_20 * __pyx_v_intervals.strides[0]) ))));
__PYX_XDEC_MEMVIEW(&__pyx_t_14, 0);
__pyx_t_14.memview = NULL;
__pyx_t_14.data = NULL;
__PYX_XDEC_MEMVIEW(&__pyx_t_7, 0);
__pyx_t_7.memview = NULL;
__pyx_t_7.data = NULL;
goto __pyx_L12;
__pyx_L9_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_exc_type)
#endif /* _OPENMP */
if (!__pyx_parallel_exc_type) {
__Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb);
__pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno;
__Pyx_GOTREF(__pyx_parallel_exc_type);
}
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_parallel_why = 4;
goto __pyx_L11;
__pyx_L11:;
#ifdef _OPENMP
#pragma omp critical(__pyx_parallel_lastprivates1)
#endif /* _OPENMP */
{
__pyx_parallel_temp0 = __pyx_v_t;
}
__pyx_L12:;
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_why)
#endif /* _OPENMP */
}
}
#ifdef _OPENMP
Py_END_ALLOW_THREADS
#else
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
#endif /* _OPENMP */
/* Clean up any temporaries */
__PYX_XDEC_MEMVIEW(&__pyx_t_14, 0);
__PYX_XDEC_MEMVIEW(&__pyx_t_7, 0);
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
#ifndef _OPENMP
}
#endif /* _OPENMP */
}
}
if (__pyx_parallel_exc_type) {
/* This may have been overridden by a continue, break or return in another thread. Prefer the error. */
__pyx_parallel_why = 4;
}
if (__pyx_parallel_why) {
__pyx_v_t = __pyx_parallel_temp0;
switch (__pyx_parallel_why) {
case 4:
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_GIVEREF(__pyx_parallel_exc_type);
__Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb);
__pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno;
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
goto __pyx_L5_error;
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "bmtools/exact/moments.pyx":234
* # calculating probabilities for its assigned states into thread-specific
* # slice of probs array
* for t in prange(num_threads, nogil=True, schedule='static', chunksize=1, # <<<<<<<<<<<<<<
* num_threads=num_threads):
* norm_consts[t] = 0.
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L6;
}
__pyx_L5_error: {
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L1_error;
}
__pyx_L6:;
}
}
/* "bmtools/exact/moments.pyx":243
* # accumulate normalisation constant terms calculated by each individual
* # thread to get overall value
* for t in range(1, num_threads): # <<<<<<<<<<<<<<
* norm_consts[0] += norm_consts[t]
* # normalise probabilities by dividing through by normalisation constant
*/
__pyx_t_11 = __pyx_v_num_threads;
for (__pyx_t_10 = 1; __pyx_t_10 < __pyx_t_11; __pyx_t_10+=1) {
__pyx_v_t = __pyx_t_10;
/* "bmtools/exact/moments.pyx":244
* # thread to get overall value
* for t in range(1, num_threads):
* norm_consts[0] += norm_consts[t] # <<<<<<<<<<<<<<
* # normalise probabilities by dividing through by normalisation constant
* # in parallel over multiple threads
*/
__pyx_t_21 = __pyx_v_t;
__pyx_t_22 = 0;
*((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data + __pyx_t_22 * __pyx_v_norm_consts.strides[0]) )) += (*((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data + __pyx_t_21 * __pyx_v_norm_consts.strides[0]) )));
}
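/* Editor's note (added comment): each thread wrote its partial sum into its
 * own norm_consts[t] slot during the nogil prange above, so this serial loop
 * is the reduction step; accumulating into norm_consts[0] after the parallel
 * region avoids any cross-thread race on a shared accumulator. */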
/* "bmtools/exact/moments.pyx":247
* # normalise probabilities by dividing through by normalisation constant
* # in parallel over multiple threads
* for t in prange(num_threads, nogil=True, schedule='static', chunksize=1, # <<<<<<<<<<<<<<
* num_threads=num_threads):
* normalise_probabilities(probs[intervals[t]:intervals[t+1]],
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
#endif
/*try:*/ {
__pyx_t_11 = __pyx_v_num_threads;
if (1 == 0) abort();
{
int __pyx_parallel_temp0 = 0xbad0bad0;
const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0;
PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL;
int __pyx_parallel_why;
__pyx_parallel_why = 0;
__pyx_t_9 = 1;
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
__pyx_t_12 = (__pyx_t_11 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_12 > 0)
{
#ifdef _OPENMP
#pragma omp parallel num_threads(__pyx_v_num_threads) private(__pyx_t_18, __pyx_t_23, __pyx_t_24, __pyx_t_25) firstprivate(__pyx_t_7) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
Py_BEGIN_ALLOW_THREADS
#endif /* _OPENMP */
#ifdef _OPENMP
#pragma omp for firstprivate(__pyx_v_t) lastprivate(__pyx_v_t) schedule(static, __pyx_t_9)
#endif /* _OPENMP */
for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_12; __pyx_t_10++){
if (__pyx_parallel_why < 2)
{
__pyx_v_t = (int)(0 + 1 * __pyx_t_10);
/* "bmtools/exact/moments.pyx":249
* for t in prange(num_threads, nogil=True, schedule='static', chunksize=1,
* num_threads=num_threads):
* normalise_probabilities(probs[intervals[t]:intervals[t+1]], # <<<<<<<<<<<<<<
* norm_consts[0])
* return probs, norm_consts[0]
*/
__pyx_t_23 = __pyx_v_t;
__pyx_t_24 = (__pyx_v_t + 1);
__pyx_t_7.data = __pyx_v_probs.data;
__pyx_t_7.memview = __pyx_v_probs.memview;
__PYX_INC_MEMVIEW(&__pyx_t_7, 0);
__pyx_t_18 = -1;
if (unlikely(__pyx_memoryview_slice_memviewslice(
&__pyx_t_7,
__pyx_v_probs.shape[0], __pyx_v_probs.strides[0], __pyx_v_probs.suboffsets[0],
0,
0,
&__pyx_t_18,
(*((long *) ( /* dim=0 */ (__pyx_v_intervals.data + __pyx_t_23 * __pyx_v_intervals.strides[0]) ))),
(*((long *) ( /* dim=0 */ (__pyx_v_intervals.data + __pyx_t_24 * __pyx_v_intervals.strides[0]) ))),
0,
1,
1,
0,
1) < 0))
{
__PYX_ERR(0, 249, __pyx_L20_error)
}
__pyx_t_25 = 0;
/* "bmtools/exact/moments.pyx":250
* num_threads=num_threads):
* normalise_probabilities(probs[intervals[t]:intervals[t+1]],
* norm_consts[0]) # <<<<<<<<<<<<<<
* return probs, norm_consts[0]
*
*/
__pyx_f_7bmtools_5exact_7moments_normalise_probabilities(__pyx_t_7, (*((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data + __pyx_t_25 * __pyx_v_norm_consts.strides[0]) ))));
__PYX_XDEC_MEMVIEW(&__pyx_t_7, 0);
__pyx_t_7.memview = NULL;
__pyx_t_7.data = NULL;
goto __pyx_L23;
__pyx_L20_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_exc_type)
#endif /* _OPENMP */
if (!__pyx_parallel_exc_type) {
__Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb);
__pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno;
__Pyx_GOTREF(__pyx_parallel_exc_type);
}
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_parallel_why = 4;
goto __pyx_L22;
__pyx_L22:;
#ifdef _OPENMP
#pragma omp critical(__pyx_parallel_lastprivates2)
#endif /* _OPENMP */
{
__pyx_parallel_temp0 = __pyx_v_t;
}
__pyx_L23:;
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_why)
#endif /* _OPENMP */
}
}
#ifdef _OPENMP
Py_END_ALLOW_THREADS
#else
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
#endif /* _OPENMP */
/* Clean up any temporaries */
__PYX_XDEC_MEMVIEW(&__pyx_t_7, 0);
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
#ifndef _OPENMP
}
#endif /* _OPENMP */
}
}
if (__pyx_parallel_exc_type) {
/* This may have been overridden by a continue, break or return in another thread. Prefer the error. */
__pyx_parallel_why = 4;
}
if (__pyx_parallel_why) {
__pyx_v_t = __pyx_parallel_temp0;
switch (__pyx_parallel_why) {
case 4:
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_GIVEREF(__pyx_parallel_exc_type);
__Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb);
__pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno;
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
goto __pyx_L16_error;
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "bmtools/exact/moments.pyx":247
* # normalise probabilities by dividing through by normalisation constant
* # in parallel over multiple threads
* for t in prange(num_threads, nogil=True, schedule='static', chunksize=1, # <<<<<<<<<<<<<<
* num_threads=num_threads):
* normalise_probabilities(probs[intervals[t]:intervals[t+1]],
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L17;
}
__pyx_L16_error: {
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L1_error;
}
__pyx_L17:;
}
}
/* "bmtools/exact/moments.pyx":251
* normalise_probabilities(probs[intervals[t]:intervals[t+1]],
* norm_consts[0])
* return probs, norm_consts[0] # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_probs, 1, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_26 = 0;
__pyx_t_4 = PyFloat_FromDouble((*((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data + __pyx_t_26 * __pyx_v_norm_consts.strides[0]) )))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4);
__pyx_t_2 = 0;
__pyx_t_4 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "bmtools/exact/moments.pyx":184
*
*
* def calculate_probs_parallel( # <<<<<<<<<<<<<<
* double[:, :] weights, double[:] biases, bint force=False,
* int num_threads=2,):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__PYX_XDEC_MEMVIEW(&__pyx_t_6, 1);
__PYX_XDEC_MEMVIEW(&__pyx_t_7, 1);
__PYX_XDEC_MEMVIEW(&__pyx_t_8, 1);
__PYX_XDEC_MEMVIEW(&__pyx_t_14, 1);
__Pyx_AddTraceback("bmtools.exact.moments.calculate_probs_parallel", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__PYX_XDEC_MEMVIEW(&__pyx_v_states, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_norm_consts, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_probs, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_intervals, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_weights, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_biases, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
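/* Editor's note (illustrative sketch, not generated code): a single-threaded
 * reference for what calculate_probs_parallel computes, reusing the
 * hypothetical example_unnormed_prob sketched earlier; the bit order used to
 * decode an index into a state is an assumption, so this shows only the
 * shape of the computation, e.g. for checking small num_units cases. */
static double example_calculate_probs_serial(const double *weights,
                                             const double *biases,
                                             signed char *state, /* n entries, scratch */
                                             double *probs,      /* 2**n entries */
                                             int n) {
    long num_states = 1L << n;
    double norm_const = 0.0;
    for (long idx = 0; idx < num_states; ++idx) {
        for (int i = 0; i < n; ++i) /* decode idx into a {-1,+1} state vector */
            state[i] = ((idx >> i) & 1) ? 1 : -1;
        probs[idx] = example_unnormed_prob(weights, biases, state, n);
        norm_const += probs[idx];
    }
    for (long idx = 0; idx < num_states; ++idx) /* normalise in a second pass */
        probs[idx] /= norm_const;
    return norm_const;
}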
/* "bmtools/exact/moments.pyx":254
*
*
* cdef void calc_unnormed_probs_for_state_range( # <<<<<<<<<<<<<<
* double[:, :] weights, double[:] biases, state_t[:] state,
* double* norm_const, double[:] probs,
*/
static void __pyx_f_7bmtools_5exact_7moments_calc_unnormed_probs_for_state_range(__Pyx_memviewslice __pyx_v_weights, __Pyx_memviewslice __pyx_v_biases, __Pyx_memviewslice __pyx_v_state, double *__pyx_v_norm_const, __Pyx_memviewslice __pyx_v_probs, long __pyx_v_start_state_index, long __pyx_v_end_state_index) {
long __pyx_v_i;
long __pyx_t_1;
long __pyx_t_2;
Py_ssize_t __pyx_t_3;
long __pyx_t_4;
Py_ssize_t __pyx_t_5;
/* "bmtools/exact/moments.pyx":263
* """
* cdef long i
* index_to_state(start_state_index, state) # <<<<<<<<<<<<<<
* for i in range(end_state_index - start_state_index):
* probs[i] = exp(neg_energy(state, weights, biases))
*/
__pyx_f_7bmtools_5exact_7helpers_index_to_state(__pyx_v_start_state_index, __pyx_v_state, 0);
/* "bmtools/exact/moments.pyx":264
* cdef long i
* index_to_state(start_state_index, state)
* for i in range(end_state_index - start_state_index): # <<<<<<<<<<<<<<
* probs[i] = exp(neg_energy(state, weights, biases))
* norm_const[0] += probs[i]
*/
__pyx_t_1 = (__pyx_v_end_state_index - __pyx_v_start_state_index);
for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) {
__pyx_v_i = __pyx_t_2;
/* "bmtools/exact/moments.pyx":265
* index_to_state(start_state_index, state)
* for i in range(end_state_index - start_state_index):
* probs[i] = exp(neg_energy(state, weights, biases)) # <<<<<<<<<<<<<<
* norm_const[0] += probs[i]
* next_state(state, start_state_index + i + 1)
*/
__pyx_t_3 = __pyx_v_i;
*((double *) ( /* dim=0 */ (__pyx_v_probs.data + __pyx_t_3 * __pyx_v_probs.strides[0]) )) = exp(__pyx_f_7bmtools_5exact_7helpers_neg_energy(__pyx_v_state, __pyx_v_weights, __pyx_v_biases));
/* "bmtools/exact/moments.pyx":266
* for i in range(end_state_index - start_state_index):
* probs[i] = exp(neg_energy(state, weights, biases))
* norm_const[0] += probs[i] # <<<<<<<<<<<<<<
* next_state(state, start_state_index + i + 1)
*
*/
__pyx_t_4 = 0;
__pyx_t_5 = __pyx_v_i;
(__pyx_v_norm_const[__pyx_t_4]) = ((__pyx_v_norm_const[__pyx_t_4]) + (*((double *) ( /* dim=0 */ (__pyx_v_probs.data + __pyx_t_5 * __pyx_v_probs.strides[0]) ))));
/* "bmtools/exact/moments.pyx":267
* probs[i] = exp(neg_energy(state, weights, biases))
* norm_const[0] += probs[i]
* next_state(state, start_state_index + i + 1) # <<<<<<<<<<<<<<
*
*
*/
__pyx_f_7bmtools_5exact_7helpers_next_state(__pyx_v_state, ((__pyx_v_start_state_index + __pyx_v_i) + 1));
}
/* "bmtools/exact/moments.pyx":254
*
*
* cdef void calc_unnormed_probs_for_state_range( # <<<<<<<<<<<<<<
* double[:, :] weights, double[:] biases, state_t[:] state,
* double* norm_const, double[:] probs,
*/
/* function exit code */
}
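/* Editor's note (added comment): rather than decoding every index from
 * scratch, the loop above seeds the state buffer once with index_to_state
 * and then advances it with next_state(state, index + 1) on each iteration;
 * presumably next_state flips only the components that change between
 * consecutive indices, which is cheaper than a full per-state decode. */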
/* "bmtools/exact/moments.pyx":270
*
*
* cdef void normalise_probabilities(double[:] probs, double norm_const) nogil: # <<<<<<<<<<<<<<
* """Divides an array of probabilities by a normalisation constant."""
* cdef long i
*/
static void __pyx_f_7bmtools_5exact_7moments_normalise_probabilities(__Pyx_memviewslice __pyx_v_probs, double __pyx_v_norm_const) {
long __pyx_v_i;
Py_ssize_t __pyx_t_1;
long __pyx_t_2;
Py_ssize_t __pyx_t_3;
/* "bmtools/exact/moments.pyx":273
* """Divides an array of probabilities by a normalisation constant."""
* cdef long i
* for i in range(probs.shape[0]): # <<<<<<<<<<<<<<
* probs[i] /= norm_const
*
*/
__pyx_t_1 = (__pyx_v_probs.shape[0]);
for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) {
__pyx_v_i = __pyx_t_2;
/* "bmtools/exact/moments.pyx":274
* cdef long i
* for i in range(probs.shape[0]):
* probs[i] /= norm_const # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_v_i;
*((double *) ( /* dim=0 */ (__pyx_v_probs.data + __pyx_t_3 * __pyx_v_probs.strides[0]) )) /= __pyx_v_norm_const;
}
/* "bmtools/exact/moments.pyx":270
*
*
* cdef void normalise_probabilities(double[:] probs, double norm_const) nogil: # <<<<<<<<<<<<<<
* """Divides an array of probabilities by a normalisation constant."""
* cdef long i
*/
/* function exit code */
}
/* "bmtools/exact/moments.pyx":277
*
*
* cdef void accum_moments_for_state_range( # <<<<<<<<<<<<<<
* double[:, :] weights, double[:] biases, state_t[:] state,
* double* norm_const, double[:] first_mom, double[:, :] second_mom,
*/
static void __pyx_f_7bmtools_5exact_7moments_accum_moments_for_state_range(__Pyx_memviewslice __pyx_v_weights, __Pyx_memviewslice __pyx_v_biases, __Pyx_memviewslice __pyx_v_state, double *__pyx_v_norm_const, __Pyx_memviewslice __pyx_v_first_mom, __Pyx_memviewslice __pyx_v_second_mom, long __pyx_v_start_state_index, long __pyx_v_end_state_index) {
double __pyx_v_prob;
long __pyx_v_state_index;
int __pyx_v_i;
int __pyx_v_j;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
Py_ssize_t __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
Py_ssize_t __pyx_t_6;
Py_ssize_t __pyx_t_7;
long __pyx_t_8;
long __pyx_t_9;
long __pyx_t_10;
Py_ssize_t __pyx_t_11;
Py_ssize_t __pyx_t_12;
Py_ssize_t __pyx_t_13;
Py_ssize_t __pyx_t_14;
Py_ssize_t __pyx_t_15;
Py_ssize_t __pyx_t_16;
/* "bmtools/exact/moments.pyx":285
* corresponding to a contiguous range of state integer indices.
* """
* cdef double prob = 0. # <<<<<<<<<<<<<<
* cdef long state_index
* cdef int i, j
*/
__pyx_v_prob = 0.;
/* "bmtools/exact/moments.pyx":288
* cdef long state_index
* cdef int i, j
* index_to_state(start_state_index, state) # <<<<<<<<<<<<<<
* for i in range(weights.shape[0]):
* first_mom[i] = 0.
*/
__pyx_f_7bmtools_5exact_7helpers_index_to_state(__pyx_v_start_state_index, __pyx_v_state, 0);
/* "bmtools/exact/moments.pyx":289
* cdef int i, j
* index_to_state(start_state_index, state)
* for i in range(weights.shape[0]): # <<<<<<<<<<<<<<
* first_mom[i] = 0.
* for j in range(i):
*/
__pyx_t_1 = (__pyx_v_weights.shape[0]);
for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) {
__pyx_v_i = __pyx_t_2;
/* "bmtools/exact/moments.pyx":290
* index_to_state(start_state_index, state)
* for i in range(weights.shape[0]):
* first_mom[i] = 0. # <<<<<<<<<<<<<<
* for j in range(i):
* second_mom[i, j] = 0.
*/
__pyx_t_3 = __pyx_v_i;
*((double *) ( /* dim=0 */ (__pyx_v_first_mom.data + __pyx_t_3 * __pyx_v_first_mom.strides[0]) )) = 0.;
/* "bmtools/exact/moments.pyx":291
* for i in range(weights.shape[0]):
* first_mom[i] = 0.
* for j in range(i): # <<<<<<<<<<<<<<
* second_mom[i, j] = 0.
* for state_index in range(start_state_index, end_state_index):
*/
__pyx_t_4 = __pyx_v_i;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
__pyx_v_j = __pyx_t_5;
/* "bmtools/exact/moments.pyx":292
* first_mom[i] = 0.
* for j in range(i):
* second_mom[i, j] = 0. # <<<<<<<<<<<<<<
* for state_index in range(start_state_index, end_state_index):
* prob = exp(neg_energy(state, weights, biases))
*/
__pyx_t_6 = __pyx_v_i;
__pyx_t_7 = __pyx_v_j;
*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_second_mom.data + __pyx_t_6 * __pyx_v_second_mom.strides[0]) ) + __pyx_t_7 * __pyx_v_second_mom.strides[1]) )) = 0.;
}
}
/* "bmtools/exact/moments.pyx":293
* for j in range(i):
* second_mom[i, j] = 0.
* for state_index in range(start_state_index, end_state_index): # <<<<<<<<<<<<<<
* prob = exp(neg_energy(state, weights, biases))
* norm_const[0] += prob
*/
__pyx_t_8 = __pyx_v_end_state_index;
for (__pyx_t_9 = __pyx_v_start_state_index; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) {
__pyx_v_state_index = __pyx_t_9;
/* "bmtools/exact/moments.pyx":294
* second_mom[i, j] = 0.
* for state_index in range(start_state_index, end_state_index):
* prob = exp(neg_energy(state, weights, biases)) # <<<<<<<<<<<<<<
* norm_const[0] += prob
* for i in range(state.shape[0]):
*/
__pyx_v_prob = exp(__pyx_f_7bmtools_5exact_7helpers_neg_energy(__pyx_v_state, __pyx_v_weights, __pyx_v_biases));
/* "bmtools/exact/moments.pyx":295
* for state_index in range(start_state_index, end_state_index):
* prob = exp(neg_energy(state, weights, biases))
* norm_const[0] += prob # <<<<<<<<<<<<<<
* for i in range(state.shape[0]):
* first_mom[i] += state[i] * prob
*/
__pyx_t_10 = 0;
(__pyx_v_norm_const[__pyx_t_10]) = ((__pyx_v_norm_const[__pyx_t_10]) + __pyx_v_prob);
/* "bmtools/exact/moments.pyx":296
* prob = exp(neg_energy(state, weights, biases))
* norm_const[0] += prob
* for i in range(state.shape[0]): # <<<<<<<<<<<<<<
* first_mom[i] += state[i] * prob
* for j in range(i):
*/
__pyx_t_1 = (__pyx_v_state.shape[0]);
for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) {
__pyx_v_i = __pyx_t_2;
/* "bmtools/exact/moments.pyx":297
* norm_const[0] += prob
* for i in range(state.shape[0]):
* first_mom[i] += state[i] * prob # <<<<<<<<<<<<<<
* for j in range(i):
* second_mom[i, j] += state[i] * state[j] * prob
*/
__pyx_t_11 = __pyx_v_i;
__pyx_t_12 = __pyx_v_i;
*((double *) ( /* dim=0 */ (__pyx_v_first_mom.data + __pyx_t_12 * __pyx_v_first_mom.strides[0]) )) += ((*((__pyx_t_7bmtools_5exact_7helpers_state_t *) ( /* dim=0 */ (__pyx_v_state.data + __pyx_t_11 * __pyx_v_state.strides[0]) ))) * __pyx_v_prob);
/* "bmtools/exact/moments.pyx":298
* for i in range(state.shape[0]):
* first_mom[i] += state[i] * prob
* for j in range(i): # <<<<<<<<<<<<<<
* second_mom[i, j] += state[i] * state[j] * prob
* next_state(state, state_index + 1)
*/
__pyx_t_4 = __pyx_v_i;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
__pyx_v_j = __pyx_t_5;
/* "bmtools/exact/moments.pyx":299
* first_mom[i] += state[i] * prob
* for j in range(i):
* second_mom[i, j] += state[i] * state[j] * prob # <<<<<<<<<<<<<<
* next_state(state, state_index + 1)
*
*/
__pyx_t_13 = __pyx_v_i;
__pyx_t_14 = __pyx_v_j;
__pyx_t_15 = __pyx_v_i;
__pyx_t_16 = __pyx_v_j;
*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_second_mom.data + __pyx_t_15 * __pyx_v_second_mom.strides[0]) ) + __pyx_t_16 * __pyx_v_second_mom.strides[1]) )) += (((*((__pyx_t_7bmtools_5exact_7helpers_state_t *) ( /* dim=0 */ (__pyx_v_state.data + __pyx_t_13 * __pyx_v_state.strides[0]) ))) * (*((__pyx_t_7bmtools_5exact_7helpers_state_t *) ( /* dim=0 */ (__pyx_v_state.data + __pyx_t_14 * __pyx_v_state.strides[0]) )))) * __pyx_v_prob);
}
}
/* "bmtools/exact/moments.pyx":300
* for j in range(i):
* second_mom[i, j] += state[i] * state[j] * prob
* next_state(state, state_index + 1) # <<<<<<<<<<<<<<
*
*
*/
__pyx_f_7bmtools_5exact_7helpers_next_state(__pyx_v_state, (__pyx_v_state_index + 1));
}
/* "bmtools/exact/moments.pyx":277
*
*
* cdef void accum_moments_for_state_range( # <<<<<<<<<<<<<<
* double[:, :] weights, double[:] biases, state_t[:] state,
* double* norm_const, double[:] first_mom, double[:, :] second_mom,
*/
/* function exit code */
}
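/* Editor's note (added comment): the accumulators above implement the
 * unnormalised moment sums over the assigned index range,
 *
 *     first_mom[i]     = sum_s s_i * exp(-E(s)),
 *     second_mom[i][j] = sum_s s_i * s_j * exp(-E(s))  (lower triangle, j < i),
 *
 * with norm_const[0] collecting sum_s exp(-E(s)). Dividing by the combined
 * norm_const later yields the expectations E[s_i] and E[s_i s_j]; only the
 * lower triangle is stored because the second moment matrix is symmetric,
 * and the diagonal is filled in afterwards. */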
/* "bmtools/exact/moments.pyx":303
*
*
* cdef double calc_norm_const(double[:,:] weights, double[:] biases, # <<<<<<<<<<<<<<
* state_t[:] state, long start_state_index=0,
* long end_state_index=-1) nogil:
*/
static double __pyx_f_7bmtools_5exact_7moments_calc_norm_const(__Pyx_memviewslice __pyx_v_weights, __Pyx_memviewslice __pyx_v_biases, __Pyx_memviewslice __pyx_v_state, struct __pyx_opt_args_7bmtools_5exact_7moments_calc_norm_const *__pyx_optional_args) {
long __pyx_v_start_state_index = ((long)0);
long __pyx_v_end_state_index = ((long)-1L);
double __pyx_v_norm_const;
long __pyx_v_state_index;
double __pyx_r;
int __pyx_t_1;
long __pyx_t_2;
long __pyx_t_3;
if (__pyx_optional_args) {
if (__pyx_optional_args->__pyx_n > 0) {
__pyx_v_start_state_index = __pyx_optional_args->start_state_index;
if (__pyx_optional_args->__pyx_n > 1) {
__pyx_v_end_state_index = __pyx_optional_args->end_state_index;
}
}
}
/* "bmtools/exact/moments.pyx":311
* including probabilities of states with indices in the specified range.
* """
* cdef double norm_const = 0. # <<<<<<<<<<<<<<
* cdef long state_index
* index_to_state(start_state_index, state)
*/
__pyx_v_norm_const = 0.;
/* "bmtools/exact/moments.pyx":313
* cdef double norm_const = 0.
* cdef long state_index
* index_to_state(start_state_index, state) # <<<<<<<<<<<<<<
* if end_state_index == -1:
* end_state_index = 2**weights.shape[0]
*/
__pyx_f_7bmtools_5exact_7helpers_index_to_state(__pyx_v_start_state_index, __pyx_v_state, 0);
/* "bmtools/exact/moments.pyx":314
* cdef long state_index
* index_to_state(start_state_index, state)
* if end_state_index == -1: # <<<<<<<<<<<<<<
* end_state_index = 2**weights.shape[0]
* for state_index in range(start_state_index, end_state_index):
*/
__pyx_t_1 = ((__pyx_v_end_state_index == -1L) != 0);
if (__pyx_t_1) {
/* "bmtools/exact/moments.pyx":315
* index_to_state(start_state_index, state)
* if end_state_index == -1:
* end_state_index = 2**weights.shape[0] # <<<<<<<<<<<<<<
* for state_index in range(start_state_index, end_state_index):
* norm_const += exp(neg_energy(state, weights, biases))
*/
__pyx_v_end_state_index = __Pyx_pow_Py_ssize_t(2, (__pyx_v_weights.shape[0]));
/* "bmtools/exact/moments.pyx":314
* cdef long state_index
* index_to_state(start_state_index, state)
* if end_state_index == -1: # <<<<<<<<<<<<<<
* end_state_index = 2**weights.shape[0]
* for state_index in range(start_state_index, end_state_index):
*/
}
/* "bmtools/exact/moments.pyx":316
* if end_state_index == -1:
* end_state_index = 2**weights.shape[0]
* for state_index in range(start_state_index, end_state_index): # <<<<<<<<<<<<<<
* norm_const += exp(neg_energy(state, weights, biases))
* next_state(state, state_index+1)
*/
__pyx_t_2 = __pyx_v_end_state_index;
for (__pyx_t_3 = __pyx_v_start_state_index; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_state_index = __pyx_t_3;
/* "bmtools/exact/moments.pyx":317
* end_state_index = 2**weights.shape[0]
* for state_index in range(start_state_index, end_state_index):
* norm_const += exp(neg_energy(state, weights, biases)) # <<<<<<<<<<<<<<
* next_state(state, state_index+1)
* return norm_const
*/
__pyx_v_norm_const = (__pyx_v_norm_const + exp(__pyx_f_7bmtools_5exact_7helpers_neg_energy(__pyx_v_state, __pyx_v_weights, __pyx_v_biases)));
/* "bmtools/exact/moments.pyx":318
* for state_index in range(start_state_index, end_state_index):
* norm_const += exp(neg_energy(state, weights, biases))
* next_state(state, state_index+1) # <<<<<<<<<<<<<<
* return norm_const
*
*/
__pyx_f_7bmtools_5exact_7helpers_next_state(__pyx_v_state, (__pyx_v_state_index + 1));
}
/* "bmtools/exact/moments.pyx":319
* norm_const += exp(neg_energy(state, weights, biases))
* next_state(state, state_index+1)
* return norm_const # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_norm_const;
goto __pyx_L0;
/* "bmtools/exact/moments.pyx":303
*
*
* cdef double calc_norm_const(double[:,:] weights, double[:] biases, # <<<<<<<<<<<<<<
* state_t[:] state, long start_state_index=0,
* long end_state_index=-1) nogil:
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
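/* Editor's note (added comment): calc_norm_const returns the partition
 * function restricted to an index range,
 *
 *     Z = sum over idx in [start_state_index, end_state_index) of
 *         exp(-E(state(idx))),
 *
 * defaulting to the full 2**num_units state space when end_state_index
 * is passed as -1. */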
/* "bmtools/exact/moments.pyx":322
*
*
* cdef void normalise_first_moment( # <<<<<<<<<<<<<<
* double[:] first_mom, double norm_const) nogil:
* """
*/
static void __pyx_f_7bmtools_5exact_7moments_normalise_first_moment(__Pyx_memviewslice __pyx_v_first_mom, double __pyx_v_norm_const) {
int __pyx_v_i;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
Py_ssize_t __pyx_t_3;
/* "bmtools/exact/moments.pyx":328
* """
* cdef int i
* for i in range(first_mom.shape[0]): # <<<<<<<<<<<<<<
* first_mom[i] /= norm_const
*
*/
__pyx_t_1 = (__pyx_v_first_mom.shape[0]);
for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) {
__pyx_v_i = __pyx_t_2;
/* "bmtools/exact/moments.pyx":329
* cdef int i
* for i in range(first_mom.shape[0]):
* first_mom[i] /= norm_const # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_v_i;
*((double *) ( /* dim=0 */ (__pyx_v_first_mom.data + __pyx_t_3 * __pyx_v_first_mom.strides[0]) )) /= __pyx_v_norm_const;
}
/* "bmtools/exact/moments.pyx":322
*
*
* cdef void normalise_first_moment( # <<<<<<<<<<<<<<
* double[:] first_mom, double norm_const) nogil:
* """
*/
/* function exit code */
}
/* "bmtools/exact/moments.pyx":332
*
*
* cdef void combine_and_normalise_first_moments( # <<<<<<<<<<<<<<
* double[:, :] first_moms, double norm_const) nogil:
* """
*/
static void __pyx_f_7bmtools_5exact_7moments_combine_and_normalise_first_moments(__Pyx_memviewslice __pyx_v_first_moms, double __pyx_v_norm_const) {
int __pyx_v_i;
int __pyx_v_j;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
Py_ssize_t __pyx_t_3;
int __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
Py_ssize_t __pyx_t_7;
Py_ssize_t __pyx_t_8;
int __pyx_t_9;
Py_ssize_t __pyx_t_10;
Py_ssize_t __pyx_t_11;
/* "bmtools/exact/moments.pyx":340
* """
* cdef int i, j
* for i in range(1, first_moms.shape[0]): # <<<<<<<<<<<<<<
* for j in range(first_moms.shape[1]):
* first_moms[0, j] += first_moms[i, j]
*/
__pyx_t_1 = (__pyx_v_first_moms.shape[0]);
for (__pyx_t_2 = 1; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) {
__pyx_v_i = __pyx_t_2;
/* "bmtools/exact/moments.pyx":341
* cdef int i, j
* for i in range(1, first_moms.shape[0]):
* for j in range(first_moms.shape[1]): # <<<<<<<<<<<<<<
* first_moms[0, j] += first_moms[i, j]
* if i == first_moms.shape[0] - 1:
*/
__pyx_t_3 = (__pyx_v_first_moms.shape[1]);
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_j = __pyx_t_4;
/* "bmtools/exact/moments.pyx":342
* for i in range(1, first_moms.shape[0]):
* for j in range(first_moms.shape[1]):
* first_moms[0, j] += first_moms[i, j] # <<<<<<<<<<<<<<
* if i == first_moms.shape[0] - 1:
* first_moms[0, j] /= norm_const
*/
__pyx_t_5 = __pyx_v_i;
__pyx_t_6 = __pyx_v_j;
__pyx_t_7 = 0;
__pyx_t_8 = __pyx_v_j;
*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_first_moms.data + __pyx_t_7 * __pyx_v_first_moms.strides[0]) ) + __pyx_t_8 * __pyx_v_first_moms.strides[1]) )) += (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_first_moms.data + __pyx_t_5 * __pyx_v_first_moms.strides[0]) ) + __pyx_t_6 * __pyx_v_first_moms.strides[1]) )));
/* "bmtools/exact/moments.pyx":343
* for j in range(first_moms.shape[1]):
* first_moms[0, j] += first_moms[i, j]
* if i == first_moms.shape[0] - 1: # <<<<<<<<<<<<<<
* first_moms[0, j] /= norm_const
*
*/
__pyx_t_9 = ((__pyx_v_i == ((__pyx_v_first_moms.shape[0]) - 1)) != 0);
if (__pyx_t_9) {
/* "bmtools/exact/moments.pyx":344
* first_moms[0, j] += first_moms[i, j]
* if i == first_moms.shape[0] - 1:
* first_moms[0, j] /= norm_const # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_10 = 0;
__pyx_t_11 = __pyx_v_j;
*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_first_moms.data + __pyx_t_10 * __pyx_v_first_moms.strides[0]) ) + __pyx_t_11 * __pyx_v_first_moms.strides[1]) )) /= __pyx_v_norm_const;
/* "bmtools/exact/moments.pyx":343
* for j in range(first_moms.shape[1]):
* first_moms[0, j] += first_moms[i, j]
* if i == first_moms.shape[0] - 1: # <<<<<<<<<<<<<<
* first_moms[0, j] /= norm_const
*
*/
}
}
}
/* "bmtools/exact/moments.pyx":332
*
*
* cdef void combine_and_normalise_first_moments( # <<<<<<<<<<<<<<
* double[:, :] first_moms, double norm_const) nogil:
* """
*/
/* function exit code */
}
/* "bmtools/exact/moments.pyx":347
*
*
* cdef void normalise_and_reflect_second_moment(double[:, :] second_mom, # <<<<<<<<<<<<<<
* double norm_const) nogil:
* """
*/
static void __pyx_f_7bmtools_5exact_7moments_normalise_and_reflect_second_moment(__Pyx_memviewslice __pyx_v_second_mom, double __pyx_v_norm_const) {
int __pyx_v_i;
int __pyx_v_j;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
Py_ssize_t __pyx_t_3;
Py_ssize_t __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
Py_ssize_t __pyx_t_7;
Py_ssize_t __pyx_t_8;
Py_ssize_t __pyx_t_9;
Py_ssize_t __pyx_t_10;
Py_ssize_t __pyx_t_11;
Py_ssize_t __pyx_t_12;
/* "bmtools/exact/moments.pyx":356
* """
* cdef int i, j
* for i in range(second_mom.shape[0]): # <<<<<<<<<<<<<<
* second_mom[i, i] = 1.
* for j in range(i):
*/
__pyx_t_1 = (__pyx_v_second_mom.shape[0]);
for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) {
__pyx_v_i = __pyx_t_2;
/* "bmtools/exact/moments.pyx":357
* cdef int i, j
* for i in range(second_mom.shape[0]):
* second_mom[i, i] = 1. # <<<<<<<<<<<<<<
* for j in range(i):
* second_mom[i, j] /= norm_const
*/
__pyx_t_3 = __pyx_v_i;
__pyx_t_4 = __pyx_v_i;
*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_second_mom.data + __pyx_t_3 * __pyx_v_second_mom.strides[0]) ) + __pyx_t_4 * __pyx_v_second_mom.strides[1]) )) = 1.;
/* "bmtools/exact/moments.pyx":358
* for i in range(second_mom.shape[0]):
* second_mom[i, i] = 1.
* for j in range(i): # <<<<<<<<<<<<<<
* second_mom[i, j] /= norm_const
* second_mom[j, i] = second_mom[i, j]
*/
__pyx_t_5 = __pyx_v_i;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_j = __pyx_t_6;
/* "bmtools/exact/moments.pyx":359
* second_mom[i, i] = 1.
* for j in range(i):
* second_mom[i, j] /= norm_const # <<<<<<<<<<<<<<
* second_mom[j, i] = second_mom[i, j]
*
*/
__pyx_t_7 = __pyx_v_i;
__pyx_t_8 = __pyx_v_j;
*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_second_mom.data + __pyx_t_7 * __pyx_v_second_mom.strides[0]) ) + __pyx_t_8 * __pyx_v_second_mom.strides[1]) )) /= __pyx_v_norm_const;
/* "bmtools/exact/moments.pyx":360
* for j in range(i):
* second_mom[i, j] /= norm_const
* second_mom[j, i] = second_mom[i, j] # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_9 = __pyx_v_i;
__pyx_t_10 = __pyx_v_j;
__pyx_t_11 = __pyx_v_j;
__pyx_t_12 = __pyx_v_i;
*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_second_mom.data + __pyx_t_11 * __pyx_v_second_mom.strides[0]) ) + __pyx_t_12 * __pyx_v_second_mom.strides[1]) )) = (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_second_mom.data + __pyx_t_9 * __pyx_v_second_mom.strides[0]) ) + __pyx_t_10 * __pyx_v_second_mom.strides[1]) )));
}
}
/* "bmtools/exact/moments.pyx":347
*
*
* cdef void normalise_and_reflect_second_moment(double[:, :] second_mom, # <<<<<<<<<<<<<<
* double norm_const) nogil:
* """
*/
/* function exit code */
}
/* "bmtools/exact/moments.pyx":363
*
*
* cdef void combine_normalise_and_reflect_second_moments( # <<<<<<<<<<<<<<
* double[:, :, :] second_moms, double norm_const) nogil:
* """
*/
static void __pyx_f_7bmtools_5exact_7moments_combine_normalise_and_reflect_second_moments(__Pyx_memviewslice __pyx_v_second_moms, double __pyx_v_norm_const) {
int __pyx_v_i;
int __pyx_v_j;
int __pyx_v_k;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
Py_ssize_t __pyx_t_3;
int __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
Py_ssize_t __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
Py_ssize_t __pyx_t_10;
Py_ssize_t __pyx_t_11;
Py_ssize_t __pyx_t_12;
Py_ssize_t __pyx_t_13;
Py_ssize_t __pyx_t_14;
Py_ssize_t __pyx_t_15;
int __pyx_t_16;
Py_ssize_t __pyx_t_17;
Py_ssize_t __pyx_t_18;
Py_ssize_t __pyx_t_19;
Py_ssize_t __pyx_t_20;
Py_ssize_t __pyx_t_21;
Py_ssize_t __pyx_t_22;
Py_ssize_t __pyx_t_23;
Py_ssize_t __pyx_t_24;
Py_ssize_t __pyx_t_25;
/* "bmtools/exact/moments.pyx":373
* """
* cdef int i, j, k
* for i in range(1, second_moms.shape[0]): # <<<<<<<<<<<<<<
* for j in range(second_moms.shape[1]):
* second_moms[0, j, j] = 1.
*/
__pyx_t_1 = (__pyx_v_second_moms.shape[0]);
for (__pyx_t_2 = 1; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) {
__pyx_v_i = __pyx_t_2;
/* "bmtools/exact/moments.pyx":374
* cdef int i, j, k
* for i in range(1, second_moms.shape[0]):
* for j in range(second_moms.shape[1]): # <<<<<<<<<<<<<<
* second_moms[0, j, j] = 1.
* for k in range(j):
*/
__pyx_t_3 = (__pyx_v_second_moms.shape[1]);
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_j = __pyx_t_4;
/* "bmtools/exact/moments.pyx":375
* for i in range(1, second_moms.shape[0]):
* for j in range(second_moms.shape[1]):
* second_moms[0, j, j] = 1. # <<<<<<<<<<<<<<
* for k in range(j):
* second_moms[0, j, k] += second_moms[i, j, k]
*/
__pyx_t_5 = 0;
__pyx_t_6 = __pyx_v_j;
__pyx_t_7 = __pyx_v_j;
*((double *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_second_moms.data + __pyx_t_5 * __pyx_v_second_moms.strides[0]) ) + __pyx_t_6 * __pyx_v_second_moms.strides[1]) ) + __pyx_t_7 * __pyx_v_second_moms.strides[2]) )) = 1.;
/* "bmtools/exact/moments.pyx":376
* for j in range(second_moms.shape[1]):
* second_moms[0, j, j] = 1.
* for k in range(j): # <<<<<<<<<<<<<<
* second_moms[0, j, k] += second_moms[i, j, k]
* if i == second_moms.shape[0] - 1:
*/
__pyx_t_8 = __pyx_v_j;
for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) {
__pyx_v_k = __pyx_t_9;
/* "bmtools/exact/moments.pyx":377
* second_moms[0, j, j] = 1.
* for k in range(j):
* second_moms[0, j, k] += second_moms[i, j, k] # <<<<<<<<<<<<<<
* if i == second_moms.shape[0] - 1:
* second_moms[0, j, k] /= norm_const
*/
__pyx_t_10 = __pyx_v_i;
__pyx_t_11 = __pyx_v_j;
__pyx_t_12 = __pyx_v_k;
__pyx_t_13 = 0;
__pyx_t_14 = __pyx_v_j;
__pyx_t_15 = __pyx_v_k;
*((double *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_second_moms.data + __pyx_t_13 * __pyx_v_second_moms.strides[0]) ) + __pyx_t_14 * __pyx_v_second_moms.strides[1]) ) + __pyx_t_15 * __pyx_v_second_moms.strides[2]) )) += (*((double *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_second_moms.data + __pyx_t_10 * __pyx_v_second_moms.strides[0]) ) + __pyx_t_11 * __pyx_v_second_moms.strides[1]) ) + __pyx_t_12 * __pyx_v_second_moms.strides[2]) )));
/* "bmtools/exact/moments.pyx":378
* for k in range(j):
* second_moms[0, j, k] += second_moms[i, j, k]
* if i == second_moms.shape[0] - 1: # <<<<<<<<<<<<<<
* second_moms[0, j, k] /= norm_const
* second_moms[0, k, j] = second_moms[0, j, k]
*/
__pyx_t_16 = ((__pyx_v_i == ((__pyx_v_second_moms.shape[0]) - 1)) != 0);
if (__pyx_t_16) {
/* "bmtools/exact/moments.pyx":379
* second_moms[0, j, k] += second_moms[i, j, k]
* if i == second_moms.shape[0] - 1:
* second_moms[0, j, k] /= norm_const # <<<<<<<<<<<<<<
* second_moms[0, k, j] = second_moms[0, j, k]
*/
__pyx_t_17 = 0;
__pyx_t_18 = __pyx_v_j;
__pyx_t_19 = __pyx_v_k;
*((double *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_second_moms.data + __pyx_t_17 * __pyx_v_second_moms.strides[0]) ) + __pyx_t_18 * __pyx_v_second_moms.strides[1]) ) + __pyx_t_19 * __pyx_v_second_moms.strides[2]) )) /= __pyx_v_norm_const;
/* "bmtools/exact/moments.pyx":380
* if i == second_moms.shape[0] - 1:
* second_moms[0, j, k] /= norm_const
* second_moms[0, k, j] = second_moms[0, j, k] # <<<<<<<<<<<<<<
*/
__pyx_t_20 = 0;
__pyx_t_21 = __pyx_v_j;
__pyx_t_22 = __pyx_v_k;
__pyx_t_23 = 0;
__pyx_t_24 = __pyx_v_k;
__pyx_t_25 = __pyx_v_j;
*((double *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_second_moms.data + __pyx_t_23 * __pyx_v_second_moms.strides[0]) ) + __pyx_t_24 * __pyx_v_second_moms.strides[1]) ) + __pyx_t_25 * __pyx_v_second_moms.strides[2]) )) = (*((double *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_second_moms.data + __pyx_t_20 * __pyx_v_second_moms.strides[0]) ) + __pyx_t_21 * __pyx_v_second_moms.strides[1]) ) + __pyx_t_22 * __pyx_v_second_moms.strides[2]) )));
/* "bmtools/exact/moments.pyx":378
* for k in range(j):
* second_moms[0, j, k] += second_moms[i, j, k]
* if i == second_moms.shape[0] - 1: # <<<<<<<<<<<<<<
* second_moms[0, j, k] /= norm_const
* second_moms[0, k, j] = second_moms[0, j, k]
*/
}
}
}
}
/* "bmtools/exact/moments.pyx":363
*
*
* cdef void combine_normalise_and_reflect_second_moments( # <<<<<<<<<<<<<<
* double[:, :, :] second_moms, double norm_const) nogil:
* """
*/
/* function exit code */
}
/* "View.MemoryView":120
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* Python wrapper */
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_shape = 0;
Py_ssize_t __pyx_v_itemsize;
PyObject *__pyx_v_format = 0;
PyObject *__pyx_v_mode = 0;
int __pyx_v_allocate_buffer;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0};
PyObject* values[5] = {0,0,0,0,0};
values[3] = ((PyObject *)__pyx_n_s_c);
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 120, __pyx_L3_error)
}
case 2:
if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 120, __pyx_L3_error)
}
case 3:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mode);
if (value) { values[3] = value; kw_args--; }
}
case 4:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_allocate_buffer);
if (value) { values[4] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 120, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_shape = ((PyObject*)values[0]);
__pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 120, __pyx_L3_error)
__pyx_v_format = values[2];
__pyx_v_mode = values[3];
if (values[4]) {
__pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 121, __pyx_L3_error)
} else {
/* "View.MemoryView":121
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,
* mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<<
*
* cdef int idx
*/
__pyx_v_allocate_buffer = ((int)1);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 120, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 120, __pyx_L1_error)
if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) {
PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 120, __pyx_L1_error)
}
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer);
/* "View.MemoryView":120
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) {
int __pyx_v_idx;
Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_dim;
PyObject **__pyx_v_p;
char __pyx_v_order;
int __pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
char *__pyx_t_6;
int __pyx_t_7;
Py_ssize_t __pyx_t_8;
PyObject *__pyx_t_9 = NULL;
PyObject *__pyx_t_10 = NULL;
__Pyx_RefNannySetupContext("__cinit__", 0);
__Pyx_INCREF(__pyx_v_format);
/* "View.MemoryView":127
* cdef PyObject **p
*
* self.ndim = <int> len(shape) # <<<<<<<<<<<<<<
* self.itemsize = itemsize
*
*/
if (unlikely(__pyx_v_shape == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 127, __pyx_L1_error)
}
__pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == -1)) __PYX_ERR(1, 127, __pyx_L1_error)
__pyx_v_self->ndim = ((int)__pyx_t_1);
/* "View.MemoryView":128
*
* self.ndim = <int> len(shape)
* self.itemsize = itemsize # <<<<<<<<<<<<<<
*
* if not self.ndim:
*/
__pyx_v_self->itemsize = __pyx_v_itemsize;
/* "View.MemoryView":130
* self.itemsize = itemsize
*
* if not self.ndim: # <<<<<<<<<<<<<<
* raise ValueError("Empty shape tuple for cython.array")
*
*/
__pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":131
*
* if not self.ndim:
* raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
*
* if itemsize <= 0:
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 131, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 131, __pyx_L1_error)
/* "View.MemoryView":130
* self.itemsize = itemsize
*
* if not self.ndim: # <<<<<<<<<<<<<<
* raise ValueError("Empty shape tuple for cython.array")
*
*/
}
/* "View.MemoryView":133
* raise ValueError("Empty shape tuple for cython.array")
*
* if itemsize <= 0: # <<<<<<<<<<<<<<
* raise ValueError("itemsize <= 0 for cython.array")
*
*/
__pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":134
*
* if itemsize <= 0:
* raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
*
* if not isinstance(format, bytes):
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 134, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 134, __pyx_L1_error)
/* "View.MemoryView":133
* raise ValueError("Empty shape tuple for cython.array")
*
* if itemsize <= 0: # <<<<<<<<<<<<<<
* raise ValueError("itemsize <= 0 for cython.array")
*
*/
}
/* "View.MemoryView":136
* raise ValueError("itemsize <= 0 for cython.array")
*
* if not isinstance(format, bytes): # <<<<<<<<<<<<<<
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
*/
__pyx_t_2 = PyBytes_Check(__pyx_v_format);
__pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":137
*
* if not isinstance(format, bytes):
* format = format.encode('ASCII') # <<<<<<<<<<<<<<
* self._format = format # keep a reference to the byte string
* self.format = self._format
*/
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 137, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 137, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF_SET(__pyx_v_format, __pyx_t_5);
__pyx_t_5 = 0;
/* "View.MemoryView":136
* raise ValueError("itemsize <= 0 for cython.array")
*
* if not isinstance(format, bytes): # <<<<<<<<<<<<<<
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
*/
}
/* "View.MemoryView":138
* if not isinstance(format, bytes):
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<<
* self.format = self._format
*
*/
if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 138, __pyx_L1_error)
__pyx_t_5 = __pyx_v_format;
__Pyx_INCREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_v_self->_format);
__Pyx_DECREF(__pyx_v_self->_format);
__pyx_v_self->_format = ((PyObject*)__pyx_t_5);
__pyx_t_5 = 0;
/* "View.MemoryView":139
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
* self.format = self._format # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_6 = __Pyx_PyObject_AsString(__pyx_v_self->_format); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(1, 139, __pyx_L1_error)
__pyx_v_self->format = __pyx_t_6;
/* "View.MemoryView":142
*
*
* self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<<
* self._strides = self._shape + self.ndim
*
*/
__pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2)));
/* "View.MemoryView":143
*
* self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2)
* self._strides = self._shape + self.ndim # <<<<<<<<<<<<<<
*
* if not self._shape:
*/
__pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim);
/* "View.MemoryView":145
* self._strides = self._shape + self.ndim
*
* if not self._shape: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate shape and strides.")
*
*/
__pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":146
*
* if not self._shape:
* raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 146, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__PYX_ERR(1, 146, __pyx_L1_error)
/* "View.MemoryView":145
* self._strides = self._shape + self.ndim
*
* if not self._shape: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate shape and strides.")
*
*/
}
/* "View.MemoryView":149
*
*
* for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
*/
__pyx_t_7 = 0;
__pyx_t_5 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_5); __pyx_t_1 = 0;
for (;;) {
if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_5)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_5, __pyx_t_1); __Pyx_INCREF(__pyx_t_3); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 149, __pyx_L1_error)
#else
__pyx_t_3 = PySequence_ITEM(__pyx_t_5, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 149, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
#endif
__pyx_t_8 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_8 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 149, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_dim = __pyx_t_8;
__pyx_v_idx = __pyx_t_7;
__pyx_t_7 = (__pyx_t_7 + 1);
/* "View.MemoryView":150
*
* for idx, dim in enumerate(shape):
* if dim <= 0: # <<<<<<<<<<<<<<
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim
*/
__pyx_t_4 = ((__pyx_v_dim <= 0) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":151
* for idx, dim in enumerate(shape):
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<<
* self._shape[idx] = dim
*
*/
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_9 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_9);
__pyx_t_3 = 0;
__pyx_t_9 = 0;
__pyx_t_9 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__pyx_t_10 = PyTuple_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_GIVEREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_9);
__pyx_t_9 = 0;
__pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_10, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__Pyx_Raise(__pyx_t_9, 0, 0, 0);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__PYX_ERR(1, 151, __pyx_L1_error)
/* "View.MemoryView":150
*
* for idx, dim in enumerate(shape):
* if dim <= 0: # <<<<<<<<<<<<<<
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim
*/
}
/* "View.MemoryView":152
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim # <<<<<<<<<<<<<<
*
* cdef char order
*/
(__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim;
/* "View.MemoryView":149
*
*
* for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
*/
}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
/* "View.MemoryView":155
*
* cdef char order
* if mode == 'fortran': # <<<<<<<<<<<<<<
* order = b'F'
* self.mode = u'fortran'
*/
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 155, __pyx_L1_error)
if (__pyx_t_4) {
/* "View.MemoryView":156
* cdef char order
* if mode == 'fortran':
* order = b'F' # <<<<<<<<<<<<<<
* self.mode = u'fortran'
* elif mode == 'c':
*/
__pyx_v_order = 'F';
/* "View.MemoryView":157
* if mode == 'fortran':
* order = b'F'
* self.mode = u'fortran' # <<<<<<<<<<<<<<
* elif mode == 'c':
* order = b'C'
*/
__Pyx_INCREF(__pyx_n_u_fortran);
__Pyx_GIVEREF(__pyx_n_u_fortran);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_fortran;
/* "View.MemoryView":155
*
* cdef char order
* if mode == 'fortran': # <<<<<<<<<<<<<<
* order = b'F'
* self.mode = u'fortran'
*/
goto __pyx_L10;
}
/* "View.MemoryView":158
* order = b'F'
* self.mode = u'fortran'
* elif mode == 'c': # <<<<<<<<<<<<<<
* order = b'C'
* self.mode = u'c'
*/
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 158, __pyx_L1_error)
if (__pyx_t_4) {
/* "View.MemoryView":159
* self.mode = u'fortran'
* elif mode == 'c':
* order = b'C' # <<<<<<<<<<<<<<
* self.mode = u'c'
* else:
*/
__pyx_v_order = 'C';
/* "View.MemoryView":160
* elif mode == 'c':
* order = b'C'
* self.mode = u'c' # <<<<<<<<<<<<<<
* else:
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
*/
__Pyx_INCREF(__pyx_n_u_c);
__Pyx_GIVEREF(__pyx_n_u_c);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_c;
/* "View.MemoryView":158
* order = b'F'
* self.mode = u'fortran'
* elif mode == 'c': # <<<<<<<<<<<<<<
* order = b'C'
* self.mode = u'c'
*/
goto __pyx_L10;
}
/* "View.MemoryView":162
* self.mode = u'c'
* else:
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<<
*
* self.len = fill_contig_strides_array(self._shape, self._strides,
*/
/*else*/ {
__pyx_t_5 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 162, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 162, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_5);
__pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_9, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 162, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__PYX_ERR(1, 162, __pyx_L1_error)
}
__pyx_L10:;
/* "View.MemoryView":164
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
*
* self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<<
* itemsize, self.ndim, order)
*
*/
__pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order);
/* "View.MemoryView":167
* itemsize, self.ndim, order)
*
* self.free_data = allocate_buffer # <<<<<<<<<<<<<<
* self.dtype_is_object = format == b'O'
* if allocate_buffer:
*/
__pyx_v_self->free_data = __pyx_v_allocate_buffer;
/* "View.MemoryView":168
*
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<<
* if allocate_buffer:
*
*/
__pyx_t_5 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 168, __pyx_L1_error)
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 168, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_self->dtype_is_object = __pyx_t_4;
/* "View.MemoryView":169
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O'
* if allocate_buffer: # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_4 = (__pyx_v_allocate_buffer != 0);
if (__pyx_t_4) {
/* "View.MemoryView":172
*
*
* self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<<
* if not self.data:
* raise MemoryError("unable to allocate array data.")
*/
__pyx_v_self->data = ((char *)malloc(__pyx_v_self->len));
/* "View.MemoryView":173
*
* self.data = <char *>malloc(self.len)
* if not self.data: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate array data.")
*
*/
__pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":174
* self.data = <char *>malloc(self.len)
* if not self.data:
* raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 174, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__PYX_ERR(1, 174, __pyx_L1_error)
/* "View.MemoryView":173
*
* self.data = <char *>malloc(self.len)
* if not self.data: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate array data.")
*
*/
}
/* "View.MemoryView":176
* raise MemoryError("unable to allocate array data.")
*
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
*/
__pyx_t_4 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_4) {
/* "View.MemoryView":177
*
* if self.dtype_is_object:
* p = <PyObject **> self.data # <<<<<<<<<<<<<<
* for i in range(self.len / itemsize):
* p[i] = Py_None
*/
__pyx_v_p = ((PyObject **)__pyx_v_self->data);
/* "View.MemoryView":178
* if self.dtype_is_object:
* p = <PyObject **> self.data
* for i in range(self.len / itemsize): # <<<<<<<<<<<<<<
* p[i] = Py_None
* Py_INCREF(Py_None)
*/
if (unlikely(__pyx_v_itemsize == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
__PYX_ERR(1, 178, __pyx_L1_error)
}
else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) {
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
__PYX_ERR(1, 178, __pyx_L1_error)
}
__pyx_t_1 = (__pyx_v_self->len / __pyx_v_itemsize);
for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_1; __pyx_t_8+=1) {
__pyx_v_i = __pyx_t_8;
/* "View.MemoryView":179
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
* p[i] = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
(__pyx_v_p[__pyx_v_i]) = Py_None;
/* "View.MemoryView":180
* for i in range(self.len / itemsize):
* p[i] = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
Py_INCREF(Py_None);
}
/* "View.MemoryView":176
* raise MemoryError("unable to allocate array data.")
*
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
*/
}
/* "View.MemoryView":169
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O'
* if allocate_buffer: # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":120
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_format);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":183
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == u"c":
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_bufmode;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
char *__pyx_t_4;
Py_ssize_t __pyx_t_5;
int __pyx_t_6;
Py_ssize_t *__pyx_t_7;
__Pyx_RefNannySetupContext("__getbuffer__", 0);
if (__pyx_v_info != NULL) {
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
}
/* "View.MemoryView":184
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1 # <<<<<<<<<<<<<<
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
*/
__pyx_v_bufmode = -1;
/* "View.MemoryView":185
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1
* if self.mode == u"c": # <<<<<<<<<<<<<<
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
*/
__pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 185, __pyx_L1_error)
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":186
* cdef int bufmode = -1
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
*/
__pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
/* "View.MemoryView":185
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1
* if self.mode == u"c": # <<<<<<<<<<<<<<
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
*/
goto __pyx_L3;
}
/* "View.MemoryView":187
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran": # <<<<<<<<<<<<<<
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
*/
__pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 187, __pyx_L1_error)
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":188
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.")
*/
__pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
/* "View.MemoryView":187
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran": # <<<<<<<<<<<<<<
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
*/
}
__pyx_L3:;
/* "View.MemoryView":189
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode): # <<<<<<<<<<<<<<
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
*/
__pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":190
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
* info.buf = self.data
* info.len = self.len
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 190, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 190, __pyx_L1_error)
/* "View.MemoryView":189
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode): # <<<<<<<<<<<<<<
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
*/
}
/* "View.MemoryView":191
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data # <<<<<<<<<<<<<<
* info.len = self.len
* info.ndim = self.ndim
*/
__pyx_t_4 = __pyx_v_self->data;
__pyx_v_info->buf = __pyx_t_4;
/* "View.MemoryView":192
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
* info.len = self.len # <<<<<<<<<<<<<<
* info.ndim = self.ndim
* info.shape = self._shape
*/
__pyx_t_5 = __pyx_v_self->len;
__pyx_v_info->len = __pyx_t_5;
/* "View.MemoryView":193
* info.buf = self.data
* info.len = self.len
* info.ndim = self.ndim # <<<<<<<<<<<<<<
* info.shape = self._shape
* info.strides = self._strides
*/
__pyx_t_6 = __pyx_v_self->ndim;
__pyx_v_info->ndim = __pyx_t_6;
/* "View.MemoryView":194
* info.len = self.len
* info.ndim = self.ndim
* info.shape = self._shape # <<<<<<<<<<<<<<
* info.strides = self._strides
* info.suboffsets = NULL
*/
__pyx_t_7 = __pyx_v_self->_shape;
__pyx_v_info->shape = __pyx_t_7;
/* "View.MemoryView":195
* info.ndim = self.ndim
* info.shape = self._shape
* info.strides = self._strides # <<<<<<<<<<<<<<
* info.suboffsets = NULL
* info.itemsize = self.itemsize
*/
__pyx_t_7 = __pyx_v_self->_strides;
__pyx_v_info->strides = __pyx_t_7;
/* "View.MemoryView":196
* info.shape = self._shape
* info.strides = self._strides
* info.suboffsets = NULL # <<<<<<<<<<<<<<
* info.itemsize = self.itemsize
* info.readonly = 0
*/
__pyx_v_info->suboffsets = NULL;
/* "View.MemoryView":197
* info.strides = self._strides
* info.suboffsets = NULL
* info.itemsize = self.itemsize # <<<<<<<<<<<<<<
* info.readonly = 0
*
*/
__pyx_t_5 = __pyx_v_self->itemsize;
__pyx_v_info->itemsize = __pyx_t_5;
/* "View.MemoryView":198
* info.suboffsets = NULL
* info.itemsize = self.itemsize
* info.readonly = 0 # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
__pyx_v_info->readonly = 0;
/* "View.MemoryView":200
* info.readonly = 0
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.format
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":201
*
* if flags & PyBUF_FORMAT:
* info.format = self.format # <<<<<<<<<<<<<<
* else:
* info.format = NULL
*/
__pyx_t_4 = __pyx_v_self->format;
__pyx_v_info->format = __pyx_t_4;
/* "View.MemoryView":200
* info.readonly = 0
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.format
* else:
*/
goto __pyx_L5;
}
/* "View.MemoryView":203
* info.format = self.format
* else:
* info.format = NULL # <<<<<<<<<<<<<<
*
* info.obj = self
*/
/*else*/ {
__pyx_v_info->format = NULL;
}
__pyx_L5:;
/* "View.MemoryView":205
* info.format = NULL
*
* info.obj = self # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":183
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == u"c":
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(Py_None);
__Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":209
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* Python wrapper */
static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_array___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":210
*
* def __dealloc__(array self):
* if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
* self.callback_free_data(self.data)
* elif self.free_data:
*/
__pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":211
* def __dealloc__(array self):
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data) # <<<<<<<<<<<<<<
* elif self.free_data:
* if self.dtype_is_object:
*/
__pyx_v_self->callback_free_data(__pyx_v_self->data);
/* "View.MemoryView":210
*
* def __dealloc__(array self):
* if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
* self.callback_free_data(self.data)
* elif self.free_data:
*/
goto __pyx_L3;
}
/* "View.MemoryView":212
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
* elif self.free_data: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape,
*/
__pyx_t_1 = (__pyx_v_self->free_data != 0);
if (__pyx_t_1) {
/* "View.MemoryView":213
* self.callback_free_data(self.data)
* elif self.free_data:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
*/
__pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":214
* elif self.free_data:
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<<
* self._strides, self.ndim, False)
* free(self.data)
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0);
/* "View.MemoryView":213
* self.callback_free_data(self.data)
* elif self.free_data:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
*/
}
/* "View.MemoryView":216
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
* free(self.data) # <<<<<<<<<<<<<<
* PyObject_Free(self._shape)
*
*/
free(__pyx_v_self->data);
/* "View.MemoryView":212
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
* elif self.free_data: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape,
*/
}
__pyx_L3:;
/* "View.MemoryView":217
* self._strides, self.ndim, False)
* free(self.data)
* PyObject_Free(self._shape) # <<<<<<<<<<<<<<
*
* @property
*/
PyObject_Free(__pyx_v_self->_shape);
/* "View.MemoryView":209
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":220
*
* @property
* def memview(self): # <<<<<<<<<<<<<<
* return self.get_memview()
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":221
* @property
* def memview(self):
* return self.get_memview() # <<<<<<<<<<<<<<
*
* @cname('get_memview')
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 221, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":220
*
* @property
* def memview(self): # <<<<<<<<<<<<<<
* return self.get_memview()
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":224
*
* @cname('get_memview')
* cdef get_memview(self): # <<<<<<<<<<<<<<
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object)
*/
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) {
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("get_memview", 0);
/* "View.MemoryView":225
* @cname('get_memview')
* cdef get_memview(self):
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<<
* return memoryview(self, flags, self.dtype_is_object)
*
*/
__pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE);
/* "View.MemoryView":226
* cdef get_memview(self):
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 226, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 226, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 226, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 226, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":224
*
* @cname('get_memview')
* cdef get_memview(self): # <<<<<<<<<<<<<<
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":229
*
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* Python wrapper */
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("__getattr__", 0);
/* "View.MemoryView":230
*
* def __getattr__(self, attr):
* return getattr(self.memview, attr) # <<<<<<<<<<<<<<
*
* def __getitem__(self, item):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 230, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 230, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":229
*
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":232
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* Python wrapper */
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":233
*
* def __getitem__(self, item):
* return self.memview[item] # <<<<<<<<<<<<<<
*
* def __setitem__(self, item, value):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 233, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 233, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":232
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":235
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* Python wrapper */
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__setitem__", 0);
/* "View.MemoryView":236
*
* def __setitem__(self, item, value):
* self.memview[item] = value # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 236, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 236, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":235
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":240
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) {
struct __pyx_array_obj *__pyx_v_result = 0;
struct __pyx_array_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
__Pyx_RefNannySetupContext("array_cwrapper", 0);
/* "View.MemoryView":244
* cdef array result
*
* if buf == NULL: # <<<<<<<<<<<<<<
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
*/
__pyx_t_1 = ((__pyx_v_buf == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":245
*
* if buf == NULL:
* result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<<
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'),
*/
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 245, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 245, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 245, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 245, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(__pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4);
__pyx_t_2 = 0;
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 245, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":244
* cdef array result
*
* if buf == NULL: # <<<<<<<<<<<<<<
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":247
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
* allocate_buffer=False)
* result.data = buf
*/
/*else*/ {
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 247, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 247, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 247, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 247, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3);
__pyx_t_4 = 0;
__pyx_t_5 = 0;
__pyx_t_3 = 0;
/* "View.MemoryView":248
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'),
* allocate_buffer=False) # <<<<<<<<<<<<<<
* result.data = buf
*
*/
__pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 248, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 248, __pyx_L1_error)
/* "View.MemoryView":247
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
* allocate_buffer=False)
* result.data = buf
*/
__pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 247, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5);
__pyx_t_5 = 0;
/* "View.MemoryView":249
* result = array(shape, itemsize, format, mode.decode('ASCII'),
* allocate_buffer=False)
* result.data = buf # <<<<<<<<<<<<<<
*
* return result
*/
__pyx_v_result->data = __pyx_v_buf;
}
__pyx_L3:;
/* "View.MemoryView":251
* result.data = buf
*
* return result # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "View.MemoryView":240
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
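/* NOTE on array_cwrapper (above): this is the generated body of the
 * @cname("__pyx_array_new") factory. The two branches mirror the Cython
 * source: with buf == NULL the array constructor allocates its own
 * storage; with a caller-supplied buf the constructor is invoked with
 * allocate_buffer=False (passed via a one-entry kwargs dict) and the
 * borrowed pointer is then patched in directly via result->data = buf.
 * The temporaries __pyx_t_2..__pyx_t_4 are zeroed once ownership of each
 * reference has been handed to the argument tuple with PyTuple_SET_ITEM. */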
/* "View.MemoryView":277
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* Python wrapper */
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_name = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0};
PyObject* values[1] = {0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 277, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
}
__pyx_v_name = values[0];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 277, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__", 0);
/* "View.MemoryView":278
* cdef object name
* def __init__(self, name):
* self.name = name # <<<<<<<<<<<<<<
* def __repr__(self):
* return self.name
*/
__Pyx_INCREF(__pyx_v_name);
__Pyx_GIVEREF(__pyx_v_name);
__Pyx_GOTREF(__pyx_v_self->name);
__Pyx_DECREF(__pyx_v_self->name);
__pyx_v_self->name = __pyx_v_name;
/* "View.MemoryView":277
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* function exit code */
__pyx_r = 0;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
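/* NOTE on Enum.__init__ (above): Cython splits every def method into a
 * Python-callable wrapper, which unpacks args/kwargs and reports arity
 * errors via __Pyx_RaiseArgtupleInvalid, and a _pyx_pf_ implementation
 * holding the real body. The INCREF/GIVEREF/GOTREF/DECREF sequence around
 * self->name is equivalent to the standard safe attribute store:
 *
 *     Py_INCREF(new); tmp = self->name; self->name = new; Py_DECREF(tmp);
 *
 * i.e. the new value is retained before the old one is released, so the
 * store is safe even when new == old. The GIVEREF/GOTREF calls are
 * RefNanny bookkeeping and compile to nothing in non-refnanny builds. */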
/* "View.MemoryView":279
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* Python wrapper */
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":280
* self.name = name
* def __repr__(self):
* return self.name # <<<<<<<<<<<<<<
*
* cdef generic = Enum("<strided and direct or indirect>")
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->name);
__pyx_r = __pyx_v_self->name;
goto __pyx_L0;
/* "View.MemoryView":279
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
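/* NOTE on Enum.__repr__ (above): the body simply returns a new reference
 * to the stored self->name. The XDECREF of __pyx_r before the return is
 * dead in practice here (the return slot starts out NULL), but the code
 * generator emits it uniformly for every early return. */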
/* "View.MemoryView":294
*
* @cname('__pyx_align_pointer')
* cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<<
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
*/
static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) {
Py_intptr_t __pyx_v_aligned_p;
size_t __pyx_v_offset;
void *__pyx_r;
int __pyx_t_1;
/* "View.MemoryView":296
* cdef void *align_pointer(void *memory, size_t alignment) nogil:
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<<
* cdef size_t offset
*
*/
__pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory);
/* "View.MemoryView":300
*
* with cython.cdivision(True):
* offset = aligned_p % alignment # <<<<<<<<<<<<<<
*
* if offset > 0:
*/
__pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment);
/* "View.MemoryView":302
* offset = aligned_p % alignment
*
* if offset > 0: # <<<<<<<<<<<<<<
* aligned_p += alignment - offset
*
*/
__pyx_t_1 = ((__pyx_v_offset > 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":303
*
* if offset > 0:
* aligned_p += alignment - offset # <<<<<<<<<<<<<<
*
* return <void *> aligned_p
*/
__pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset));
/* "View.MemoryView":302
* offset = aligned_p % alignment
*
* if offset > 0: # <<<<<<<<<<<<<<
* aligned_p += alignment - offset
*
*/
}
/* "View.MemoryView":305
* aligned_p += alignment - offset
*
* return <void *> aligned_p # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = ((void *)__pyx_v_aligned_p);
goto __pyx_L0;
/* "View.MemoryView":294
*
* @cname('__pyx_align_pointer')
* cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<<
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
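/* NOTE on align_pointer (above): plain round-up-to-boundary arithmetic
 * using C division semantics (the Cython source wraps the modulo in
 * cython.cdivision(True), so no Python-style sign fixup is emitted).
 * Worked example, assuming alignment = 8:
 *
 *     memory = 0x1003  ->  offset    = 0x1003 % 8 = 3
 *                      ->  aligned_p = 0x1003 + (8 - 3) = 0x1008
 *
 * A pointer that is already aligned (offset == 0) is returned unchanged.
 * Callers are presumably expected to over-allocate by alignment - 1
 * bytes so the rounded-up pointer still lies inside the allocation. */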
/* "View.MemoryView":341
* cdef __Pyx_TypeInfo *typeinfo
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
* self.obj = obj
* self.flags = flags
*/
/* Python wrapper */
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_obj = 0;
int __pyx_v_flags;
int __pyx_v_dtype_is_object;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 341, __pyx_L3_error)
}
case 2:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dtype_is_object);
if (value) { values[2] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 341, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_obj = values[0];
__pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 341, __pyx_L3_error)
if (values[2]) {
__pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 341, __pyx_L3_error)
} else {
__pyx_v_dtype_is_object = ((int)0);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 341, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
__Pyx_RefNannySetupContext("__cinit__", 0);
/* "View.MemoryView":342
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
* self.obj = obj # <<<<<<<<<<<<<<
* self.flags = flags
* if type(self) is memoryview or obj is not None:
*/
__Pyx_INCREF(__pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
__Pyx_GOTREF(__pyx_v_self->obj);
__Pyx_DECREF(__pyx_v_self->obj);
__pyx_v_self->obj = __pyx_v_obj;
/* "View.MemoryView":343
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
* self.obj = obj
* self.flags = flags # <<<<<<<<<<<<<<
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
*/
__pyx_v_self->flags = __pyx_v_flags;
/* "View.MemoryView":344
* self.obj = obj
* self.flags = flags
* if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
*/
__pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type));
__pyx_t_3 = (__pyx_t_2 != 0);
if (!__pyx_t_3) {
} else {
__pyx_t_1 = __pyx_t_3;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_3 = (__pyx_v_obj != Py_None);
__pyx_t_2 = (__pyx_t_3 != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (__pyx_t_1) {
/* "View.MemoryView":345
* self.flags = flags
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<<
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None
*/
__pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == -1)) __PYX_ERR(1, 345, __pyx_L1_error)
/* "View.MemoryView":346
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None)
*/
__pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":347
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None;
/* "View.MemoryView":348
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* global __pyx_memoryview_thread_locks_used
*/
Py_INCREF(Py_None);
/* "View.MemoryView":346
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None)
*/
}
/* "View.MemoryView":344
* self.obj = obj
* self.flags = flags
* if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
*/
}
/* "View.MemoryView":351
*
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<<
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
*/
__pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); /* 8 == THREAD_LOCKS_PREALLOCATED, expanded by the code generator */
if (__pyx_t_1) {
/* "View.MemoryView":352
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL:
*/
__pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
/* "View.MemoryView":353
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<<
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
*/
__pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1);
/* "View.MemoryView":351
*
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<<
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
*/
}
/* "View.MemoryView":354
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL: # <<<<<<<<<<<<<<
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
*/
__pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":355
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<<
* if self.lock is NULL:
* raise MemoryError
*/
__pyx_v_self->lock = PyThread_allocate_lock();
/* "View.MemoryView":356
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL: # <<<<<<<<<<<<<<
* raise MemoryError
*
*/
__pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":357
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
* raise MemoryError # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
PyErr_NoMemory(); __PYX_ERR(1, 357, __pyx_L1_error)
/* "View.MemoryView":356
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL: # <<<<<<<<<<<<<<
* raise MemoryError
*
*/
}
/* "View.MemoryView":354
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL: # <<<<<<<<<<<<<<
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
*/
}
/* "View.MemoryView":359
* raise MemoryError
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":360
*
* if flags & PyBUF_FORMAT:
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<<
* else:
* self.dtype_is_object = dtype_is_object
*/
__pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L11_bool_binop_done:;
__pyx_v_self->dtype_is_object = __pyx_t_1;
/* "View.MemoryView":359
* raise MemoryError
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
*/
goto __pyx_L10;
}
/* "View.MemoryView":362
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
* self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<<
*
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
*/
/*else*/ {
__pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object;
}
__pyx_L10:;
/* "View.MemoryView":364
* self.dtype_is_object = dtype_is_object
*
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<<
* <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
* self.typeinfo = NULL
*/
__pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int))));
/* "View.MemoryView":366
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
* <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
* self.typeinfo = NULL # <<<<<<<<<<<<<<
*
* def __dealloc__(memoryview self):
*/
__pyx_v_self->typeinfo = NULL;
/* "View.MemoryView":341
* cdef __Pyx_TypeInfo *typeinfo
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
* self.obj = obj
* self.flags = flags
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
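/* NOTE on memoryview.__cinit__ (above): after storing obj and flags, the
 * constructor acquires the underlying buffer with __Pyx_GetBuffer; the
 * `type(self) is memoryview or obj is not None` guard lets subclasses
 * opt out of acquisition by passing obj=None. If the exporter left
 * view.obj NULL, Py_None is substituted (with its refcount bumped) so
 * release paths can treat view.obj uniformly. A thread lock is taken
 * from the small preallocated pool while one is free, and only otherwise
 * allocated with PyThread_allocate_lock (raising MemoryError on failure).
 * dtype_is_object is auto-detected from a format string of exactly "O"
 * when PyBUF_FORMAT was requested; otherwise the caller's flag is used. */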
/* "View.MemoryView":368
* self.typeinfo = NULL
*
* def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
*/
/* Python wrapper */
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) {
int __pyx_v_i;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
PyThread_type_lock __pyx_t_5;
PyThread_type_lock __pyx_t_6;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":369
*
* def __dealloc__(memoryview self):
* if self.obj is not None: # <<<<<<<<<<<<<<
* __Pyx_ReleaseBuffer(&self.view)
*
*/
__pyx_t_1 = (__pyx_v_self->obj != Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":370
* def __dealloc__(memoryview self):
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<<
*
* cdef int i
*/
__Pyx_ReleaseBuffer((&__pyx_v_self->view));
/* "View.MemoryView":369
*
* def __dealloc__(memoryview self):
* if self.obj is not None: # <<<<<<<<<<<<<<
* __Pyx_ReleaseBuffer(&self.view)
*
*/
}
/* "View.MemoryView":374
* cdef int i
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL: # <<<<<<<<<<<<<<
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
*/
__pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":375
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<<
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
*/
__pyx_t_3 = __pyx_memoryview_thread_locks_used;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":376
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
*/
__pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":377
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<<
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
*/
__pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1);
/* "View.MemoryView":378
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
*/
__pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":380
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<<
* break
* else:
*/
__pyx_t_5 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
__pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_v_i]);
/* "View.MemoryView":379
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
* break
*/
(__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_5;
(__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_6;
/* "View.MemoryView":378
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
*/
}
/* "View.MemoryView":381
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
* break # <<<<<<<<<<<<<<
* else:
* PyThread_free_lock(self.lock)
*/
goto __pyx_L6_break;
/* "View.MemoryView":376
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
*/
}
}
/*else*/ {
/* "View.MemoryView":383
* break
* else:
* PyThread_free_lock(self.lock) # <<<<<<<<<<<<<<
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL:
*/
PyThread_free_lock(__pyx_v_self->lock);
}
__pyx_L6_break:;
/* "View.MemoryView":374
* cdef int i
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL: # <<<<<<<<<<<<<<
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
*/
}
/* "View.MemoryView":368
* self.typeinfo = NULL
*
* def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
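/* NOTE on memoryview.__dealloc__ (above): the buffer is released only
 * when self.obj is a real exporter (not None). Lock recycling then keeps
 * the preallocated pool packed: if self.lock came from the pool, it is
 * swapped with the last live entry and the used count is decremented,
 *
 *     used -= 1;
 *     if (i != used) swap(locks[i], locks[used]);
 *
 * so locks[0 .. used) always holds exactly the locks still in use. A
 * lock that never came from the pool falls through to the loop's else
 * branch and is freed with PyThread_free_lock instead. */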
/* "View.MemoryView":385
* PyThread_free_lock(self.lock)
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf
*/
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
Py_ssize_t __pyx_v_dim;
char *__pyx_v_itemp;
PyObject *__pyx_v_idx = NULL;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t __pyx_t_3;
PyObject *(*__pyx_t_4)(PyObject *);
PyObject *__pyx_t_5 = NULL;
Py_ssize_t __pyx_t_6;
char *__pyx_t_7;
__Pyx_RefNannySetupContext("get_item_pointer", 0);
/* "View.MemoryView":387
* cdef char *get_item_pointer(memoryview self, object index) except NULL:
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<<
*
* for dim, idx in enumerate(index):
*/
__pyx_v_itemp = ((char *)__pyx_v_self->view.buf);
/* "View.MemoryView":389
* cdef char *itemp = <char *> self.view.buf
*
* for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
*/
__pyx_t_1 = 0;
if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) {
__pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0;
__pyx_t_4 = NULL;
} else {
__pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 389, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 389, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_4)) {
if (likely(PyList_CheckExact(__pyx_t_2))) {
if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 389, __pyx_L1_error) /* the `0 < 0` check is constant-false on this fast path and optimized away */
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 389, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
} else {
if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 389, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 389, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
}
} else {
__pyx_t_5 = __pyx_t_4(__pyx_t_2);
if (unlikely(!__pyx_t_5)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 389, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_5);
}
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5);
__pyx_t_5 = 0;
__pyx_v_dim = __pyx_t_1;
__pyx_t_1 = (__pyx_t_1 + 1);
/* "View.MemoryView":390
*
* for dim, idx in enumerate(index):
* itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<<
*
* return itemp
*/
__pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 390, __pyx_L1_error)
__pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == NULL)) __PYX_ERR(1, 390, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_7;
/* "View.MemoryView":389
* cdef char *itemp = <char *> self.view.buf
*
* for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
*/
}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":392
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
* return itemp # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_itemp;
goto __pyx_L0;
/* "View.MemoryView":385
* PyThread_free_lock(self.lock)
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
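/* NOTE on get_item_pointer (above): the generated loop is Cython's
 * specialization of `for dim, idx in enumerate(index)`: exact lists and
 * tuples are walked with the GET_SIZE/GET_ITEM fast path, anything else
 * through tp_iternext with StopIteration swallowed to end the loop.
 * Each index is reduced to Py_ssize_t and folded into the running char*
 * by pybuffer_index, one buffer dimension at a time, so the final itemp
 * points at the addressed element within view.buf. */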
/* "View.MemoryView":395
*
*
* def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
* if index is Ellipsis:
* return self
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_indices = NULL;
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
char *__pyx_t_6;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":396
*
* def __getitem__(memoryview self, object index):
* if index is Ellipsis: # <<<<<<<<<<<<<<
* return self
*
*/
__pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":397
* def __getitem__(memoryview self, object index):
* if index is Ellipsis:
* return self # <<<<<<<<<<<<<<
*
* have_slices, indices = _unellipsify(index, self.view.ndim)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__pyx_r = ((PyObject *)__pyx_v_self);
goto __pyx_L0;
/* "View.MemoryView":396
*
* def __getitem__(memoryview self, object index):
* if index is Ellipsis: # <<<<<<<<<<<<<<
* return self
*
*/
}
/* "View.MemoryView":399
* return self
*
* have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
*
* cdef char *itemp
*/
__pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 399, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (likely(__pyx_t_3 != Py_None)) {
PyObject* sequence = __pyx_t_3;
#if !CYTHON_COMPILING_IN_PYPY
Py_ssize_t size = Py_SIZE(sequence);
#else
Py_ssize_t size = PySequence_Size(sequence);
#endif
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 399, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_5 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
#else
__pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 399, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 399, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 399, __pyx_L1_error)
}
__pyx_v_have_slices = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_v_indices = __pyx_t_5;
__pyx_t_5 = 0;
/* "View.MemoryView":402
*
* cdef char *itemp
* if have_slices: # <<<<<<<<<<<<<<
* return memview_slice(self, indices)
* else:
*/
__pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 402, __pyx_L1_error)
if (__pyx_t_2) {
/* "View.MemoryView":403
* cdef char *itemp
* if have_slices:
* return memview_slice(self, indices) # <<<<<<<<<<<<<<
* else:
* itemp = self.get_item_pointer(indices)
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 403, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":402
*
* cdef char *itemp
* if have_slices: # <<<<<<<<<<<<<<
* return memview_slice(self, indices)
* else:
*/
}
/* "View.MemoryView":405
* return memview_slice(self, indices)
* else:
* itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<<
* return self.convert_item_to_object(itemp)
*
*/
/*else*/ {
__pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == NULL)) __PYX_ERR(1, 405, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_6;
/* "View.MemoryView":406
* else:
* itemp = self.get_item_pointer(indices)
* return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<<
*
* def __setitem__(memoryview self, object index, object value):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 406, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":395
*
*
* def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
* if index is Ellipsis:
* return self
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_indices);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
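/* NOTE on memoryview.__getitem__ (above): three-way dispatch. A bare
 * Ellipsis returns self unchanged; otherwise _unellipsify normalizes the
 * index into (have_slices, indices), and that 2-tuple is unpacked with
 * the usual size-checked fast path. Slice indices produce a new
 * memoryview via memview_slice; a fully scalar index resolves to a char*
 * through the vtable's get_item_pointer and is boxed into a Python
 * object by convert_item_to_object. */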
/* "View.MemoryView":408
* return self.convert_item_to_object(itemp)
*
* def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
* have_slices, index = _unellipsify(index, self.view.ndim)
*
*/
/* Python wrapper */
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_obj = NULL;
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
__Pyx_RefNannySetupContext("__setitem__", 0);
__Pyx_INCREF(__pyx_v_index);
/* "View.MemoryView":409
*
* def __setitem__(memoryview self, object index, object value):
* have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
*
* if have_slices:
*/
__pyx_t_1 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 409, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (likely(__pyx_t_1 != Py_None)) {
PyObject* sequence = __pyx_t_1;
#if !CYTHON_COMPILING_IN_PYPY
Py_ssize_t size = Py_SIZE(sequence);
#else
Py_ssize_t size = PySequence_Size(sequence);
#endif
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 409, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_2 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(__pyx_t_3);
#else
__pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 409, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 409, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
#endif
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 409, __pyx_L1_error)
}
__pyx_v_have_slices = __pyx_t_2;
__pyx_t_2 = 0;
__Pyx_DECREF_SET(__pyx_v_index, __pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":411
* have_slices, index = _unellipsify(index, self.view.ndim)
*
* if have_slices: # <<<<<<<<<<<<<<
* obj = self.is_slice(value)
* if obj:
*/
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 411, __pyx_L1_error)
if (__pyx_t_4) {
/* "View.MemoryView":412
*
* if have_slices:
* obj = self.is_slice(value) # <<<<<<<<<<<<<<
* if obj:
* self.setitem_slice_assignment(self[index], obj)
*/
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 412, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_obj = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":413
* if have_slices:
* obj = self.is_slice(value)
* if obj: # <<<<<<<<<<<<<<
* self.setitem_slice_assignment(self[index], obj)
* else:
*/
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 413, __pyx_L1_error)
if (__pyx_t_4) {
/* "View.MemoryView":414
* obj = self.is_slice(value)
* if obj:
* self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<<
* else:
* self.setitem_slice_assign_scalar(self[index], value)
*/
__pyx_t_1 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 414, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_1, __pyx_v_obj); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 414, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":413
* if have_slices:
* obj = self.is_slice(value)
* if obj: # <<<<<<<<<<<<<<
* self.setitem_slice_assignment(self[index], obj)
* else:
*/
goto __pyx_L4;
}
/* "View.MemoryView":416
* self.setitem_slice_assignment(self[index], obj)
* else:
* self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<<
* else:
* self.setitem_indexed(index, value)
*/
/*else*/ {
__pyx_t_3 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 416, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 416, __pyx_L1_error)
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_3), __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 416, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
}
__pyx_L4:;
/* "View.MemoryView":411
* have_slices, index = _unellipsify(index, self.view.ndim)
*
* if have_slices: # <<<<<<<<<<<<<<
* obj = self.is_slice(value)
* if obj:
*/
goto __pyx_L3;
}
/* "View.MemoryView":418
* self.setitem_slice_assign_scalar(self[index], value)
* else:
* self.setitem_indexed(index, value) # <<<<<<<<<<<<<<
*
* cdef is_slice(self, obj):
*/
/*else*/ {
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 418, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
}
__pyx_L3:;
/* "View.MemoryView":408
* return self.convert_item_to_object(itemp)
*
* def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
* have_slices, index = _unellipsify(index, self.view.ndim)
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
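/* NOTE on memoryview.__setitem__ (above): mirror image of __getitem__.
 * With slice indices, is_slice() first tries to view the value as a
 * memoryview; on success the element-wise copy runs through
 * setitem_slice_assignment(self[index], obj), otherwise the value is
 * broadcast with setitem_slice_assign_scalar. A purely scalar index goes
 * straight to setitem_indexed. `index` is INCREF'd on entry because the
 * unpacking rebinds it, hence the matching XDECREF on exit. */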
/* "View.MemoryView":420
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_t_9;
__Pyx_RefNannySetupContext("is_slice", 0);
__Pyx_INCREF(__pyx_v_obj);
/* "View.MemoryView":421
*
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
* try:
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
*/
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":422
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_5);
/*try:*/ {
/* "View.MemoryView":423
* if not isinstance(obj, memoryview):
* try:
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
* self.dtype_is_object)
* except TypeError:
*/
__pyx_t_6 = __Pyx_PyInt_From_int((__pyx_v_self->flags | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 423, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":424
* try:
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object) # <<<<<<<<<<<<<<
* except TypeError:
* return None
*/
__pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 424, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_7);
/* "View.MemoryView":423
* if not isinstance(obj, memoryview):
* try:
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
* self.dtype_is_object)
* except TypeError:
*/
__pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 423, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_INCREF(__pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_7);
PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7);
__pyx_t_6 = 0;
__pyx_t_7 = 0;
__pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 423, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7);
__pyx_t_7 = 0;
/* "View.MemoryView":422
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
}
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
goto __pyx_L11_try_end;
__pyx_L4_error:;
__Pyx_PyThreadState_assign
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
/* "View.MemoryView":425
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
* except TypeError: # <<<<<<<<<<<<<<
* return None
*
*/
__pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError);
if (__pyx_t_9) {
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 425, __pyx_L6_except_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_GOTREF(__pyx_t_8);
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":426
* self.dtype_is_object)
* except TypeError:
* return None # <<<<<<<<<<<<<<
*
* return obj
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(Py_None);
__pyx_r = Py_None;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
goto __pyx_L7_except_return;
}
goto __pyx_L6_except_error;
__pyx_L6_except_error:;
/* "View.MemoryView":422
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
__Pyx_PyThreadState_assign
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L1_error;
__pyx_L7_except_return:;
__Pyx_PyThreadState_assign
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L0;
__pyx_L11_try_end:;
}
/* "View.MemoryView":421
*
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
* try:
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
*/
}
/* "View.MemoryView":428
* return None
*
* return obj # <<<<<<<<<<<<<<
*
* cdef setitem_slice_assignment(self, dst, src):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_obj);
__pyx_r = __pyx_v_obj;
goto __pyx_L0;
/* "View.MemoryView":420
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
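/* NOTE on is_slice (above): coercion helper for __setitem__. Anything
 * that is not already a memoryview is passed through the memoryview
 * constructor with self.flags | PyBUF_ANY_CONTIGUOUS and the current
 * dtype_is_object setting; a TypeError from that call is caught and
 * converted into a None return, meaning "value is not usable as a slice
 * source". The ExceptionSave/Reset pairs are the generated form of the
 * try/except block and restore any pre-existing exception state. */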
/* "View.MemoryView":430
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) {
__Pyx_memviewslice __pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_src_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
__Pyx_RefNannySetupContext("setitem_slice_assignment", 0);
/* "View.MemoryView":434
* cdef __Pyx_memviewslice src_slice
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object)
*/
if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 434, __pyx_L1_error)
/* "View.MemoryView":435
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
* get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<<
* src.ndim, dst.ndim, self.dtype_is_object)
*
*/
if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 435, __pyx_L1_error)
/* "View.MemoryView":436
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<<
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value):
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 436, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 436, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 436, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 436, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":434
* cdef __Pyx_memviewslice src_slice
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object)
*/
__pyx_t_4 = __pyx_memoryview_copy_contents((__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice))[0]), (__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice))[0]), __pyx_t_2, __pyx_t_3, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_4 == -1)) __PYX_ERR(1, 434, __pyx_L1_error)
/* "View.MemoryView":430
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
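/* NOTE on setitem_slice_assignment (above): both dst and src are
 * type-checked against the memoryview type, lowered to C-level
 * __Pyx_memviewslice structs with get_slice_from_memview, and handed to
 * memoryview_copy_contents, which performs the element-wise copy and
 * takes dtype_is_object into account for per-element refcounting. */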
/* "View.MemoryView":438
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) {
int __pyx_v_array[0x80]; /* 0x80 == 128, matching the Cython source's `cdef int array[128]` */
void *__pyx_v_tmp;
void *__pyx_v_item;
__Pyx_memviewslice *__pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_tmp_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_t_3;
int __pyx_t_4;
char const *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
PyObject *__pyx_t_10 = NULL;
PyObject *__pyx_t_11 = NULL;
__Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0);
/* "View.MemoryView":440
* cdef setitem_slice_assign_scalar(self, memoryview dst, value):
* cdef int array[128]
* cdef void *tmp = NULL # <<<<<<<<<<<<<<
* cdef void *item
*
*/
__pyx_v_tmp = NULL;
/* "View.MemoryView":445
* cdef __Pyx_memviewslice *dst_slice
* cdef __Pyx_memviewslice tmp_slice
* dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<<
*
* if <size_t>self.view.itemsize > sizeof(array):
*/
__pyx_v_dst_slice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice));
/* "View.MemoryView":447
* dst_slice = get_slice_from_memview(dst, &tmp_slice)
*
* if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
*/
__pyx_t_1 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":448
*
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<<
* if tmp == NULL:
* raise MemoryError
*/
__pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize);
/* "View.MemoryView":449
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL: # <<<<<<<<<<<<<<
* raise MemoryError
* item = tmp
*/
__pyx_t_1 = ((__pyx_v_tmp == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":450
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
* raise MemoryError # <<<<<<<<<<<<<<
* item = tmp
* else:
*/
PyErr_NoMemory(); __PYX_ERR(1, 450, __pyx_L1_error)
/* "View.MemoryView":449
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL: # <<<<<<<<<<<<<<
* raise MemoryError
* item = tmp
*/
}
/* "View.MemoryView":451
* if tmp == NULL:
* raise MemoryError
* item = tmp # <<<<<<<<<<<<<<
* else:
* item = <void *> array
*/
__pyx_v_item = __pyx_v_tmp;
/* "View.MemoryView":447
* dst_slice = get_slice_from_memview(dst, &tmp_slice)
*
* if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
*/
goto __pyx_L3;
}
/* "View.MemoryView":453
* item = tmp
* else:
* item = <void *> array # <<<<<<<<<<<<<<
*
* try:
*/
/*else*/ {
__pyx_v_item = ((void *)__pyx_v_array);
}
__pyx_L3:;
/* "View.MemoryView":455
* item = <void *> array
*
* try: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* (<PyObject **> item)[0] = <PyObject *> value
*/
/*try:*/ {
/* "View.MemoryView":456
*
* try:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* (<PyObject **> item)[0] = <PyObject *> value
* else:
*/
__pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":457
* try:
* if self.dtype_is_object:
* (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<<
* else:
* self.assign_item_from_object(<char *> item, value)
*/
(((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value);
/* "View.MemoryView":456
*
* try:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* (<PyObject **> item)[0] = <PyObject *> value
* else:
*/
goto __pyx_L8;
}
/* "View.MemoryView":459
* (<PyObject **> item)[0] = <PyObject *> value
* else:
* self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<<
*
*
*/
/*else*/ {
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 459, __pyx_L6_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_L8:;
/* "View.MemoryView":463
*
*
* if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
*/
__pyx_t_1 = ((__pyx_v_self->view.suboffsets != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":464
*
* if self.view.suboffsets != NULL:
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<<
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
* item, self.dtype_is_object)
*/
__pyx_t_2 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 464, __pyx_L6_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":463
*
*
* if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
*/
}
/* "View.MemoryView":465
* if self.view.suboffsets != NULL:
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<<
* item, self.dtype_is_object)
* finally:
*/
__pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object);
}
/* "View.MemoryView":468
* item, self.dtype_is_object)
* finally:
* PyMem_Free(tmp) # <<<<<<<<<<<<<<
*
* cdef setitem_indexed(self, index, value):
*/
/*finally:*/ {
/*normal exit:*/{
PyMem_Free(__pyx_v_tmp);
goto __pyx_L7;
}
/*exception exit:*/{
__Pyx_PyThreadState_declare
__pyx_L6_error:;
__pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0;
__Pyx_PyThreadState_assign
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11);
if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8) < 0)) __Pyx_ErrFetch(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8);
__Pyx_XGOTREF(__pyx_t_6);
__Pyx_XGOTREF(__pyx_t_7);
__Pyx_XGOTREF(__pyx_t_8);
__Pyx_XGOTREF(__pyx_t_9);
__Pyx_XGOTREF(__pyx_t_10);
__Pyx_XGOTREF(__pyx_t_11);
__pyx_t_3 = __pyx_lineno; __pyx_t_4 = __pyx_clineno; __pyx_t_5 = __pyx_filename;
{
PyMem_Free(__pyx_v_tmp);
}
__Pyx_PyThreadState_assign
if (PY_MAJOR_VERSION >= 3) {
__Pyx_XGIVEREF(__pyx_t_9);
__Pyx_XGIVEREF(__pyx_t_10);
__Pyx_XGIVEREF(__pyx_t_11);
__Pyx_ExceptionReset(__pyx_t_9, __pyx_t_10, __pyx_t_11);
}
__Pyx_XGIVEREF(__pyx_t_6);
__Pyx_XGIVEREF(__pyx_t_7);
__Pyx_XGIVEREF(__pyx_t_8);
__Pyx_ErrRestore(__pyx_t_6, __pyx_t_7, __pyx_t_8);
__pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0;
__pyx_lineno = __pyx_t_3; __pyx_clineno = __pyx_t_4; __pyx_filename = __pyx_t_5;
goto __pyx_L1_error;
}
__pyx_L7:;
}
/* "View.MemoryView":438
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":470
* PyMem_Free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
char *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("setitem_indexed", 0);
/* "View.MemoryView":471
*
* cdef setitem_indexed(self, index, value):
* cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<<
* self.assign_item_from_object(itemp, value)
*
*/
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == NULL)) __PYX_ERR(1, 471, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_1;
/* "View.MemoryView":472
* cdef setitem_indexed(self, index, value):
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<<
*
* cdef convert_item_to_object(self, char *itemp):
*/
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 472, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":470
* PyMem_Free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":474
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_v_struct = NULL;
PyObject *__pyx_v_bytesitem = 0;
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
int __pyx_t_8;
PyObject *__pyx_t_9 = NULL;
size_t __pyx_t_10;
int __pyx_t_11;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":477
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
* import struct # <<<<<<<<<<<<<<
* cdef bytes bytesitem
*
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 477, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":480
* cdef bytes bytesitem
*
* bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<<
* try:
* result = struct.unpack(self.view.format, bytesitem)
*/
__pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 480, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_bytesitem = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":481
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
/*try:*/ {
/* "View.MemoryView":482
* bytesitem = itemp[:self.view.itemsize]
* try:
* result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<<
* except struct.error:
* raise ValueError("Unable to convert item to object")
*/
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 482, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 482, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = NULL;
__pyx_t_8 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
__pyx_t_8 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
__pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 482, __pyx_L3_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
__pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 482, __pyx_L3_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
{
__pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 482, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_9);
if (__pyx_t_7) {
__Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL;
}
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6);
__Pyx_INCREF(__pyx_v_bytesitem);
__Pyx_GIVEREF(__pyx_v_bytesitem);
PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem);
__pyx_t_6 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 482, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":481
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
}
/* "View.MemoryView":486
* raise ValueError("Unable to convert item to object")
* else:
* if len(self.view.format) == 1: # <<<<<<<<<<<<<<
* return result[0]
* return result
*/
/*else:*/ {
__pyx_t_10 = strlen(__pyx_v_self->view.format);
__pyx_t_11 = ((__pyx_t_10 == 1) != 0);
if (__pyx_t_11) {
/* "View.MemoryView":487
* else:
* if len(self.view.format) == 1:
* return result[0] # <<<<<<<<<<<<<<
* return result
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 487, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L6_except_return;
/* "View.MemoryView":486
* raise ValueError("Unable to convert item to object")
* else:
* if len(self.view.format) == 1: # <<<<<<<<<<<<<<
* return result[0]
* return result
*/
}
/* "View.MemoryView":488
* if len(self.view.format) == 1:
* return result[0]
* return result # <<<<<<<<<<<<<<
*
* cdef assign_item_from_object(self, char *itemp, object value):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_result);
__pyx_r = __pyx_v_result;
goto __pyx_L6_except_return;
}
__pyx_L3_error:;
__Pyx_PyThreadState_assign
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":483
* try:
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error: # <<<<<<<<<<<<<<
* raise ValueError("Unable to convert item to object")
* else:
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 483, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_8 = __Pyx_PyErr_ExceptionMatches(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (__pyx_t_8) {
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9) < 0) __PYX_ERR(1, 483, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_9);
/* "View.MemoryView":484
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 484, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(1, 484, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "View.MemoryView":481
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
__Pyx_PyThreadState_assign
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L1_error;
__pyx_L6_except_return:;
__Pyx_PyThreadState_assign
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L0;
}
/* "View.MemoryView":474
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesitem);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":490
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_v_struct = NULL;
char __pyx_v_c;
PyObject *__pyx_v_bytesvalue = 0;
Py_ssize_t __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
int __pyx_t_7;
PyObject *__pyx_t_8 = NULL;
Py_ssize_t __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
char *__pyx_t_11;
char *__pyx_t_12;
char *__pyx_t_13;
char *__pyx_t_14;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":493
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
* import struct # <<<<<<<<<<<<<<
* cdef char c
* cdef bytes bytesvalue
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":498
* cdef Py_ssize_t i
*
* if isinstance(value, tuple): # <<<<<<<<<<<<<<
* bytesvalue = struct.pack(self.view.format, *value)
* else:
*/
__pyx_t_2 = PyTuple_Check(__pyx_v_value);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* "View.MemoryView":499
*
* if isinstance(value, tuple):
* bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<<
* else:
* bytesvalue = struct.pack(self.view.format, value)
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 499, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 499, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 499, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 499, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 499, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 499, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 499, __pyx_L1_error)
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":498
* cdef Py_ssize_t i
*
* if isinstance(value, tuple): # <<<<<<<<<<<<<<
* bytesvalue = struct.pack(self.view.format, *value)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":501
* bytesvalue = struct.pack(self.view.format, *value)
* else:
* bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<<
*
* for i, c in enumerate(bytesvalue):
*/
/*else*/ {
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 501, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 501, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = NULL;
__pyx_t_7 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_6, function);
__pyx_t_7 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
__pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 501, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
__pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 501, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
{
__pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 501, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
if (__pyx_t_5) {
__Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL;
}
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1);
__Pyx_INCREF(__pyx_v_value);
__Pyx_GIVEREF(__pyx_v_value);
PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value);
__pyx_t_1 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 501, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 501, __pyx_L1_error)
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
}
__pyx_L3:;
/* "View.MemoryView":503
* bytesvalue = struct.pack(self.view.format, value)
*
* for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
* itemp[i] = c
*
*/
__pyx_t_9 = 0;
if (unlikely(__pyx_v_bytesvalue == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable");
__PYX_ERR(1, 503, __pyx_L1_error)
}
__Pyx_INCREF(__pyx_v_bytesvalue);
__pyx_t_10 = __pyx_v_bytesvalue;
__pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10);
__pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10));
for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) {
__pyx_t_11 = __pyx_t_14;
__pyx_v_c = (__pyx_t_11[0]);
/* "View.MemoryView":504
*
* for i, c in enumerate(bytesvalue):
* itemp[i] = c # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
__pyx_v_i = __pyx_t_9;
/* "View.MemoryView":503
* bytesvalue = struct.pack(self.view.format, value)
*
* for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
* itemp[i] = c
*
*/
__pyx_t_9 = (__pyx_t_9 + 1);
/* "View.MemoryView":504
*
* for i, c in enumerate(bytesvalue):
* itemp[i] = c # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
(__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c;
}
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
/* "View.MemoryView":490
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesvalue);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":507
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_STRIDES:
* info.shape = self.view.shape
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
Py_ssize_t *__pyx_t_2;
char *__pyx_t_3;
void *__pyx_t_4;
int __pyx_t_5;
Py_ssize_t __pyx_t_6;
__Pyx_RefNannySetupContext("__getbuffer__", 0);
if (__pyx_v_info != NULL) {
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
}
/* "View.MemoryView":508
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.shape = self.view.shape
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":509
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_STRIDES:
* info.shape = self.view.shape # <<<<<<<<<<<<<<
* else:
* info.shape = NULL
*/
__pyx_t_2 = __pyx_v_self->view.shape;
__pyx_v_info->shape = __pyx_t_2;
/* "View.MemoryView":508
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.shape = self.view.shape
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":511
* info.shape = self.view.shape
* else:
* info.shape = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_STRIDES:
*/
/*else*/ {
__pyx_v_info->shape = NULL;
}
__pyx_L3:;
/* "View.MemoryView":513
* info.shape = NULL
*
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.strides = self.view.strides
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":514
*
* if flags & PyBUF_STRIDES:
* info.strides = self.view.strides # <<<<<<<<<<<<<<
* else:
* info.strides = NULL
*/
__pyx_t_2 = __pyx_v_self->view.strides;
__pyx_v_info->strides = __pyx_t_2;
/* "View.MemoryView":513
* info.shape = NULL
*
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.strides = self.view.strides
* else:
*/
goto __pyx_L4;
}
/* "View.MemoryView":516
* info.strides = self.view.strides
* else:
* info.strides = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_INDIRECT:
*/
/*else*/ {
__pyx_v_info->strides = NULL;
}
__pyx_L4:;
/* "View.MemoryView":518
* info.strides = NULL
*
* if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
* info.suboffsets = self.view.suboffsets
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":519
*
* if flags & PyBUF_INDIRECT:
* info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<<
* else:
* info.suboffsets = NULL
*/
__pyx_t_2 = __pyx_v_self->view.suboffsets;
__pyx_v_info->suboffsets = __pyx_t_2;
/* "View.MemoryView":518
* info.strides = NULL
*
* if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
* info.suboffsets = self.view.suboffsets
* else:
*/
goto __pyx_L5;
}
/* "View.MemoryView":521
* info.suboffsets = self.view.suboffsets
* else:
* info.suboffsets = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
/*else*/ {
__pyx_v_info->suboffsets = NULL;
}
__pyx_L5:;
/* "View.MemoryView":523
* info.suboffsets = NULL
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.view.format
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":524
*
* if flags & PyBUF_FORMAT:
* info.format = self.view.format # <<<<<<<<<<<<<<
* else:
* info.format = NULL
*/
__pyx_t_3 = __pyx_v_self->view.format;
__pyx_v_info->format = __pyx_t_3;
/* "View.MemoryView":523
* info.suboffsets = NULL
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.view.format
* else:
*/
goto __pyx_L6;
}
/* "View.MemoryView":526
* info.format = self.view.format
* else:
* info.format = NULL # <<<<<<<<<<<<<<
*
* info.buf = self.view.buf
*/
/*else*/ {
__pyx_v_info->format = NULL;
}
__pyx_L6:;
/* "View.MemoryView":528
* info.format = NULL
*
* info.buf = self.view.buf # <<<<<<<<<<<<<<
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize
*/
__pyx_t_4 = __pyx_v_self->view.buf;
__pyx_v_info->buf = __pyx_t_4;
/* "View.MemoryView":529
*
* info.buf = self.view.buf
* info.ndim = self.view.ndim # <<<<<<<<<<<<<<
* info.itemsize = self.view.itemsize
* info.len = self.view.len
*/
__pyx_t_5 = __pyx_v_self->view.ndim;
__pyx_v_info->ndim = __pyx_t_5;
/* "View.MemoryView":530
* info.buf = self.view.buf
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize # <<<<<<<<<<<<<<
* info.len = self.view.len
* info.readonly = 0
*/
__pyx_t_6 = __pyx_v_self->view.itemsize;
__pyx_v_info->itemsize = __pyx_t_6;
/* "View.MemoryView":531
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize
* info.len = self.view.len # <<<<<<<<<<<<<<
* info.readonly = 0
* info.obj = self
*/
__pyx_t_6 = __pyx_v_self->view.len;
__pyx_v_info->len = __pyx_t_6;
/* "View.MemoryView":532
* info.itemsize = self.view.itemsize
* info.len = self.view.len
* info.readonly = 0 # <<<<<<<<<<<<<<
* info.obj = self
*
*/
__pyx_v_info->readonly = 0;
/* "View.MemoryView":533
* info.len = self.view.len
* info.readonly = 0
* info.obj = self # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":507
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_STRIDES:
* info.shape = self.view.shape
*/
/* function exit code */
__pyx_r = 0;
if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(Py_None);
__Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL;
}
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":539
*
* @property
* def T(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":540
* @property
* def T(self):
* cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<<
* transpose_memslice(&result.from_slice)
* return result
*/
__pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 540, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 540, __pyx_L1_error)
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":541
* def T(self):
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<<
* return result
*
*/
__pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == 0)) __PYX_ERR(1, 541, __pyx_L1_error)
/* "View.MemoryView":542
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
* return result # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":539
*
* @property
* def T(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":545
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":546
* @property
* def base(self):
* return self.obj # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->obj);
__pyx_r = __pyx_v_self->obj;
goto __pyx_L0;
/* "View.MemoryView":545
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":549
*
* @property
* def shape(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_length;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
PyObject *__pyx_t_5 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":550
* @property
* def shape(self):
* return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 550, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
__pyx_t_2 = __pyx_t_4;
__pyx_v_length = (__pyx_t_2[0]);
__pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 550, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 550, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 550, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":549
*
* @property
* def shape(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":553
*
* @property
* def strides(self): # <<<<<<<<<<<<<<
* if self.view.strides == NULL:
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_stride;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":554
* @property
* def strides(self):
* if self.view.strides == NULL: # <<<<<<<<<<<<<<
*
* raise ValueError("Buffer view does not expose strides")
*/
__pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":556
* if self.view.strides == NULL:
*
* raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]])
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 556, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 556, __pyx_L1_error)
/* "View.MemoryView":554
* @property
* def strides(self):
* if self.view.strides == NULL: # <<<<<<<<<<<<<<
*
* raise ValueError("Buffer view does not expose strides")
*/
}
/* "View.MemoryView":558
* raise ValueError("Buffer view does not expose strides")
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 558, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_v_stride = (__pyx_t_3[0]);
__pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 558, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 558, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
}
__pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 558, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_6;
__pyx_t_6 = 0;
goto __pyx_L0;
/* "View.MemoryView":553
*
* @property
* def strides(self): # <<<<<<<<<<<<<<
* if self.view.strides == NULL:
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":561
*
* @property
* def suboffsets(self): # <<<<<<<<<<<<<<
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
Py_ssize_t *__pyx_t_6;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":562
* @property
* def suboffsets(self):
* if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
* return (-1,) * self.view.ndim
*
*/
__pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":563
* def suboffsets(self):
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 563, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_tuple__14, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 563, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":562
* @property
* def suboffsets(self):
* if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
* return (-1,) * self.view.ndim
*
*/
}
/* "View.MemoryView":565
* return (-1,) * self.view.ndim
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 565, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim);
for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) {
__pyx_t_4 = __pyx_t_6;
__pyx_v_suboffset = (__pyx_t_4[0]);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 565, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 565, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 565, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":561
*
* @property
* def suboffsets(self): # <<<<<<<<<<<<<<
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":568
*
* @property
* def ndim(self): # <<<<<<<<<<<<<<
* return self.view.ndim
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":569
* @property
* def ndim(self):
* return self.view.ndim # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 569, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":568
*
* @property
* def ndim(self): # <<<<<<<<<<<<<<
* return self.view.ndim
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":572
*
* @property
* def itemsize(self): # <<<<<<<<<<<<<<
* return self.view.itemsize
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":573
* @property
* def itemsize(self):
* return self.view.itemsize # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 573, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":572
*
* @property
* def itemsize(self): # <<<<<<<<<<<<<<
* return self.view.itemsize
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":576
*
* @property
* def nbytes(self): # <<<<<<<<<<<<<<
* return self.size * self.view.itemsize
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":577
* @property
* def nbytes(self):
* return self.size * self.view.itemsize # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":576
*
* @property
* def nbytes(self): # <<<<<<<<<<<<<<
* return self.size * self.view.itemsize
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":580
*
* @property
* def size(self): # <<<<<<<<<<<<<<
* if self._size is None:
* result = 1
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":581
* @property
* def size(self):
* if self._size is None: # <<<<<<<<<<<<<<
* result = 1
*
*/
__pyx_t_1 = (__pyx_v_self->_size == Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":582
* def size(self):
* if self._size is None:
* result = 1 # <<<<<<<<<<<<<<
*
* for length in self.view.shape[:self.view.ndim]:
*/
__Pyx_INCREF(__pyx_int_1);
__pyx_v_result = __pyx_int_1;
/* "View.MemoryView":584
* result = 1
*
* for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<<
* result *= length
*
*/
__pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 584, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6);
__pyx_t_6 = 0;
/* "View.MemoryView":585
*
* for length in self.view.shape[:self.view.ndim]:
* result *= length # <<<<<<<<<<<<<<
*
* self._size = result
*/
__pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 585, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6);
__pyx_t_6 = 0;
}
/* "View.MemoryView":587
* result *= length
*
* self._size = result # <<<<<<<<<<<<<<
*
* return self._size
*/
__Pyx_INCREF(__pyx_v_result);
__Pyx_GIVEREF(__pyx_v_result);
__Pyx_GOTREF(__pyx_v_self->_size);
__Pyx_DECREF(__pyx_v_self->_size);
__pyx_v_self->_size = __pyx_v_result;
/* "View.MemoryView":581
* @property
* def size(self):
* if self._size is None: # <<<<<<<<<<<<<<
* result = 1
*
*/
}
/* "View.MemoryView":589
* self._size = result
*
* return self._size # <<<<<<<<<<<<<<
*
* def __len__(self):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->_size);
__pyx_r = __pyx_v_self->_size;
goto __pyx_L0;
/* "View.MemoryView":580
*
* @property
* def size(self): # <<<<<<<<<<<<<<
* if self._size is None:
* result = 1
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":591
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
/* Python wrapper */
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__len__", 0);
/* "View.MemoryView":592
*
* def __len__(self):
* if self.view.ndim >= 1: # <<<<<<<<<<<<<<
* return self.view.shape[0]
*
*/
__pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":593
* def __len__(self):
* if self.view.ndim >= 1:
* return self.view.shape[0] # <<<<<<<<<<<<<<
*
* return 0
*/
__pyx_r = (__pyx_v_self->view.shape[0]);
goto __pyx_L0;
/* "View.MemoryView":592
*
* def __len__(self):
* if self.view.ndim >= 1: # <<<<<<<<<<<<<<
* return self.view.shape[0]
*
*/
}
/* "View.MemoryView":595
* return self.view.shape[0]
*
* return 0 # <<<<<<<<<<<<<<
*
* def __repr__(self):
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":591
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":597
* return 0
*
* def __repr__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self))
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":598
*
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
* id(self))
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 598, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 598, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 598, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":599
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self)) # <<<<<<<<<<<<<<
*
* def __str__(self):
*/
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 599, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_self));
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_id, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 599, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":598
*
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
* id(self))
*
*/
__pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 598, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 598, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":597
* return 0
*
* def __repr__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self))
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":601
* id(self))
*
* def __str__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
*
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__str__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("__str__", 0);
/* "View.MemoryView":602
*
* def __str__(self):
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 602, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 602, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 602, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 602, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 602, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":601
* id(self))
*
* def __str__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":605
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("is_c_contig", 0);
/* "View.MemoryView":608
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
*/
__pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp));
/* "View.MemoryView":609
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp)
* return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<<
*
* def is_f_contig(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 609, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":605
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":611
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("is_f_contig", 0);
/* "View.MemoryView":614
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
*/
__pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp));
/* "View.MemoryView":615
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp)
* return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<<
*
* def copy(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 615, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":611
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":617
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
* def copy(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_mslice;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("copy", 0);
/* "View.MemoryView":619
* def copy(self):
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<<
*
* slice_copy(self, &mslice)
*/
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS));
/* "View.MemoryView":621
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*
* slice_copy(self, &mslice) # <<<<<<<<<<<<<<
* mslice = slice_copy_contig(&mslice, "c", self.view.ndim,
* self.view.itemsize,
*/
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice));
/* "View.MemoryView":622
*
* slice_copy(self, &mslice)
* mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<<
* self.view.itemsize,
* flags|PyBUF_C_CONTIGUOUS,
*/
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 622, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":627
* self.dtype_is_object)
*
* return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<<
*
* def copy_fortran(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 627, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":617
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
* def copy(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":629
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("copy_fortran", 0);
/* "View.MemoryView":631
* def copy_fortran(self):
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<<
*
* slice_copy(self, &src)
*/
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS));
/* "View.MemoryView":633
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*
* slice_copy(self, &src) # <<<<<<<<<<<<<<
* dst = slice_copy_contig(&src, "fortran", self.view.ndim,
* self.view.itemsize,
*/
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src));
/* "View.MemoryView":634
*
* slice_copy(self, &src)
* dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<<
* self.view.itemsize,
* flags|PyBUF_F_CONTIGUOUS,
*/
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 634, __pyx_L1_error)
__pyx_v_dst = __pyx_t_1;
/* "View.MemoryView":639
* self.dtype_is_object)
*
* return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 639, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":629
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":643
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) {
struct __pyx_memoryview_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("memoryview_cwrapper", 0);
/* "View.MemoryView":644
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
* cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<<
* result.typeinfo = typeinfo
* return result
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 644, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 644, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 644, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_o);
__Pyx_GIVEREF(__pyx_v_o);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 644, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":645
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo # <<<<<<<<<<<<<<
* return result
*
*/
__pyx_v_result->typeinfo = __pyx_v_typeinfo;
/* "View.MemoryView":646
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
* return result # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_check')
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":643
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":649
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("memoryview_check", 0);
/* "View.MemoryView":650
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o):
* return isinstance(o, memoryview) # <<<<<<<<<<<<<<
*
* cdef tuple _unellipsify(object index, int ndim):
*/
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type);
__pyx_r = __pyx_t_1;
goto __pyx_L0;
/* "View.MemoryView":649
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":652
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) {
PyObject *__pyx_v_tup = NULL;
PyObject *__pyx_v_result = NULL;
int __pyx_v_have_slices;
int __pyx_v_seen_ellipsis;
CYTHON_UNUSED PyObject *__pyx_v_idx = NULL;
PyObject *__pyx_v_item = NULL;
Py_ssize_t __pyx_v_nslices;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
Py_ssize_t __pyx_t_5;
PyObject *(*__pyx_t_6)(PyObject *);
PyObject *__pyx_t_7 = NULL;
Py_ssize_t __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
PyObject *__pyx_t_11 = NULL;
__Pyx_RefNannySetupContext("_unellipsify", 0);
/* "View.MemoryView":657
* full slices.
* """
* if not isinstance(index, tuple): # <<<<<<<<<<<<<<
* tup = (index,)
* else:
*/
__pyx_t_1 = PyTuple_Check(__pyx_v_index);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":658
* """
* if not isinstance(index, tuple):
* tup = (index,) # <<<<<<<<<<<<<<
* else:
* tup = index
*/
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_index);
__Pyx_GIVEREF(__pyx_v_index);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index);
__pyx_v_tup = __pyx_t_3;
__pyx_t_3 = 0;
/* "View.MemoryView":657
* full slices.
* """
* if not isinstance(index, tuple): # <<<<<<<<<<<<<<
* tup = (index,)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":660
* tup = (index,)
* else:
* tup = index # <<<<<<<<<<<<<<
*
* result = []
*/
/*else*/ {
__Pyx_INCREF(__pyx_v_index);
__pyx_v_tup = __pyx_v_index;
}
__pyx_L3:;
/* "View.MemoryView":662
* tup = index
*
* result = [] # <<<<<<<<<<<<<<
* have_slices = False
* seen_ellipsis = False
*/
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 662, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_v_result = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":663
*
* result = []
* have_slices = False # <<<<<<<<<<<<<<
* seen_ellipsis = False
* for idx, item in enumerate(tup):
*/
__pyx_v_have_slices = 0;
/* "View.MemoryView":664
* result = []
* have_slices = False
* seen_ellipsis = False # <<<<<<<<<<<<<<
* for idx, item in enumerate(tup):
* if item is Ellipsis:
*/
__pyx_v_seen_ellipsis = 0;
/* "View.MemoryView":665
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
__Pyx_INCREF(__pyx_int_0);
__pyx_t_3 = __pyx_int_0;
if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) {
__pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0;
__pyx_t_6 = NULL;
} else {
__pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 665, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 665, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_6)) {
if (likely(PyList_CheckExact(__pyx_t_4))) {
if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 665, __pyx_L1_error)
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 665, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
#endif
} else {
if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 665, __pyx_L1_error)
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 665, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
#endif
}
} else {
__pyx_t_7 = __pyx_t_6(__pyx_t_4);
if (unlikely(!__pyx_t_7)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 665, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_7);
}
__Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7);
__pyx_t_7 = 0;
__Pyx_INCREF(__pyx_t_3);
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3);
__pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 665, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_3);
__pyx_t_3 = __pyx_t_7;
__pyx_t_7 = 0;
/* "View.MemoryView":666
* seen_ellipsis = False
* for idx, item in enumerate(tup):
* if item is Ellipsis: # <<<<<<<<<<<<<<
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
*/
__pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":667
* for idx, item in enumerate(tup):
* if item is Ellipsis:
* if not seen_ellipsis: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True
*/
__pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":668
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
__pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == -1)) __PYX_ERR(1, 668, __pyx_L1_error)
__pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 668, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__15);
__Pyx_GIVEREF(__pyx_slice__15);
PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__15);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == -1)) __PYX_ERR(1, 668, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
/* "View.MemoryView":669
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True # <<<<<<<<<<<<<<
* else:
* result.append(slice(None))
*/
__pyx_v_seen_ellipsis = 1;
/* "View.MemoryView":667
* for idx, item in enumerate(tup):
* if item is Ellipsis:
* if not seen_ellipsis: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True
*/
goto __pyx_L7;
}
/* "View.MemoryView":671
* seen_ellipsis = True
* else:
* result.append(slice(None)) # <<<<<<<<<<<<<<
* have_slices = True
* else:
*/
/*else*/ {
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__16); if (unlikely(__pyx_t_9 == -1)) __PYX_ERR(1, 671, __pyx_L1_error)
}
__pyx_L7:;
/* "View.MemoryView":672
* else:
* result.append(slice(None))
* have_slices = True # <<<<<<<<<<<<<<
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
*/
__pyx_v_have_slices = 1;
/* "View.MemoryView":666
* seen_ellipsis = False
* for idx, item in enumerate(tup):
* if item is Ellipsis: # <<<<<<<<<<<<<<
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
*/
goto __pyx_L6;
}
/* "View.MemoryView":674
* have_slices = True
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
* raise TypeError("Cannot index with type '%s'" % type(item))
*
*/
/*else*/ {
__pyx_t_2 = PySlice_Check(__pyx_v_item);
__pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0);
if (__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0);
__pyx_t_1 = __pyx_t_10;
__pyx_L9_bool_binop_done:;
if (__pyx_t_1) {
/* "View.MemoryView":675
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
* raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<<
*
* have_slices = have_slices or isinstance(item, slice)
*/
__pyx_t_7 = __Pyx_PyString_Format(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 675, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_11 = PyTuple_New(1); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 675, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_GIVEREF(__pyx_t_7);
PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_7);
__pyx_t_7 = 0;
__pyx_t_7 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_t_11, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 675, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
__Pyx_Raise(__pyx_t_7, 0, 0, 0);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__PYX_ERR(1, 675, __pyx_L1_error)
/* "View.MemoryView":674
* have_slices = True
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
* raise TypeError("Cannot index with type '%s'" % type(item))
*
*/
}
/* "View.MemoryView":677
* raise TypeError("Cannot index with type '%s'" % type(item))
*
* have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<<
* result.append(item)
*
*/
__pyx_t_10 = (__pyx_v_have_slices != 0);
if (!__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = PySlice_Check(__pyx_v_item);
__pyx_t_2 = (__pyx_t_10 != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L11_bool_binop_done:;
__pyx_v_have_slices = __pyx_t_1;
/* "View.MemoryView":678
*
* have_slices = have_slices or isinstance(item, slice)
* result.append(item) # <<<<<<<<<<<<<<
*
* nslices = ndim - len(result)
*/
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == -1)) __PYX_ERR(1, 678, __pyx_L1_error)
}
__pyx_L6:;
/* "View.MemoryView":665
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":680
* result.append(item)
*
* nslices = ndim - len(result) # <<<<<<<<<<<<<<
* if nslices:
* result.extend([slice(None)] * nslices)
*/
__pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == -1)) __PYX_ERR(1, 680, __pyx_L1_error)
__pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5);
/* "View.MemoryView":681
*
* nslices = ndim - len(result)
* if nslices: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * nslices)
*
*/
__pyx_t_1 = (__pyx_v_nslices != 0);
if (__pyx_t_1) {
/* "View.MemoryView":682
* nslices = ndim - len(result)
* if nslices:
* result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<<
*
* return have_slices or nslices, tuple(result)
*/
__pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 682, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__17);
__Pyx_GIVEREF(__pyx_slice__17);
PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__17);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == -1)) __PYX_ERR(1, 682, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":681
*
* nslices = ndim - len(result)
* if nslices: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * nslices)
*
*/
}
/* "View.MemoryView":684
* result.extend([slice(None)] * nslices)
*
* return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<<
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
*/
__Pyx_XDECREF(__pyx_r);
if (!__pyx_v_have_slices) {
} else {
__pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 684, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L14_bool_binop_done;
}
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 684, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_L14_bool_binop_done:;
__pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 684, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 684, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_4);
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_r = ((PyObject*)__pyx_t_7);
__pyx_t_7 = 0;
goto __pyx_L0;
/* "View.MemoryView":652
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_11);
__Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_tup);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_XDECREF(__pyx_v_item);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":686
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
*/
static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) {
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
Py_ssize_t *__pyx_t_1;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
__Pyx_RefNannySetupContext("assert_direct_dimensions", 0);
/* "View.MemoryView":687
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<<
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported")
*/
__pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim);
for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) {
__pyx_t_1 = __pyx_t_3;
__pyx_v_suboffset = (__pyx_t_1[0]);
/* "View.MemoryView":688
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* raise ValueError("Indirect dimensions not supported")
*
*/
__pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":689
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 689, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__PYX_ERR(1, 689, __pyx_L1_error)
/* "View.MemoryView":688
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* raise ValueError("Indirect dimensions not supported")
*
*/
}
}
/* "View.MemoryView":686
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":696
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) {
int __pyx_v_new_ndim;
int __pyx_v_suboffset_dim;
int __pyx_v_dim;
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
__Pyx_memviewslice *__pyx_v_p_src;
struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0;
__Pyx_memviewslice *__pyx_v_p_dst;
int *__pyx_v_p_suboffset_dim;
Py_ssize_t __pyx_v_start;
Py_ssize_t __pyx_v_stop;
Py_ssize_t __pyx_v_step;
int __pyx_v_have_start;
int __pyx_v_have_stop;
int __pyx_v_have_step;
PyObject *__pyx_v_index = NULL;
struct __pyx_memoryview_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
struct __pyx_memoryview_obj *__pyx_t_4;
char *__pyx_t_5;
int __pyx_t_6;
Py_ssize_t __pyx_t_7;
PyObject *(*__pyx_t_8)(PyObject *);
PyObject *__pyx_t_9 = NULL;
Py_ssize_t __pyx_t_10;
int __pyx_t_11;
Py_ssize_t __pyx_t_12;
__Pyx_RefNannySetupContext("memview_slice", 0);
/* "View.MemoryView":697
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices):
* cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<<
* cdef bint negative_step
* cdef __Pyx_memviewslice src, dst
*/
__pyx_v_new_ndim = 0;
__pyx_v_suboffset_dim = -1;
/* "View.MemoryView":704
*
*
* memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<<
*
* cdef _memoryviewslice memviewsliceobj
*/
memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)));
/* "View.MemoryView":708
* cdef _memoryviewslice memviewsliceobj
*
* assert memview.view.ndim > 0 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(1, 708, __pyx_L1_error)
}
}
#endif
/* "View.MemoryView":710
* assert memview.view.ndim > 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":711
*
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview # <<<<<<<<<<<<<<
* p_src = &memviewsliceobj.from_slice
* else:
*/
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 711, __pyx_L1_error)
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":712
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<<
* else:
* slice_copy(memview, &src)
*/
__pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice);
/* "View.MemoryView":710
* assert memview.view.ndim > 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice
*/
goto __pyx_L3;
}
/* "View.MemoryView":714
* p_src = &memviewsliceobj.from_slice
* else:
* slice_copy(memview, &src) # <<<<<<<<<<<<<<
* p_src = &src
*
*/
/*else*/ {
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src));
/* "View.MemoryView":715
* else:
* slice_copy(memview, &src)
* p_src = &src # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_p_src = (&__pyx_v_src);
}
__pyx_L3:;
/* "View.MemoryView":721
*
*
* dst.memview = p_src.memview # <<<<<<<<<<<<<<
* dst.data = p_src.data
*
*/
__pyx_t_4 = __pyx_v_p_src->memview;
__pyx_v_dst.memview = __pyx_t_4;
/* "View.MemoryView":722
*
* dst.memview = p_src.memview
* dst.data = p_src.data # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __pyx_v_p_src->data;
__pyx_v_dst.data = __pyx_t_5;
/* "View.MemoryView":727
*
*
* cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<<
* cdef int *p_suboffset_dim = &suboffset_dim
* cdef Py_ssize_t start, stop, step
*/
__pyx_v_p_dst = (&__pyx_v_dst);
/* "View.MemoryView":728
*
* cdef __Pyx_memviewslice *p_dst = &dst
* cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<<
* cdef Py_ssize_t start, stop, step
* cdef bint have_start, have_stop, have_step
*/
__pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim);
/* "View.MemoryView":732
* cdef bint have_start, have_stop, have_step
*
* for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
* if PyIndex_Check(index):
* slice_memviewslice(
*/
__pyx_t_6 = 0;
if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) {
__pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0;
__pyx_t_8 = NULL;
} else {
__pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 732, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 732, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_8)) {
if (likely(PyList_CheckExact(__pyx_t_3))) {
if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 732, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 732, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
} else {
if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 732, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 732, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
}
} else {
__pyx_t_9 = __pyx_t_8(__pyx_t_3);
if (unlikely(!__pyx_t_9)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 732, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_9);
}
__Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9);
__pyx_t_9 = 0;
__pyx_v_dim = __pyx_t_6;
__pyx_t_6 = (__pyx_t_6 + 1);
/* "View.MemoryView":733
*
* for dim, index in enumerate(indices):
* if PyIndex_Check(index): # <<<<<<<<<<<<<<
* slice_memviewslice(
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
*/
__pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":737
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
* index, 0, 0, # start, stop, step # <<<<<<<<<<<<<<
* 0, 0, 0, # have_{start,stop,step}
* False)
*/
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 737, __pyx_L1_error)
/* "View.MemoryView":734
* for dim, index in enumerate(indices):
* if PyIndex_Check(index):
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == -1)) __PYX_ERR(1, 734, __pyx_L1_error)
/* "View.MemoryView":733
*
* for dim, index in enumerate(indices):
* if PyIndex_Check(index): # <<<<<<<<<<<<<<
* slice_memviewslice(
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
*/
goto __pyx_L6;
}
/* "View.MemoryView":740
* 0, 0, 0, # have_{start,stop,step}
* False)
* elif index is None: # <<<<<<<<<<<<<<
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
*/
__pyx_t_2 = (__pyx_v_index == Py_None);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":741
* False)
* elif index is None:
* p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<<
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
*/
(__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1;
/* "View.MemoryView":742
* elif index is None:
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<<
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1
*/
(__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0;
/* "View.MemoryView":743
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<<
* new_ndim += 1
* else:
*/
(__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L;
/* "View.MemoryView":744
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1 # <<<<<<<<<<<<<<
* else:
* start = index.start or 0
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
/* "View.MemoryView":740
* 0, 0, 0, # have_{start,stop,step}
* False)
* elif index is None: # <<<<<<<<<<<<<<
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
*/
goto __pyx_L6;
}
/* "View.MemoryView":746
* new_ndim += 1
* else:
* start = index.start or 0 # <<<<<<<<<<<<<<
* stop = index.stop or 0
* step = index.step or 0
*/
/*else*/ {
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 746, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 746, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L7_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L7_bool_binop_done:;
__pyx_v_start = __pyx_t_10;
/* "View.MemoryView":747
* else:
* start = index.start or 0
* stop = index.stop or 0 # <<<<<<<<<<<<<<
* step = index.step or 0
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 747, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 747, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 747, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L9_bool_binop_done:;
__pyx_v_stop = __pyx_t_10;
/* "View.MemoryView":748
* start = index.start or 0
* stop = index.stop or 0
* step = index.step or 0 # <<<<<<<<<<<<<<
*
* have_start = index.start is not None
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 748, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 748, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 748, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L11_bool_binop_done:;
__pyx_v_step = __pyx_t_10;
/* "View.MemoryView":750
* step = index.step or 0
*
* have_start = index.start is not None # <<<<<<<<<<<<<<
* have_stop = index.stop is not None
* have_step = index.step is not None
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 750, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_start = __pyx_t_1;
/* "View.MemoryView":751
*
* have_start = index.start is not None
* have_stop = index.stop is not None # <<<<<<<<<<<<<<
* have_step = index.step is not None
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 751, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_stop = __pyx_t_1;
/* "View.MemoryView":752
* have_start = index.start is not None
* have_stop = index.stop is not None
* have_step = index.step is not None # <<<<<<<<<<<<<<
*
* slice_memviewslice(
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 752, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_step = __pyx_t_1;
/* "View.MemoryView":754
* have_step = index.step is not None
*
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == -1)) __PYX_ERR(1, 754, __pyx_L1_error)
/* "View.MemoryView":760
* have_start, have_stop, have_step,
* True)
* new_ndim += 1 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
}
__pyx_L6:;
/* "View.MemoryView":732
* cdef bint have_start, have_stop, have_step
*
* for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
* if PyIndex_Check(index):
* slice_memviewslice(
*/
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":762
* new_ndim += 1
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":763
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":764
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func, # <<<<<<<<<<<<<<
* memviewsliceobj.to_dtype_func,
* memview.dtype_is_object)
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 764, __pyx_L1_error) }
/* "View.MemoryView":765
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
* else:
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 765, __pyx_L1_error) }
/* "View.MemoryView":763
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 763, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 763, __pyx_L1_error)
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":762
* new_ndim += 1
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
*/
}
/* "View.MemoryView":768
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
/*else*/ {
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":769
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL,
* memview.dtype_is_object) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 768, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
/* "View.MemoryView":768
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 768, __pyx_L1_error)
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":696
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":793
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) {
Py_ssize_t __pyx_v_new_shape;
int __pyx_v_negative_step;
int __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
/* "View.MemoryView":813
* cdef bint negative_step
*
* if not is_slice: # <<<<<<<<<<<<<<
*
* if start < 0:
*/
__pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":815
* if not is_slice:
*
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if not 0 <= start < shape:
*/
__pyx_t_1 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":816
*
* if start < 0:
* start += shape # <<<<<<<<<<<<<<
* if not 0 <= start < shape:
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
*/
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
/* "View.MemoryView":815
* if not is_slice:
*
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if not 0 <= start < shape:
*/
}
/* "View.MemoryView":817
* if start < 0:
* start += shape
* if not 0 <= start < shape: # <<<<<<<<<<<<<<
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
* else:
*/
__pyx_t_1 = (0 <= __pyx_v_start);
if (__pyx_t_1) {
__pyx_t_1 = (__pyx_v_start < __pyx_v_shape);
}
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":818
* start += shape
* if not 0 <= start < shape:
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<<
* else:
*
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) __PYX_ERR(1, 818, __pyx_L1_error)
/* "View.MemoryView":817
* if start < 0:
* start += shape
* if not 0 <= start < shape: # <<<<<<<<<<<<<<
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
* else:
*/
}
/* "View.MemoryView":813
* cdef bint negative_step
*
* if not is_slice: # <<<<<<<<<<<<<<
*
* if start < 0:
*/
goto __pyx_L3;
}
/* "View.MemoryView":821
* else:
*
* negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<<
*
* if have_step and step == 0:
*/
/*else*/ {
__pyx_t_1 = ((__pyx_v_have_step != 0) != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step < 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L6_bool_binop_done:;
__pyx_v_negative_step = __pyx_t_2;
/* "View.MemoryView":823
* negative_step = have_step != 0 and step < 0
*
* if have_step and step == 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
*
*/
__pyx_t_1 = (__pyx_v_have_step != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step == 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L9_bool_binop_done:;
if (__pyx_t_2) {
/* "View.MemoryView":824
*
* if have_step and step == 0:
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) __PYX_ERR(1, 824, __pyx_L1_error)
/* "View.MemoryView":823
* negative_step = have_step != 0 and step < 0
*
* if have_step and step == 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
*
*/
}
/* "View.MemoryView":827
*
*
* if have_start: # <<<<<<<<<<<<<<
* if start < 0:
* start += shape
*/
__pyx_t_2 = (__pyx_v_have_start != 0);
if (__pyx_t_2) {
/* "View.MemoryView":828
*
* if have_start:
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if start < 0:
*/
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":829
* if have_start:
* if start < 0:
* start += shape # <<<<<<<<<<<<<<
* if start < 0:
* start = 0
*/
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
/* "View.MemoryView":830
* if start < 0:
* start += shape
* if start < 0: # <<<<<<<<<<<<<<
* start = 0
* elif start >= shape:
*/
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":831
* start += shape
* if start < 0:
* start = 0 # <<<<<<<<<<<<<<
* elif start >= shape:
* if negative_step:
*/
__pyx_v_start = 0;
/* "View.MemoryView":830
* if start < 0:
* start += shape
* if start < 0: # <<<<<<<<<<<<<<
* start = 0
* elif start >= shape:
*/
}
/* "View.MemoryView":828
*
* if have_start:
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if start < 0:
*/
goto __pyx_L12;
}
/* "View.MemoryView":832
* if start < 0:
* start = 0
* elif start >= shape: # <<<<<<<<<<<<<<
* if negative_step:
* start = shape - 1
*/
__pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":833
* start = 0
* elif start >= shape:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":834
* elif start >= shape:
* if negative_step:
* start = shape - 1 # <<<<<<<<<<<<<<
* else:
* start = shape
*/
__pyx_v_start = (__pyx_v_shape - 1);
/* "View.MemoryView":833
* start = 0
* elif start >= shape:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
goto __pyx_L14;
}
/* "View.MemoryView":836
* start = shape - 1
* else:
* start = shape # <<<<<<<<<<<<<<
* else:
* if negative_step:
*/
/*else*/ {
__pyx_v_start = __pyx_v_shape;
}
__pyx_L14:;
/* "View.MemoryView":832
* if start < 0:
* start = 0
* elif start >= shape: # <<<<<<<<<<<<<<
* if negative_step:
* start = shape - 1
*/
}
__pyx_L12:;
/* "View.MemoryView":827
*
*
* if have_start: # <<<<<<<<<<<<<<
* if start < 0:
* start += shape
*/
goto __pyx_L11;
}
/* "View.MemoryView":838
* start = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
/*else*/ {
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":839
* else:
* if negative_step:
* start = shape - 1 # <<<<<<<<<<<<<<
* else:
* start = 0
*/
__pyx_v_start = (__pyx_v_shape - 1);
/* "View.MemoryView":838
* start = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
goto __pyx_L15;
}
/* "View.MemoryView":841
* start = shape - 1
* else:
* start = 0 # <<<<<<<<<<<<<<
*
* if have_stop:
*/
/*else*/ {
__pyx_v_start = 0;
}
__pyx_L15:;
}
__pyx_L11:;
/* "View.MemoryView":843
* start = 0
*
* if have_stop: # <<<<<<<<<<<<<<
* if stop < 0:
* stop += shape
*/
__pyx_t_2 = (__pyx_v_have_stop != 0);
if (__pyx_t_2) {
/* "View.MemoryView":844
*
* if have_stop:
* if stop < 0: # <<<<<<<<<<<<<<
* stop += shape
* if stop < 0:
*/
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":845
* if have_stop:
* if stop < 0:
* stop += shape # <<<<<<<<<<<<<<
* if stop < 0:
* stop = 0
*/
__pyx_v_stop = (__pyx_v_stop + __pyx_v_shape);
/* "View.MemoryView":846
* if stop < 0:
* stop += shape
* if stop < 0: # <<<<<<<<<<<<<<
* stop = 0
* elif stop > shape:
*/
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":847
* stop += shape
* if stop < 0:
* stop = 0 # <<<<<<<<<<<<<<
* elif stop > shape:
* stop = shape
*/
__pyx_v_stop = 0;
/* "View.MemoryView":846
* if stop < 0:
* stop += shape
* if stop < 0: # <<<<<<<<<<<<<<
* stop = 0
* elif stop > shape:
*/
}
/* "View.MemoryView":844
*
* if have_stop:
* if stop < 0: # <<<<<<<<<<<<<<
* stop += shape
* if stop < 0:
*/
goto __pyx_L17;
}
/* "View.MemoryView":848
* if stop < 0:
* stop = 0
* elif stop > shape: # <<<<<<<<<<<<<<
* stop = shape
* else:
*/
__pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":849
* stop = 0
* elif stop > shape:
* stop = shape # <<<<<<<<<<<<<<
* else:
* if negative_step:
*/
__pyx_v_stop = __pyx_v_shape;
/* "View.MemoryView":848
* if stop < 0:
* stop = 0
* elif stop > shape: # <<<<<<<<<<<<<<
* stop = shape
* else:
*/
}
__pyx_L17:;
/* "View.MemoryView":843
* start = 0
*
* if have_stop: # <<<<<<<<<<<<<<
* if stop < 0:
* stop += shape
*/
goto __pyx_L16;
}
/* "View.MemoryView":851
* stop = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* stop = -1
* else:
*/
/*else*/ {
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":852
* else:
* if negative_step:
* stop = -1 # <<<<<<<<<<<<<<
* else:
* stop = shape
*/
__pyx_v_stop = -1L;
/* "View.MemoryView":851
* stop = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* stop = -1
* else:
*/
goto __pyx_L19;
}
/* "View.MemoryView":854
* stop = -1
* else:
* stop = shape # <<<<<<<<<<<<<<
*
* if not have_step:
*/
/*else*/ {
__pyx_v_stop = __pyx_v_shape;
}
__pyx_L19:;
}
__pyx_L16:;
/* "View.MemoryView":856
* stop = shape
*
* if not have_step: # <<<<<<<<<<<<<<
* step = 1
*
*/
__pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":857
*
* if not have_step:
* step = 1 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_step = 1;
/* "View.MemoryView":856
* stop = shape
*
* if not have_step: # <<<<<<<<<<<<<<
* step = 1
*
*/
}
/* "View.MemoryView":861
*
* with cython.cdivision(True):
* new_shape = (stop - start) // step # <<<<<<<<<<<<<<
*
* if (stop - start) - step * new_shape:
*/
__pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step);
/* "View.MemoryView":863
* new_shape = (stop - start) // step
*
* if (stop - start) - step * new_shape: # <<<<<<<<<<<<<<
* new_shape += 1
*
*/
__pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":864
*
* if (stop - start) - step * new_shape:
* new_shape += 1 # <<<<<<<<<<<<<<
*
* if new_shape < 0:
*/
__pyx_v_new_shape = (__pyx_v_new_shape + 1);
/* "View.MemoryView":863
* new_shape = (stop - start) // step
*
* if (stop - start) - step * new_shape: # <<<<<<<<<<<<<<
* new_shape += 1
*
*/
}
/* "View.MemoryView":866
* new_shape += 1
*
* if new_shape < 0: # <<<<<<<<<<<<<<
* new_shape = 0
*
*/
__pyx_t_2 = ((__pyx_v_new_shape < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":867
*
* if new_shape < 0:
* new_shape = 0 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_new_shape = 0;
/* "View.MemoryView":866
* new_shape += 1
*
* if new_shape < 0: # <<<<<<<<<<<<<<
* new_shape = 0
*
*/
}
/* "View.MemoryView":870
*
*
* dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<<
* dst.shape[new_ndim] = new_shape
* dst.suboffsets[new_ndim] = suboffset
*/
(__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step);
/* "View.MemoryView":871
*
* dst.strides[new_ndim] = stride * step
* dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<<
* dst.suboffsets[new_ndim] = suboffset
*
*/
(__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape;
/* "View.MemoryView":872
* dst.strides[new_ndim] = stride * step
* dst.shape[new_ndim] = new_shape
* dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<<
*
*
*/
(__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset;
}
__pyx_L3:;
/* "View.MemoryView":875
*
*
* if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
* dst.data += start * stride
* else:
*/
__pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":876
*
* if suboffset_dim[0] < 0:
* dst.data += start * stride # <<<<<<<<<<<<<<
* else:
* dst.suboffsets[suboffset_dim[0]] += start * stride
*/
__pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride));
/* "View.MemoryView":875
*
*
* if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
* dst.data += start * stride
* else:
*/
goto __pyx_L23;
}
/* "View.MemoryView":878
* dst.data += start * stride
* else:
* dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<<
*
* if suboffset >= 0:
*/
/*else*/ {
__pyx_t_3 = (__pyx_v_suboffset_dim[0]);
(__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride));
}
__pyx_L23:;
/* "View.MemoryView":880
* dst.suboffsets[suboffset_dim[0]] += start * stride
*
* if suboffset >= 0: # <<<<<<<<<<<<<<
* if not is_slice:
* if new_ndim == 0:
*/
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":881
*
* if suboffset >= 0:
* if not is_slice: # <<<<<<<<<<<<<<
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset
*/
__pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":882
* if suboffset >= 0:
* if not is_slice:
* if new_ndim == 0: # <<<<<<<<<<<<<<
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
*/
__pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":883
* if not is_slice:
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<<
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d "
*/
__pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset);
/* "View.MemoryView":882
* if suboffset >= 0:
* if not is_slice:
* if new_ndim == 0: # <<<<<<<<<<<<<<
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
*/
goto __pyx_L26;
}
/* "View.MemoryView":885
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<<
* "must be indexed and not sliced", dim)
* else:
*/
/*else*/ {
/* "View.MemoryView":886
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d "
* "must be indexed and not sliced", dim) # <<<<<<<<<<<<<<
* else:
* suboffset_dim[0] = new_ndim
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) __PYX_ERR(1, 885, __pyx_L1_error)
}
__pyx_L26:;
/* "View.MemoryView":881
*
* if suboffset >= 0:
* if not is_slice: # <<<<<<<<<<<<<<
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset
*/
goto __pyx_L25;
}
/* "View.MemoryView":888
* "must be indexed and not sliced", dim)
* else:
* suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<<
*
* return 0
*/
/*else*/ {
(__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim;
}
__pyx_L25:;
/* "View.MemoryView":880
* dst.suboffsets[suboffset_dim[0]] += start * stride
*
* if suboffset >= 0: # <<<<<<<<<<<<<<
* if not is_slice:
* if new_ndim == 0:
*/
}
/* "View.MemoryView":890
* suboffset_dim[0] = new_ndim
*
* return 0 # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":793
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
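/* --- Illustrative sketch (not part of the Cython-generated output) ---
 * The extent computation near the end of slice_memviewslice above is plain
 * ceiling division applied to bounds that were already normalized and
 * clamped. A minimal standalone restatement; the name is hypothetical and
 * the function only assumes Py_ssize_t, already available in this
 * translation unit: */
static CYTHON_UNUSED Py_ssize_t __pyx_sketch_slice_extent(Py_ssize_t start,
                                                          Py_ssize_t stop,
                                                          Py_ssize_t step)
{
    /* C '/' truncates toward zero; bump by one when a remainder is left,
     * so (start=0, stop=5, step=2) yields 3 elements, not 2. */
    Py_ssize_t n = (stop - start) / step;
    if ((stop - start) - step * n)
        n++;
    return (n < 0) ? 0 : n; /* an empty slice never has a negative length */
}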
/* "View.MemoryView":896
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) {
Py_ssize_t __pyx_v_shape;
Py_ssize_t __pyx_v_stride;
Py_ssize_t __pyx_v_suboffset;
Py_ssize_t __pyx_v_itemsize;
char *__pyx_v_resultp;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
__Pyx_RefNannySetupContext("pybuffer_index", 0);
/* "View.MemoryView":898
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<<
* cdef Py_ssize_t itemsize = view.itemsize
* cdef char *resultp
*/
__pyx_v_suboffset = -1L;
/* "View.MemoryView":899
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
* cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<<
* cdef char *resultp
*
*/
__pyx_t_1 = __pyx_v_view->itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":902
* cdef char *resultp
*
* if view.ndim == 0: # <<<<<<<<<<<<<<
* shape = view.len / itemsize
* stride = itemsize
*/
__pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":903
*
* if view.ndim == 0:
* shape = view.len / itemsize # <<<<<<<<<<<<<<
* stride = itemsize
* else:
*/
if (unlikely(__pyx_v_itemsize == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
__PYX_ERR(1, 903, __pyx_L1_error)
}
else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) {
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
__PYX_ERR(1, 903, __pyx_L1_error)
}
__pyx_v_shape = (__pyx_v_view->len / __pyx_v_itemsize);
/* "View.MemoryView":904
* if view.ndim == 0:
* shape = view.len / itemsize
* stride = itemsize # <<<<<<<<<<<<<<
* else:
* shape = view.shape[dim]
*/
__pyx_v_stride = __pyx_v_itemsize;
/* "View.MemoryView":902
* cdef char *resultp
*
* if view.ndim == 0: # <<<<<<<<<<<<<<
* shape = view.len / itemsize
* stride = itemsize
*/
goto __pyx_L3;
}
/* "View.MemoryView":906
* stride = itemsize
* else:
* shape = view.shape[dim] # <<<<<<<<<<<<<<
* stride = view.strides[dim]
* if view.suboffsets != NULL:
*/
/*else*/ {
__pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]);
/* "View.MemoryView":907
* else:
* shape = view.shape[dim]
* stride = view.strides[dim] # <<<<<<<<<<<<<<
* if view.suboffsets != NULL:
* suboffset = view.suboffsets[dim]
*/
__pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]);
/* "View.MemoryView":908
* shape = view.shape[dim]
* stride = view.strides[dim]
* if view.suboffsets != NULL: # <<<<<<<<<<<<<<
* suboffset = view.suboffsets[dim]
*
*/
__pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":909
* stride = view.strides[dim]
* if view.suboffsets != NULL:
* suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<<
*
* if index < 0:
*/
__pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]);
/* "View.MemoryView":908
* shape = view.shape[dim]
* stride = view.strides[dim]
* if view.suboffsets != NULL: # <<<<<<<<<<<<<<
* suboffset = view.suboffsets[dim]
*
*/
}
}
__pyx_L3:;
/* "View.MemoryView":911
* suboffset = view.suboffsets[dim]
*
* if index < 0: # <<<<<<<<<<<<<<
* index += view.shape[dim]
* if index < 0:
*/
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":912
*
* if index < 0:
* index += view.shape[dim] # <<<<<<<<<<<<<<
* if index < 0:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*/
__pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim]));
/* "View.MemoryView":913
* if index < 0:
* index += view.shape[dim]
* if index < 0: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":914
* index += view.shape[dim]
* if index < 0:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
*
* if index >= shape:
*/
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 914, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 914, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 914, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 914, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__PYX_ERR(1, 914, __pyx_L1_error)
/* "View.MemoryView":913
* if index < 0:
* index += view.shape[dim]
* if index < 0: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
}
/* "View.MemoryView":911
* suboffset = view.suboffsets[dim]
*
* if index < 0: # <<<<<<<<<<<<<<
* index += view.shape[dim]
* if index < 0:
*/
}
/* "View.MemoryView":916
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* if index >= shape: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
__pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":917
*
* if index >= shape:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
*
* resultp = bufp + index * stride
*/
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 917, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 917, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 917, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 917, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 917, __pyx_L1_error)
/* "View.MemoryView":916
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* if index >= shape: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
}
/* "View.MemoryView":919
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* resultp = bufp + index * stride # <<<<<<<<<<<<<<
* if suboffset >= 0:
* resultp = (<char **> resultp)[0] + suboffset
*/
__pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride));
/* "View.MemoryView":920
*
* resultp = bufp + index * stride
* if suboffset >= 0: # <<<<<<<<<<<<<<
* resultp = (<char **> resultp)[0] + suboffset
*
*/
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":921
* resultp = bufp + index * stride
* if suboffset >= 0:
* resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<<
*
* return resultp
*/
__pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset);
/* "View.MemoryView":920
*
* resultp = bufp + index * stride
* if suboffset >= 0: # <<<<<<<<<<<<<<
* resultp = (<char **> resultp)[0] + suboffset
*
*/
}
/* "View.MemoryView":923
* resultp = (<char **> resultp)[0] + suboffset
*
* return resultp # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_resultp;
goto __pyx_L0;
/* "View.MemoryView":896
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
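/* --- Illustrative sketch (not part of the Cython-generated output) ---
 * pybuffer_index above reduces to three steps: wrap a negative index,
 * bounds-check it, and do strided pointer arithmetic, following one level
 * of indirection when the axis carries a suboffset (PEP 3118 PIL-style
 * buffers). A hedged minimal version with the IndexError raising replaced
 * by a NULL return; the name is hypothetical: */
static CYTHON_UNUSED char *__pyx_sketch_index_ptr(char *bufp, Py_ssize_t index,
                                                  Py_ssize_t shape,
                                                  Py_ssize_t stride,
                                                  Py_ssize_t suboffset)
{
    if (index < 0)
        index += shape;                    /* Python-style negative indexing */
    if (index < 0 || index >= shape)
        return NULL;                       /* out of bounds on this axis */
    bufp += index * stride;                /* strided access along the axis */
    if (suboffset >= 0)
        bufp = *(char **)bufp + suboffset; /* indirect axis: deref + offset */
    return bufp;
}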
/* "View.MemoryView":929
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) {
int __pyx_v_ndim;
Py_ssize_t *__pyx_v_shape;
Py_ssize_t *__pyx_v_strides;
int __pyx_v_i;
int __pyx_v_j;
int __pyx_r;
int __pyx_t_1;
Py_ssize_t *__pyx_t_2;
long __pyx_t_3;
Py_ssize_t __pyx_t_4;
Py_ssize_t __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
/* "View.MemoryView":930
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0:
* cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<<
*
* cdef Py_ssize_t *shape = memslice.shape
*/
__pyx_t_1 = __pyx_v_memslice->memview->view.ndim;
__pyx_v_ndim = __pyx_t_1;
/* "View.MemoryView":932
* cdef int ndim = memslice.memview.view.ndim
*
* cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<<
* cdef Py_ssize_t *strides = memslice.strides
*
*/
__pyx_t_2 = __pyx_v_memslice->shape;
__pyx_v_shape = __pyx_t_2;
/* "View.MemoryView":933
*
* cdef Py_ssize_t *shape = memslice.shape
* cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = __pyx_v_memslice->strides;
__pyx_v_strides = __pyx_t_2;
/* "View.MemoryView":937
*
* cdef int i, j
* for i in range(ndim / 2): # <<<<<<<<<<<<<<
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i]
*/
__pyx_t_3 = (__pyx_v_ndim / 2);
for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_3; __pyx_t_1+=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":938
* cdef int i, j
* for i in range(ndim / 2):
* j = ndim - 1 - i # <<<<<<<<<<<<<<
* strides[i], strides[j] = strides[j], strides[i]
* shape[i], shape[j] = shape[j], shape[i]
*/
__pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i);
/* "View.MemoryView":939
* for i in range(ndim / 2):
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<<
* shape[i], shape[j] = shape[j], shape[i]
*
*/
__pyx_t_4 = (__pyx_v_strides[__pyx_v_j]);
__pyx_t_5 = (__pyx_v_strides[__pyx_v_i]);
(__pyx_v_strides[__pyx_v_i]) = __pyx_t_4;
(__pyx_v_strides[__pyx_v_j]) = __pyx_t_5;
/* "View.MemoryView":940
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i]
* shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<<
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
*/
__pyx_t_5 = (__pyx_v_shape[__pyx_v_j]);
__pyx_t_4 = (__pyx_v_shape[__pyx_v_i]);
(__pyx_v_shape[__pyx_v_i]) = __pyx_t_5;
(__pyx_v_shape[__pyx_v_j]) = __pyx_t_4;
/* "View.MemoryView":942
* shape[i], shape[j] = shape[j], shape[i]
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<<
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
*/
__pyx_t_7 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0);
if (!__pyx_t_7) {
} else {
__pyx_t_6 = __pyx_t_7;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_7 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0);
__pyx_t_6 = __pyx_t_7;
__pyx_L6_bool_binop_done:;
if (__pyx_t_6) {
/* "View.MemoryView":943
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<<
*
* return 1
*/
__pyx_t_8 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_8 == -1)) __PYX_ERR(1, 943, __pyx_L1_error)
/* "View.MemoryView":942
* shape[i], shape[j] = shape[j], shape[i]
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<<
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
*/
}
}
/* "View.MemoryView":945
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
* return 1 # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = 1;
goto __pyx_L0;
/* "View.MemoryView":929
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = 0;
__pyx_L0:;
return __pyx_r;
}
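/* --- Illustrative sketch (not part of the Cython-generated output) ---
 * transpose_memslice above never moves the data: reversing the axis order
 * is just swapping shape/stride pairs inward from both ends, which is also
 * why indirect (suboffset) dimensions have to be rejected. Standalone form
 * of the swap loop, with a hypothetical name: */
static CYTHON_UNUSED void __pyx_sketch_transpose(Py_ssize_t *shape,
                                                 Py_ssize_t *strides, int ndim)
{
    int i;
    for (i = 0; i < ndim / 2; i++) {
        int j = ndim - 1 - i;
        Py_ssize_t t;
        t = strides[i]; strides[i] = strides[j]; strides[j] = t;
        t = shape[i];   shape[i]   = shape[j];   shape[j]   = t;
    }
}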
/* "View.MemoryView":962
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
/* Python wrapper */
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":963
*
* def __dealloc__(self):
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<<
*
* cdef convert_item_to_object(self, char *itemp):
*/
__PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1);
/* "View.MemoryView":962
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":965
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":966
*
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL: # <<<<<<<<<<<<<<
* return self.to_object_func(itemp)
* else:
*/
__pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":967
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL:
* return self.to_object_func(itemp) # <<<<<<<<<<<<<<
* else:
* return memoryview.convert_item_to_object(self, itemp)
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 967, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":966
*
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL: # <<<<<<<<<<<<<<
* return self.to_object_func(itemp)
* else:
*/
}
/* "View.MemoryView":969
* return self.to_object_func(itemp)
* else:
* return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<<
*
* cdef assign_item_from_object(self, char *itemp, object value):
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 969, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":965
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
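/* --- Illustrative sketch (not part of the Cython-generated output) ---
 * convert_item_to_object above is an "optional override via function
 * pointer" dispatch: the dtype-specific converter wins when one was
 * registered, otherwise the generic base-class path runs. The shape of
 * that pattern, with hypothetical names: */
static CYTHON_UNUSED PyObject *__pyx_sketch_dispatch_item(
    PyObject *(*to_object)(char *), PyObject *(*fallback)(char *), char *itemp)
{
    return (to_object != NULL) ? to_object(itemp)  /* fast, typed converter */
                               : fallback(itemp);  /* generic fallback path */
}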
/* "View.MemoryView":971
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":972
*
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL: # <<<<<<<<<<<<<<
* self.to_dtype_func(itemp, value)
* else:
*/
__pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":973
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<<
* else:
* memoryview.assign_item_from_object(self, itemp, value)
*/
__pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == 0)) __PYX_ERR(1, 973, __pyx_L1_error)
/* "View.MemoryView":972
*
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL: # <<<<<<<<<<<<<<
* self.to_dtype_func(itemp, value)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":975
* self.to_dtype_func(itemp, value)
* else:
* memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<<
*
* @property
*/
/*else*/ {
__pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 975, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
__pyx_L3:;
/* "View.MemoryView":971
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
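/* --- Illustrative note (not part of the Cython-generated output) ---
 * to_dtype_func is declared 'except 0' in the .pyx source, so on the C side
 * any nonzero return means success and 0 means "a Python exception is
 * already set"; that is why the call above tests '== 0' rather than the
 * more common '< 0'. Sketch of that calling convention, with a hypothetical
 * name: */
static CYTHON_UNUSED int __pyx_sketch_checked_store(
    int (*to_dtype)(char *, PyObject *), char *itemp, PyObject *value)
{
    if (to_dtype(itemp, value) == 0)
        return -1; /* exception set by the callee; propagate it upward */
    return 0;
}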
/* "View.MemoryView":978
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.from_object
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":979
* @property
* def base(self):
* return self.from_object # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->from_object);
__pyx_r = __pyx_v_self->from_object;
goto __pyx_L0;
/* "View.MemoryView":978
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.from_object
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":985
*
* @cname('__pyx_memoryview_fromslice')
* cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
* int ndim,
* object (*to_object_func)(char *),
*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_TypeInfo *__pyx_t_4;
Py_buffer __pyx_t_5;
Py_ssize_t *__pyx_t_6;
Py_ssize_t *__pyx_t_7;
Py_ssize_t *__pyx_t_8;
Py_ssize_t __pyx_t_9;
__Pyx_RefNannySetupContext("memoryview_fromslice", 0);
/* "View.MemoryView":993
* cdef _memoryviewslice result
*
* if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
* return None
*
*/
__pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":994
*
* if <PyObject *> memviewslice.memview == Py_None:
* return None # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(Py_None);
__pyx_r = Py_None;
goto __pyx_L0;
/* "View.MemoryView":993
* cdef _memoryviewslice result
*
* if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
* return None
*
*/
}
/* "View.MemoryView":999
*
*
* result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<<
*
* result.from_slice = memviewslice
*/
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 999, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 999, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None);
__Pyx_INCREF(__pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 999, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":1001
* result = _memoryviewslice(None, 0, dtype_is_object)
*
* result.from_slice = memviewslice # <<<<<<<<<<<<<<
* __PYX_INC_MEMVIEW(&memviewslice, 1)
*
*/
__pyx_v_result->from_slice = __pyx_v_memviewslice;
/* "View.MemoryView":1002
*
* result.from_slice = memviewslice
* __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<<
*
* result.from_object = (<memoryview> memviewslice.memview).base
*/
__PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1);
/* "View.MemoryView":1004
* __PYX_INC_MEMVIEW(&memviewslice, 1)
*
* result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<<
* result.typeinfo = memviewslice.memview.typeinfo
*
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1004, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__Pyx_GOTREF(__pyx_v_result->from_object);
__Pyx_DECREF(__pyx_v_result->from_object);
__pyx_v_result->from_object = __pyx_t_2;
__pyx_t_2 = 0;
/* "View.MemoryView":1005
*
* result.from_object = (<memoryview> memviewslice.memview).base
* result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<<
*
* result.view = memviewslice.memview.view
*/
__pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo;
__pyx_v_result->__pyx_base.typeinfo = __pyx_t_4;
/* "View.MemoryView":1007
* result.typeinfo = memviewslice.memview.typeinfo
*
* result.view = memviewslice.memview.view # <<<<<<<<<<<<<<
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim
*/
__pyx_t_5 = __pyx_v_memviewslice.memview->view;
__pyx_v_result->__pyx_base.view = __pyx_t_5;
/* "View.MemoryView":1008
*
* result.view = memviewslice.memview.view
* result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<<
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None
*/
__pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data);
/* "View.MemoryView":1009
* result.view = memviewslice.memview.view
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &result.view).obj = Py_None
* Py_INCREF(Py_None)
*/
__pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim;
/* "View.MemoryView":1010
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None;
/* "View.MemoryView":1011
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* result.flags = PyBUF_RECORDS
*/
Py_INCREF(Py_None);
/* "View.MemoryView":1013
* Py_INCREF(Py_None)
*
* result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<<
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape
*/
__pyx_v_result->__pyx_base.flags = PyBUF_RECORDS;
/* "View.MemoryView":1015
* result.flags = PyBUF_RECORDS
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<<
* result.view.strides = <Py_ssize_t *> result.from_slice.strides
*
*/
__pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape);
/* "View.MemoryView":1016
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape
* result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides);
/* "View.MemoryView":1019
*
*
* result.view.suboffsets = NULL # <<<<<<<<<<<<<<
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0:
*/
__pyx_v_result->__pyx_base.view.suboffsets = NULL;
/* "View.MemoryView":1020
*
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<<
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
*/
__pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_v_suboffset = (__pyx_t_6[0]);
/* "View.MemoryView":1021
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break
*/
__pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1022
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets);
/* "View.MemoryView":1023
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break # <<<<<<<<<<<<<<
*
* result.view.len = result.view.itemsize
*/
goto __pyx_L5_break;
/* "View.MemoryView":1021
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break
*/
}
}
__pyx_L5_break:;
/* "View.MemoryView":1025
* break
*
* result.view.len = result.view.itemsize # <<<<<<<<<<<<<<
* for length in result.view.shape[:ndim]:
* result.view.len *= length
*/
__pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
/* "View.MemoryView":1026
*
* result.view.len = result.view.itemsize
* for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<<
* result.view.len *= length
*
*/
__pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1026, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":1027
* result.view.len = result.view.itemsize
* for length in result.view.shape[:ndim]:
* result.view.len *= length # <<<<<<<<<<<<<<
*
* result.to_object_func = to_object_func
*/
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1027, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1027, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1027, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
}
/* "View.MemoryView":1029
* result.view.len *= length
*
* result.to_object_func = to_object_func # <<<<<<<<<<<<<<
* result.to_dtype_func = to_dtype_func
*
*/
__pyx_v_result->to_object_func = __pyx_v_to_object_func;
/* "View.MemoryView":1030
*
* result.to_object_func = to_object_func
* result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<<
*
* return result
*/
__pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func;
/* "View.MemoryView":1032
* result.to_dtype_func = to_dtype_func
*
* return result # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":985
*
* @cname('__pyx_memoryview_fromslice')
* cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
* int ndim,
* object (*to_object_func)(char *),
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
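/* --- Illustrative sketch (not part of the Cython-generated output) ---
 * Two small computations in memoryview_fromslice above are worth isolating:
 * Py_buffer.len is itemsize times the product of the extents, and the
 * suboffsets array is only published when at least one axis is actually
 * indirect (-1 marks a direct axis). Hypothetical standalone helpers: */
static CYTHON_UNUSED Py_ssize_t __pyx_sketch_buffer_len(Py_ssize_t itemsize,
                                                        const Py_ssize_t *shape,
                                                        int ndim)
{
    Py_ssize_t len = itemsize;
    int i;
    for (i = 0; i < ndim; i++)
        len *= shape[i]; /* total bytes spanned by the logical view */
    return len;
}
static CYTHON_UNUSED int __pyx_sketch_has_indirect(const Py_ssize_t *suboffsets,
                                                   int ndim)
{
    int i;
    for (i = 0; i < ndim; i++)
        if (suboffsets[i] >= 0)
            return 1; /* at least one PIL-style indirect axis */
    return 0;
}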
/* "View.MemoryView":1035
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
* cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *mslice):
* cdef _memoryviewslice obj
*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) {
struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0;
__Pyx_memviewslice *__pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("get_slice_from_memview", 0);
/* "View.MemoryView":1038
* __Pyx_memviewslice *mslice):
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* obj = memview
* return &obj.from_slice
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1039
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice):
* obj = memview # <<<<<<<<<<<<<<
* return &obj.from_slice
* else:
*/
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1039, __pyx_L1_error)
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":1040
* if isinstance(memview, _memoryviewslice):
* obj = memview
* return &obj.from_slice # <<<<<<<<<<<<<<
* else:
* slice_copy(memview, mslice)
*/
__pyx_r = (&__pyx_v_obj->from_slice);
goto __pyx_L0;
/* "View.MemoryView":1038
* __Pyx_memviewslice *mslice):
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* obj = memview
* return &obj.from_slice
*/
}
/* "View.MemoryView":1042
* return &obj.from_slice
* else:
* slice_copy(memview, mslice) # <<<<<<<<<<<<<<
* return mslice
*
*/
/*else*/ {
__pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice);
/* "View.MemoryView":1043
* else:
* slice_copy(memview, mslice)
* return mslice # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_slice_copy')
*/
__pyx_r = __pyx_v_mslice;
goto __pyx_L0;
}
/* "View.MemoryView":1035
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
* cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *mslice):
* cdef _memoryviewslice obj
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_WriteUnraisable("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename, 0, 0);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_obj);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
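/* --- Illustrative note (not part of the Cython-generated output) ---
 * get_slice_from_memview above is a borrow-or-materialize pattern: when the
 * object already owns a __Pyx_memviewslice (the _memoryviewslice subclass),
 * a pointer to its internal storage is borrowed; otherwise the caller's
 * stack slot is filled via slice_copy, avoiding any heap allocation. Generic
 * shape of the idea, with a hypothetical name: */
static CYTHON_UNUSED __Pyx_memviewslice *__pyx_sketch_borrow_or_fill(
    __Pyx_memviewslice *owned, __Pyx_memviewslice *scratch,
    const __Pyx_memviewslice *src)
{
    if (owned != NULL)
        return owned; /* borrow: lifetime tied to the owning object */
    *scratch = *src;  /* materialize into caller-provided storage */
    return scratch;
}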
/* "View.MemoryView":1046
*
* @cname('__pyx_memoryview_slice_copy')
* cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<<
* cdef int dim
* cdef (Py_ssize_t*) shape, strides, suboffsets
*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) {
int __pyx_v_dim;
Py_ssize_t *__pyx_v_shape;
Py_ssize_t *__pyx_v_strides;
Py_ssize_t *__pyx_v_suboffsets;
__Pyx_RefNannyDeclarations
Py_ssize_t *__pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
Py_ssize_t __pyx_t_4;
__Pyx_RefNannySetupContext("slice_copy", 0);
/* "View.MemoryView":1050
* cdef (Py_ssize_t*) shape, strides, suboffsets
*
* shape = memview.view.shape # <<<<<<<<<<<<<<
* strides = memview.view.strides
* suboffsets = memview.view.suboffsets
*/
__pyx_t_1 = __pyx_v_memview->view.shape;
__pyx_v_shape = __pyx_t_1;
/* "View.MemoryView":1051
*
* shape = memview.view.shape
* strides = memview.view.strides # <<<<<<<<<<<<<<
* suboffsets = memview.view.suboffsets
*
*/
__pyx_t_1 = __pyx_v_memview->view.strides;
__pyx_v_strides = __pyx_t_1;
/* "View.MemoryView":1052
* shape = memview.view.shape
* strides = memview.view.strides
* suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<<
*
* dst.memview = <__pyx_memoryview *> memview
*/
__pyx_t_1 = __pyx_v_memview->view.suboffsets;
__pyx_v_suboffsets = __pyx_t_1;
/* "View.MemoryView":1054
* suboffsets = memview.view.suboffsets
*
* dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<<
* dst.data = <char *> memview.view.buf
*
*/
__pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview);
/* "View.MemoryView":1055
*
* dst.memview = <__pyx_memoryview *> memview
* dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<<
*
* for dim in range(memview.view.ndim):
*/
__pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf);
/* "View.MemoryView":1057
* dst.data = <char *> memview.view.buf
*
* for dim in range(memview.view.ndim): # <<<<<<<<<<<<<<
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim]
*/
__pyx_t_2 = __pyx_v_memview->view.ndim;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_dim = __pyx_t_3;
/* "View.MemoryView":1058
*
* for dim in range(memview.view.ndim):
* dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<<
* dst.strides[dim] = strides[dim]
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
*/
(__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]);
/* "View.MemoryView":1059
* for dim in range(memview.view.ndim):
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<<
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
*
*/
(__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]);
/* "View.MemoryView":1060
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim]
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_object')
*/
if ((__pyx_v_suboffsets != 0)) {
__pyx_t_4 = (__pyx_v_suboffsets[__pyx_v_dim]);
} else {
__pyx_t_4 = -1L;
}
(__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_4;
}
/* "View.MemoryView":1046
*
* @cname('__pyx_memoryview_slice_copy')
* cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<<
* cdef int dim
* cdef (Py_ssize_t*) shape, strides, suboffsets
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
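/* --- Illustrative sketch (not part of the Cython-generated output) ---
 * The only subtlety in slice_copy above is the suboffsets fallback: a
 * producer that exports no suboffsets at all passes NULL, which must be
 * flattened to the per-axis sentinel -1 ("direct axis"). Hypothetical
 * standalone form of that loop body: */
static CYTHON_UNUSED void __pyx_sketch_copy_suboffsets(Py_ssize_t *dst,
                                                       const Py_ssize_t *src,
                                                       int ndim)
{
    int i;
    for (i = 0; i < ndim; i++)
        dst[i] = src ? src[i] : -1; /* NULL array == every axis is direct */
}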
/* "View.MemoryView":1063
*
* @cname('__pyx_memoryview_copy_object')
* cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) {
__Pyx_memviewslice __pyx_v_memviewslice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("memoryview_copy", 0);
/* "View.MemoryView":1066
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
* slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<<
* return memoryview_copy_from_slice(memview, &memviewslice)
*
*/
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice));
/* "View.MemoryView":1067
* cdef __Pyx_memviewslice memviewslice
* slice_copy(memview, &memviewslice)
* return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_object_from_slice')
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1067, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":1063
*
* @cname('__pyx_memoryview_copy_object')
* cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1070
*
* @cname('__pyx_memoryview_copy_object_from_slice')
* cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
* """
* Create a new memoryview object from a given memoryview object and slice.
*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) {
PyObject *(*__pyx_v_to_object_func)(char *);
int (*__pyx_v_to_dtype_func)(char *, PyObject *);
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *(*__pyx_t_3)(char *);
int (*__pyx_t_4)(char *, PyObject *);
PyObject *__pyx_t_5 = NULL;
__Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0);
/* "View.MemoryView":1077
* cdef int (*to_dtype_func)(char *, object) except 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1078
*
* if isinstance(memview, _memoryviewslice):
* to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<<
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
* else:
*/
__pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func;
__pyx_v_to_object_func = __pyx_t_3;
/* "View.MemoryView":1079
* if isinstance(memview, _memoryviewslice):
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<<
* else:
* to_object_func = NULL
*/
__pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func;
__pyx_v_to_dtype_func = __pyx_t_4;
/* "View.MemoryView":1077
* cdef int (*to_dtype_func)(char *, object) except 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
*/
goto __pyx_L3;
}
/* "View.MemoryView":1081
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
* else:
* to_object_func = NULL # <<<<<<<<<<<<<<
* to_dtype_func = NULL
*
*/
/*else*/ {
__pyx_v_to_object_func = NULL;
/* "View.MemoryView":1082
* else:
* to_object_func = NULL
* to_dtype_func = NULL # <<<<<<<<<<<<<<
*
* return memoryview_fromslice(memviewslice[0], memview.view.ndim,
*/
__pyx_v_to_dtype_func = NULL;
}
__pyx_L3:;
/* "View.MemoryView":1084
* to_dtype_func = NULL
*
* return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<<
* to_object_func, to_dtype_func,
* memview.dtype_is_object)
*/
__Pyx_XDECREF(__pyx_r);
/* "View.MemoryView":1086
* return memoryview_fromslice(memviewslice[0], memview.view.ndim,
* to_object_func, to_dtype_func,
* memview.dtype_is_object) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1084, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":1070
*
* @cname('__pyx_memoryview_copy_object_from_slice')
* cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
* """
* Create a new memoryview object from a given memoryview object and slice.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1092
*
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<<
* if arg < 0:
* return -arg
*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) {
Py_ssize_t __pyx_r;
int __pyx_t_1;
/* "View.MemoryView":1093
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0: # <<<<<<<<<<<<<<
* return -arg
* else:
*/
__pyx_t_1 = ((__pyx_v_arg < 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1094
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0:
* return -arg # <<<<<<<<<<<<<<
* else:
* return arg
*/
__pyx_r = (-__pyx_v_arg);
goto __pyx_L0;
/* "View.MemoryView":1093
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0: # <<<<<<<<<<<<<<
* return -arg
* else:
*/
}
/* "View.MemoryView":1096
* return -arg
* else:
* return arg # <<<<<<<<<<<<<<
*
* @cname('__pyx_get_best_slice_order')
*/
/*else*/ {
__pyx_r = __pyx_v_arg;
goto __pyx_L0;
}
/* "View.MemoryView":1092
*
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<<
* if arg < 0:
* return -arg
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":1099
*
* @cname('__pyx_get_best_slice_order')
* cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<<
* """
* Figure out the best memory access order for a given slice.
*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) {
int __pyx_v_i;
Py_ssize_t __pyx_v_c_stride;
Py_ssize_t __pyx_v_f_stride;
char __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
/* "View.MemoryView":1104
* """
* cdef int i
* cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<<
* cdef Py_ssize_t f_stride = 0
*
*/
__pyx_v_c_stride = 0;
/* "View.MemoryView":1105
* cdef int i
* cdef Py_ssize_t c_stride = 0
* cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<<
*
* for i in range(ndim - 1, -1, -1):
*/
__pyx_v_f_stride = 0;
/* "View.MemoryView":1107
* cdef Py_ssize_t f_stride = 0
*
* for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i]
*/
for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1L; __pyx_t_1-=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":1108
*
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* c_stride = mslice.strides[i]
* break
*/
__pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1109
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i] # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1110
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i]
* break # <<<<<<<<<<<<<<
*
* for i in range(ndim):
*/
goto __pyx_L4_break;
/* "View.MemoryView":1108
*
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* c_stride = mslice.strides[i]
* break
*/
}
}
__pyx_L4_break:;
/* "View.MemoryView":1112
* break
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i]
*/
__pyx_t_1 = __pyx_v_ndim;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_1; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1113
*
* for i in range(ndim):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* f_stride = mslice.strides[i]
* break
*/
__pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1114
* for i in range(ndim):
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i] # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1115
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i]
* break # <<<<<<<<<<<<<<
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):
*/
goto __pyx_L7_break;
/* "View.MemoryView":1113
*
* for i in range(ndim):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* f_stride = mslice.strides[i]
* break
*/
}
}
__pyx_L7_break:;
/* "View.MemoryView":1117
* break
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<<
* return 'C'
* else:
*/
__pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1118
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):
* return 'C' # <<<<<<<<<<<<<<
* else:
* return 'F'
*/
__pyx_r = 'C';
goto __pyx_L0;
/* "View.MemoryView":1117
* break
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<<
* return 'C'
* else:
*/
}
/* "View.MemoryView":1120
* return 'C'
* else:
* return 'F' # <<<<<<<<<<<<<<
*
* @cython.cdivision(True)
*/
/*else*/ {
__pyx_r = 'F';
goto __pyx_L0;
}
/* "View.MemoryView":1099
*
* @cname('__pyx_get_best_slice_order')
* cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<<
* """
* Figure out the best memory access order for a given slice.
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
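/* --- Illustrative sketch (not part of the Cython-generated output) ---
 * get_best_order above compares the innermost significant stride (the
 * C-order candidate) against the outermost one (the Fortran-order
 * candidate), skipping axes of extent <= 1 whose strides carry no
 * information. Compact equivalent with a hypothetical name: */
static CYTHON_UNUSED char __pyx_sketch_best_order(const Py_ssize_t *shape,
                                                  const Py_ssize_t *strides,
                                                  int ndim)
{
    Py_ssize_t c_stride = 0, f_stride = 0, ac, af;
    int i;
    for (i = ndim - 1; i >= 0; i--)
        if (shape[i] > 1) { c_stride = strides[i]; break; }
    for (i = 0; i < ndim; i++)
        if (shape[i] > 1) { f_stride = strides[i]; break; }
    ac = c_stride < 0 ? -c_stride : c_stride;
    af = f_stride < 0 ? -f_stride : f_stride;
    return (ac <= af) ? 'C' : 'F'; /* ties go to 'C', as in the code above */
}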
/* "View.MemoryView":1123
*
* @cython.cdivision(True)
* cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
* char *dst_data, Py_ssize_t *dst_strides,
* Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
*/
static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent;
Py_ssize_t __pyx_v_dst_extent;
Py_ssize_t __pyx_v_src_stride;
Py_ssize_t __pyx_v_dst_stride;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
Py_ssize_t __pyx_t_4;
Py_ssize_t __pyx_t_5;
/* "View.MemoryView":1130
*
* cdef Py_ssize_t i
* cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0]
*/
__pyx_v_src_extent = (__pyx_v_src_shape[0]);
/* "View.MemoryView":1131
* cdef Py_ssize_t i
* cdef Py_ssize_t src_extent = src_shape[0]
* cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t src_stride = src_strides[0]
* cdef Py_ssize_t dst_stride = dst_strides[0]
*/
__pyx_v_dst_extent = (__pyx_v_dst_shape[0]);
/* "View.MemoryView":1132
* cdef Py_ssize_t src_extent = src_shape[0]
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
*/
__pyx_v_src_stride = (__pyx_v_src_strides[0]);
/* "View.MemoryView":1133
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0]
* cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<<
*
* if ndim == 1:
*/
__pyx_v_dst_stride = (__pyx_v_dst_strides[0]);
/* "View.MemoryView":1135
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
*/
__pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1136
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
__pyx_t_2 = ((__pyx_v_src_stride > 0) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L5_bool_binop_done;
}
__pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L5_bool_binop_done;
}
/* "View.MemoryView":1137
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<<
* memcpy(dst_data, src_data, itemsize * dst_extent)
* else:
*/
__pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize);
if (__pyx_t_2) {
__pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride));
}
__pyx_t_3 = (__pyx_t_2 != 0);
__pyx_t_1 = __pyx_t_3;
__pyx_L5_bool_binop_done:;
/* "View.MemoryView":1136
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
if (__pyx_t_1) {
/* "View.MemoryView":1138
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<<
* else:
* for i in range(dst_extent):
*/
memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent));
/* "View.MemoryView":1136
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
goto __pyx_L4;
}
/* "View.MemoryView":1140
* memcpy(dst_data, src_data, itemsize * dst_extent)
* else:
* for i in range(dst_extent): # <<<<<<<<<<<<<<
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride
*/
/*else*/ {
__pyx_t_4 = __pyx_v_dst_extent;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
__pyx_v_i = __pyx_t_5;
/* "View.MemoryView":1141
* else:
* for i in range(dst_extent):
* memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<<
* src_data += src_stride
* dst_data += dst_stride
*/
memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize);
/* "View.MemoryView":1142
* for i in range(dst_extent):
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride # <<<<<<<<<<<<<<
* dst_data += dst_stride
* else:
*/
__pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
/* "View.MemoryView":1143
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride
* dst_data += dst_stride # <<<<<<<<<<<<<<
* else:
* for i in range(dst_extent):
*/
__pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
}
}
__pyx_L4:;
/* "View.MemoryView":1135
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
*/
goto __pyx_L3;
}
/* "View.MemoryView":1145
* dst_data += dst_stride
* else:
* for i in range(dst_extent): # <<<<<<<<<<<<<<
* _copy_strided_to_strided(src_data, src_strides + 1,
* dst_data, dst_strides + 1,
*/
/*else*/ {
__pyx_t_4 = __pyx_v_dst_extent;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
__pyx_v_i = __pyx_t_5;
/* "View.MemoryView":1146
* else:
* for i in range(dst_extent):
* _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<<
* dst_data, dst_strides + 1,
* src_shape + 1, dst_shape + 1,
*/
_copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize);
/* "View.MemoryView":1150
* src_shape + 1, dst_shape + 1,
* ndim - 1, itemsize)
* src_data += src_stride # <<<<<<<<<<<<<<
* dst_data += dst_stride
*
*/
__pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
/* "View.MemoryView":1151
* ndim - 1, itemsize)
* src_data += src_stride
* dst_data += dst_stride # <<<<<<<<<<<<<<
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src,
*/
__pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
}
}
__pyx_L3:;
/* "View.MemoryView":1123
*
* @cython.cdivision(True)
* cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
* char *dst_data, Py_ssize_t *dst_strides,
* Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
*/
/* function exit code */
}
/* "View.MemoryView":1153
* dst_data += dst_stride
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
*/
static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
/* "View.MemoryView":1156
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
* _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<<
* src.shape, dst.shape, ndim, itemsize)
*
*/
_copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize);
/* "View.MemoryView":1153
* dst_data += dst_stride
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
*/
/* function exit code */
}
/* "View.MemoryView":1160
*
* @cname('__pyx_memoryview_slice_get_size')
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<<
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef int i
*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) {
int __pyx_v_i;
Py_ssize_t __pyx_v_size;
Py_ssize_t __pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
/* "View.MemoryView":1163
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef int i
* cdef Py_ssize_t size = src.memview.view.itemsize # <<<<<<<<<<<<<<
*
* for i in range(ndim):
*/
__pyx_t_1 = __pyx_v_src->memview->view.itemsize;
__pyx_v_size = __pyx_t_1;
/* "View.MemoryView":1165
* cdef Py_ssize_t size = src.memview.view.itemsize
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* size *= src.shape[i]
*
*/
__pyx_t_2 = __pyx_v_ndim;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1166
*
* for i in range(ndim):
* size *= src.shape[i] # <<<<<<<<<<<<<<
*
* return size
*/
__pyx_v_size = (__pyx_v_size * (__pyx_v_src->shape[__pyx_v_i]));
}
/* "View.MemoryView":1168
* size *= src.shape[i]
*
* return size # <<<<<<<<<<<<<<
*
* @cname('__pyx_fill_contig_strides_array')
*/
__pyx_r = __pyx_v_size;
goto __pyx_L0;
/* "View.MemoryView":1160
*
* @cname('__pyx_memoryview_slice_get_size')
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<<
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef int i
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":1171
*
* @cname('__pyx_fill_contig_strides_array')
* cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<<
* Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
* int ndim, char order) nogil:
*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) {
int __pyx_v_idx;
Py_ssize_t __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
/* "View.MemoryView":1180
* cdef int idx
*
* if order == 'F': # <<<<<<<<<<<<<<
* for idx in range(ndim):
* strides[idx] = stride
*/
__pyx_t_1 = ((__pyx_v_order == 'F') != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1181
*
* if order == 'F':
* for idx in range(ndim): # <<<<<<<<<<<<<<
* strides[idx] = stride
* stride = stride * shape[idx]
*/
__pyx_t_2 = __pyx_v_ndim;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_idx = __pyx_t_3;
/* "View.MemoryView":1182
* if order == 'F':
* for idx in range(ndim):
* strides[idx] = stride # <<<<<<<<<<<<<<
* stride = stride * shape[idx]
* else:
*/
(__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
/* "View.MemoryView":1183
* for idx in range(ndim):
* strides[idx] = stride
* stride = stride * shape[idx] # <<<<<<<<<<<<<<
* else:
* for idx in range(ndim - 1, -1, -1):
*/
__pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
}
/* "View.MemoryView":1180
* cdef int idx
*
* if order == 'F': # <<<<<<<<<<<<<<
* for idx in range(ndim):
* strides[idx] = stride
*/
goto __pyx_L3;
}
/* "View.MemoryView":1185
* stride = stride * shape[idx]
* else:
* for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* strides[idx] = stride
* stride = stride * shape[idx]
*/
/*else*/ {
for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1L; __pyx_t_2-=1) {
__pyx_v_idx = __pyx_t_2;
/* "View.MemoryView":1186
* else:
* for idx in range(ndim - 1, -1, -1):
* strides[idx] = stride # <<<<<<<<<<<<<<
* stride = stride * shape[idx]
*
*/
(__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
/* "View.MemoryView":1187
* for idx in range(ndim - 1, -1, -1):
* strides[idx] = stride
* stride = stride * shape[idx] # <<<<<<<<<<<<<<
*
* return stride
*/
__pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
}
}
__pyx_L3:;
/* "View.MemoryView":1189
* stride = stride * shape[idx]
*
* return stride # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_data_to_temp')
*/
__pyx_r = __pyx_v_stride;
goto __pyx_L0;
/* "View.MemoryView":1171
*
* @cname('__pyx_fill_contig_strides_array')
* cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<<
* Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
* int ndim, char order) nogil:
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":1192
*
* @cname('__pyx_memoryview_copy_data_to_temp')
* cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *tmpslice,
* char order,
*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) {
int __pyx_v_i;
void *__pyx_v_result;
size_t __pyx_v_itemsize;
size_t __pyx_v_size;
void *__pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
struct __pyx_memoryview_obj *__pyx_t_4;
int __pyx_t_5;
/* "View.MemoryView":1203
* cdef void *result
*
* cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<<
* cdef size_t size = slice_get_size(src, ndim)
*
*/
__pyx_t_1 = __pyx_v_src->memview->view.itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":1204
*
* cdef size_t itemsize = src.memview.view.itemsize
* cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<<
*
* result = malloc(size)
*/
__pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim);
/* "View.MemoryView":1206
* cdef size_t size = slice_get_size(src, ndim)
*
* result = malloc(size) # <<<<<<<<<<<<<<
* if not result:
* _err(MemoryError, NULL)
*/
__pyx_v_result = malloc(__pyx_v_size);
/* "View.MemoryView":1207
*
* result = malloc(size)
* if not result: # <<<<<<<<<<<<<<
* _err(MemoryError, NULL)
*
*/
__pyx_t_2 = ((!(__pyx_v_result != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1208
* result = malloc(size)
* if not result:
* _err(MemoryError, NULL) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == -1)) __PYX_ERR(1, 1208, __pyx_L1_error)
/* "View.MemoryView":1207
*
* result = malloc(size)
* if not result: # <<<<<<<<<<<<<<
* _err(MemoryError, NULL)
*
*/
}
/* "View.MemoryView":1211
*
*
* tmpslice.data = <char *> result # <<<<<<<<<<<<<<
* tmpslice.memview = src.memview
* for i in range(ndim):
*/
__pyx_v_tmpslice->data = ((char *)__pyx_v_result);
/* "View.MemoryView":1212
*
* tmpslice.data = <char *> result
* tmpslice.memview = src.memview # <<<<<<<<<<<<<<
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i]
*/
__pyx_t_4 = __pyx_v_src->memview;
__pyx_v_tmpslice->memview = __pyx_t_4;
/* "View.MemoryView":1213
* tmpslice.data = <char *> result
* tmpslice.memview = src.memview
* for i in range(ndim): # <<<<<<<<<<<<<<
* tmpslice.shape[i] = src.shape[i]
* tmpslice.suboffsets[i] = -1
*/
__pyx_t_3 = __pyx_v_ndim;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_3; __pyx_t_5+=1) {
__pyx_v_i = __pyx_t_5;
/* "View.MemoryView":1214
* tmpslice.memview = src.memview
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<<
* tmpslice.suboffsets[i] = -1
*
*/
(__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]);
/* "View.MemoryView":1215
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i]
* tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<<
*
* fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize,
*/
(__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L;
}
/* "View.MemoryView":1217
* tmpslice.suboffsets[i] = -1
*
* fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<<
* ndim, order)
*
*/
__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order);
/* "View.MemoryView":1221
*
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if tmpslice.shape[i] == 1:
* tmpslice.strides[i] = 0
*/
__pyx_t_3 = __pyx_v_ndim;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_3; __pyx_t_5+=1) {
__pyx_v_i = __pyx_t_5;
/* "View.MemoryView":1222
*
* for i in range(ndim):
* if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<<
* tmpslice.strides[i] = 0
*
*/
__pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1223
* for i in range(ndim):
* if tmpslice.shape[i] == 1:
* tmpslice.strides[i] = 0 # <<<<<<<<<<<<<<
*
* if slice_is_contig(src[0], order, ndim):
*/
(__pyx_v_tmpslice->strides[__pyx_v_i]) = 0;
/* "View.MemoryView":1222
*
* for i in range(ndim):
* if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<<
* tmpslice.strides[i] = 0
*
*/
}
}
/* "View.MemoryView":1225
* tmpslice.strides[i] = 0
*
* if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<<
* memcpy(result, src.data, size)
* else:
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1226
*
* if slice_is_contig(src[0], order, ndim):
* memcpy(result, src.data, size) # <<<<<<<<<<<<<<
* else:
* copy_strided_to_strided(src, tmpslice, ndim, itemsize)
*/
memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size);
/* "View.MemoryView":1225
* tmpslice.strides[i] = 0
*
* if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<<
* memcpy(result, src.data, size)
* else:
*/
goto __pyx_L9;
}
/* "View.MemoryView":1228
* memcpy(result, src.data, size)
* else:
* copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<<
*
* return result
*/
/*else*/ {
copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize);
}
__pyx_L9:;
/* "View.MemoryView":1230
* copy_strided_to_strided(src, tmpslice, ndim, itemsize)
*
* return result # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "View.MemoryView":1192
*
* @cname('__pyx_memoryview_copy_data_to_temp')
* cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *tmpslice,
* char order,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = NULL;
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":1235
*
* @cname('__pyx_memoryview_err_extents')
* cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
*/
static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err_extents", 0);
/* "View.MemoryView":1238
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
* (i, extent1, extent2)) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_err_dim')
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1238, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1238, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1238, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1238, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_3 = 0;
/* "View.MemoryView":1237
* cdef int _err_extents(int i, Py_ssize_t extent1,
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<<
* (i, extent1, extent2))
*
*/
__pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 1237, __pyx_L1_error)
/* "View.MemoryView":1235
*
* @cname('__pyx_memoryview_err_extents')
* cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
/* "View.MemoryView":1241
*
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii') % dim)
*
*/
static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err_dim", 0);
__Pyx_INCREF(__pyx_v_error);
/* "View.MemoryView":1242
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil:
* raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_err')
*/
__pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1242, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1242, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1242, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_INCREF(__pyx_v_error);
__pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_2)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
if (!__pyx_t_2) {
__pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1242, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_GOTREF(__pyx_t_1);
} else {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_3)) {
PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_4};
__pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1242, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) {
PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_4};
__pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1242, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
} else
#endif
{
__pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1242, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __pyx_t_2 = NULL;
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1242, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 1242, __pyx_L1_error)
/* "View.MemoryView":1241
*
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii') % dim)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_XDECREF(__pyx_v_error);
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
/* "View.MemoryView":1245
*
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
* if msg != NULL:
* raise error(msg.decode('ascii'))
*/
static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err", 0);
__Pyx_INCREF(__pyx_v_error);
/* "View.MemoryView":1246
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii'))
* else:
*/
__pyx_t_1 = ((__pyx_v_msg != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1247
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL:
* raise error(msg.decode('ascii')) # <<<<<<<<<<<<<<
* else:
* raise error
*/
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1247, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_error);
__pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_4, function);
}
}
if (!__pyx_t_5) {
__pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1247, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_GOTREF(__pyx_t_2);
} else {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_4)) {
PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_3};
__pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1247, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) {
PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_3};
__pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1247, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else
#endif
{
__pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 1247, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = NULL;
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1247, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
}
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 1247, __pyx_L1_error)
/* "View.MemoryView":1246
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii'))
* else:
*/
}
/* "View.MemoryView":1249
* raise error(msg.decode('ascii'))
* else:
* raise error # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_contents')
*/
/*else*/ {
__Pyx_Raise(__pyx_v_error, 0, 0, 0);
__PYX_ERR(1, 1249, __pyx_L1_error)
}
/* "View.MemoryView":1245
*
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
* if msg != NULL:
* raise error(msg.decode('ascii'))
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_XDECREF(__pyx_v_error);
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
/* "View.MemoryView":1252
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) {
void *__pyx_v_tmpdata;
size_t __pyx_v_itemsize;
int __pyx_v_i;
char __pyx_v_order;
int __pyx_v_broadcasting;
int __pyx_v_direct_copy;
__Pyx_memviewslice __pyx_v_tmp;
int __pyx_v_ndim;
int __pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
void *__pyx_t_6;
int __pyx_t_7;
/* "View.MemoryView":1260
* Check for overlapping memory and verify the shapes.
* """
* cdef void *tmpdata = NULL # <<<<<<<<<<<<<<
* cdef size_t itemsize = src.memview.view.itemsize
* cdef int i
*/
__pyx_v_tmpdata = NULL;
/* "View.MemoryView":1261
* """
* cdef void *tmpdata = NULL
* cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<<
* cdef int i
* cdef char order = get_best_order(&src, src_ndim)
*/
__pyx_t_1 = __pyx_v_src.memview->view.itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":1263
* cdef size_t itemsize = src.memview.view.itemsize
* cdef int i
* cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<<
* cdef bint broadcasting = False
* cdef bint direct_copy = False
*/
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim);
/* "View.MemoryView":1264
* cdef int i
* cdef char order = get_best_order(&src, src_ndim)
* cdef bint broadcasting = False # <<<<<<<<<<<<<<
* cdef bint direct_copy = False
* cdef __Pyx_memviewslice tmp
*/
__pyx_v_broadcasting = 0;
/* "View.MemoryView":1265
* cdef char order = get_best_order(&src, src_ndim)
* cdef bint broadcasting = False
* cdef bint direct_copy = False # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice tmp
*
*/
__pyx_v_direct_copy = 0;
/* "View.MemoryView":1268
* cdef __Pyx_memviewslice tmp
*
* if src_ndim < dst_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
*/
__pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1269
*
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<<
* elif dst_ndim < src_ndim:
* broadcast_leading(&dst, dst_ndim, src_ndim)
*/
__pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim);
/* "View.MemoryView":1268
* cdef __Pyx_memviewslice tmp
*
* if src_ndim < dst_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
*/
goto __pyx_L3;
}
/* "View.MemoryView":1270
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
*/
__pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1271
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
* broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<<
*
* cdef int ndim = max(src_ndim, dst_ndim)
*/
__pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim);
/* "View.MemoryView":1270
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
*/
}
__pyx_L3:;
/* "View.MemoryView":1273
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
* cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<<
*
* for i in range(ndim):
*/
__pyx_t_3 = __pyx_v_dst_ndim;
__pyx_t_4 = __pyx_v_src_ndim;
if (((__pyx_t_3 > __pyx_t_4) != 0)) {
__pyx_t_5 = __pyx_t_3;
} else {
__pyx_t_5 = __pyx_t_4;
}
__pyx_v_ndim = __pyx_t_5;
/* "View.MemoryView":1275
* cdef int ndim = max(src_ndim, dst_ndim)
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1:
*/
__pyx_t_5 = __pyx_v_ndim;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_5; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1276
*
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<<
* if src.shape[i] == 1:
* broadcasting = True
*/
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1277
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1: # <<<<<<<<<<<<<<
* broadcasting = True
* src.strides[i] = 0
*/
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1278
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1:
* broadcasting = True # <<<<<<<<<<<<<<
* src.strides[i] = 0
* else:
*/
__pyx_v_broadcasting = 1;
/* "View.MemoryView":1279
* if src.shape[i] == 1:
* broadcasting = True
* src.strides[i] = 0 # <<<<<<<<<<<<<<
* else:
* _err_extents(i, dst.shape[i], src.shape[i])
*/
(__pyx_v_src.strides[__pyx_v_i]) = 0;
/* "View.MemoryView":1277
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1: # <<<<<<<<<<<<<<
* broadcasting = True
* src.strides[i] = 0
*/
goto __pyx_L7;
}
/* "View.MemoryView":1281
* src.strides[i] = 0
* else:
* _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<<
*
* if src.suboffsets[i] >= 0:
*/
/*else*/ {
__pyx_t_4 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_4 == -1)) __PYX_ERR(1, 1281, __pyx_L1_error)
}
__pyx_L7:;
/* "View.MemoryView":1276
*
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<<
* if src.shape[i] == 1:
* broadcasting = True
*/
}
/* "View.MemoryView":1283
* _err_extents(i, dst.shape[i], src.shape[i])
*
* if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
*/
__pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1284
*
* if src.suboffsets[i] >= 0:
* _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<<
*
* if slices_overlap(&src, &dst, ndim, itemsize):
*/
__pyx_t_4 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_4 == -1)) __PYX_ERR(1, 1284, __pyx_L1_error)
/* "View.MemoryView":1283
* _err_extents(i, dst.shape[i], src.shape[i])
*
* if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
*/
}
}
/* "View.MemoryView":1286
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
* if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<<
*
* if not slice_is_contig(src, order, ndim):
*/
__pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1288
* if slices_overlap(&src, &dst, ndim, itemsize):
*
* if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<<
* order = get_best_order(&dst, ndim)
*
*/
__pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1289
*
* if not slice_is_contig(src, order, ndim):
* order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<<
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
*/
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim);
/* "View.MemoryView":1288
* if slices_overlap(&src, &dst, ndim, itemsize):
*
* if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<<
* order = get_best_order(&dst, ndim)
*
*/
}
/* "View.MemoryView":1291
* order = get_best_order(&dst, ndim)
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<<
* src = tmp
*
*/
__pyx_t_6 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_6 == NULL)) __PYX_ERR(1, 1291, __pyx_L1_error)
__pyx_v_tmpdata = __pyx_t_6;
/* "View.MemoryView":1292
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
* src = tmp # <<<<<<<<<<<<<<
*
* if not broadcasting:
*/
__pyx_v_src = __pyx_v_tmp;
/* "View.MemoryView":1286
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
* if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<<
*
* if not slice_is_contig(src, order, ndim):
*/
}
/* "View.MemoryView":1294
* src = tmp
*
* if not broadcasting: # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1297
*
*
* if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1298
*
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<<
* elif slice_is_contig(src, 'F', ndim):
* direct_copy = slice_is_contig(dst, 'F', ndim)
*/
__pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim);
/* "View.MemoryView":1297
*
*
* if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
*/
goto __pyx_L12;
}
/* "View.MemoryView":1299
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1300
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
* direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<<
*
* if direct_copy:
*/
__pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim);
/* "View.MemoryView":1299
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
*/
}
__pyx_L12:;
/* "View.MemoryView":1302
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
* if direct_copy: # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
__pyx_t_2 = (__pyx_v_direct_copy != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1304
* if direct_copy:
*
* refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1305
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
* memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<<
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata)
*/
memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim));
/* "View.MemoryView":1306
* refcount_copying(&dst, dtype_is_object, ndim, False)
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
* free(tmpdata)
* return 0
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1307
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata) # <<<<<<<<<<<<<<
* return 0
*
*/
free(__pyx_v_tmpdata);
/* "View.MemoryView":1308
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata)
* return 0 # <<<<<<<<<<<<<<
*
* if order == 'F' == get_best_order(&dst, ndim):
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":1302
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
* if direct_copy: # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
}
/* "View.MemoryView":1294
* src = tmp
*
* if not broadcasting: # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":1310
* return 0
*
* if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = (__pyx_v_order == 'F');
if (__pyx_t_2) {
__pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim));
}
__pyx_t_7 = (__pyx_t_2 != 0);
if (__pyx_t_7) {
/* "View.MemoryView":1313
*
*
* transpose_memslice(&src) # <<<<<<<<<<<<<<
* transpose_memslice(&dst)
*
*/
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == 0)) __PYX_ERR(1, 1313, __pyx_L1_error)
/* "View.MemoryView":1314
*
* transpose_memslice(&src)
* transpose_memslice(&dst) # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == 0)) __PYX_ERR(1, 1314, __pyx_L1_error)
/* "View.MemoryView":1310
* return 0
*
* if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":1316
* transpose_memslice(&dst)
*
* refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* copy_strided_to_strided(&src, &dst, ndim, itemsize)
* refcount_copying(&dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1317
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
* copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<<
* refcount_copying(&dst, dtype_is_object, ndim, True)
*
*/
copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize);
/* "View.MemoryView":1318
* refcount_copying(&dst, dtype_is_object, ndim, False)
* copy_strided_to_strided(&src, &dst, ndim, itemsize)
* refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
*
* free(tmpdata)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1320
* refcount_copying(&dst, dtype_is_object, ndim, True)
*
* free(tmpdata) # <<<<<<<<<<<<<<
* return 0
*
*/
free(__pyx_v_tmpdata);
/* "View.MemoryView":1321
*
* free(tmpdata)
* return 0 # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_broadcast_leading')
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":1252
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":1324
*
* @cname('__pyx_memoryview_broadcast_leading')
* cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<<
* int ndim,
* int ndim_other) nogil:
*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) {
int __pyx_v_i;
int __pyx_v_offset;
int __pyx_t_1;
int __pyx_t_2;
/* "View.MemoryView":1328
* int ndim_other) nogil:
* cdef int i
* cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<<
*
* for i in range(ndim - 1, -1, -1):
*/
__pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim);
/* "View.MemoryView":1330
* cdef int offset = ndim_other - ndim
*
* for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i]
*/
for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1L; __pyx_t_1-=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":1331
*
* for i in range(ndim - 1, -1, -1):
* mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<<
* mslice.strides[i + offset] = mslice.strides[i]
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*/
(__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]);
/* "View.MemoryView":1332
* for i in range(ndim - 1, -1, -1):
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<<
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*
*/
(__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1333
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i]
* mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<<
*
* for i in range(offset):
*/
(__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]);
}
/* "View.MemoryView":1335
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*
* for i in range(offset): # <<<<<<<<<<<<<<
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0]
*/
__pyx_t_1 = __pyx_v_offset;
for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) {
__pyx_v_i = __pyx_t_2;
/* "View.MemoryView":1336
*
* for i in range(offset):
* mslice.shape[i] = 1 # <<<<<<<<<<<<<<
* mslice.strides[i] = mslice.strides[0]
* mslice.suboffsets[i] = -1
*/
(__pyx_v_mslice->shape[__pyx_v_i]) = 1;
/* "View.MemoryView":1337
* for i in range(offset):
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<<
* mslice.suboffsets[i] = -1
*
*/
(__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]);
/* "View.MemoryView":1338
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0]
* mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<<
*
*
*/
(__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L;
}
/* "View.MemoryView":1324
*
* @cname('__pyx_memoryview_broadcast_leading')
* cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<<
* int ndim,
* int ndim_other) nogil:
*/
/* function exit code */
}
/* "View.MemoryView":1346
*
* @cname('__pyx_memoryview_refcount_copying')
* cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<<
* int ndim, bint inc) nogil:
*
*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) {
int __pyx_t_1;
/* "View.MemoryView":1350
*
*
* if dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice_with_gil(dst.data, dst.shape,
* dst.strides, ndim, inc)
*/
__pyx_t_1 = (__pyx_v_dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1351
*
* if dtype_is_object:
* refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<<
* dst.strides, ndim, inc)
*
*/
__pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc);
/* "View.MemoryView":1350
*
*
* if dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice_with_gil(dst.data, dst.shape,
* dst.strides, ndim, inc)
*/
}
/* "View.MemoryView":1346
*
* @cname('__pyx_memoryview_refcount_copying')
* cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<<
* int ndim, bint inc) nogil:
*
*/
/* function exit code */
}
/* "View.MemoryView":1355
*
* @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
* cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
__Pyx_RefNannyDeclarations
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0);
/* "View.MemoryView":1358
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
* refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc);
/* "View.MemoryView":1355
*
* @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
* cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
/* "View.MemoryView":1361
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
* cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim, bint inc):
* cdef Py_ssize_t i
*/
static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
Py_ssize_t __pyx_t_2;
int __pyx_t_3;
__Pyx_RefNannySetupContext("refcount_objects_in_slice", 0);
/* "View.MemoryView":1365
* cdef Py_ssize_t i
*
* for i in range(shape[0]): # <<<<<<<<<<<<<<
* if ndim == 1:
* if inc:
*/
__pyx_t_1 = (__pyx_v_shape[0]);
for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) {
__pyx_v_i = __pyx_t_2;
/* "View.MemoryView":1366
*
* for i in range(shape[0]):
* if ndim == 1: # <<<<<<<<<<<<<<
* if inc:
* Py_INCREF((<PyObject **> data)[0])
*/
__pyx_t_3 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_3) {
/* "View.MemoryView":1367
* for i in range(shape[0]):
* if ndim == 1:
* if inc: # <<<<<<<<<<<<<<
* Py_INCREF((<PyObject **> data)[0])
* else:
*/
__pyx_t_3 = (__pyx_v_inc != 0);
if (__pyx_t_3) {
/* "View.MemoryView":1368
* if ndim == 1:
* if inc:
* Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<<
* else:
* Py_DECREF((<PyObject **> data)[0])
*/
Py_INCREF((((PyObject **)__pyx_v_data)[0]));
/* "View.MemoryView":1367
* for i in range(shape[0]):
* if ndim == 1:
* if inc: # <<<<<<<<<<<<<<
* Py_INCREF((<PyObject **> data)[0])
* else:
*/
goto __pyx_L6;
}
/* "View.MemoryView":1370
* Py_INCREF((<PyObject **> data)[0])
* else:
* Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<<
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1,
*/
/*else*/ {
Py_DECREF((((PyObject **)__pyx_v_data)[0]));
}
__pyx_L6:;
/* "View.MemoryView":1366
*
* for i in range(shape[0]):
* if ndim == 1: # <<<<<<<<<<<<<<
* if inc:
* Py_INCREF((<PyObject **> data)[0])
*/
goto __pyx_L5;
}
/* "View.MemoryView":1372
* Py_DECREF((<PyObject **> data)[0])
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<<
* ndim - 1, inc)
*
*/
/*else*/ {
/* "View.MemoryView":1373
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1,
* ndim - 1, inc) # <<<<<<<<<<<<<<
*
* data += strides[0]
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc);
}
__pyx_L5:;
/* "View.MemoryView":1375
* ndim - 1, inc)
*
* data += strides[0] # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0]));
}
/* "View.MemoryView":1361
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
* cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim, bint inc):
* cdef Py_ssize_t i
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":1381
*
* @cname('__pyx_memoryview_slice_assign_scalar')
* cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) {
/* "View.MemoryView":1384
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
* refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
* itemsize, item)
*/
__pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1385
* bint dtype_is_object) nogil:
* refcount_copying(dst, dtype_is_object, ndim, False)
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<<
* itemsize, item)
* refcount_copying(dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item);
/* "View.MemoryView":1387
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
* itemsize, item)
* refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
*
*
*/
__pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1381
*
* @cname('__pyx_memoryview_slice_assign_scalar')
* cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
*/
/* function exit code */
}
/* "View.MemoryView":1391
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_stride;
Py_ssize_t __pyx_v_extent;
int __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
/* "View.MemoryView":1395
* size_t itemsize, void *item) nogil:
* cdef Py_ssize_t i
* cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t extent = shape[0]
*
*/
__pyx_v_stride = (__pyx_v_strides[0]);
/* "View.MemoryView":1396
* cdef Py_ssize_t i
* cdef Py_ssize_t stride = strides[0]
* cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<<
*
* if ndim == 1:
*/
__pyx_v_extent = (__pyx_v_shape[0]);
/* "View.MemoryView":1398
* cdef Py_ssize_t extent = shape[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* for i in range(extent):
* memcpy(data, item, itemsize)
*/
__pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1399
*
* if ndim == 1:
* for i in range(extent): # <<<<<<<<<<<<<<
* memcpy(data, item, itemsize)
* data += stride
*/
__pyx_t_2 = __pyx_v_extent;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1400
* if ndim == 1:
* for i in range(extent):
* memcpy(data, item, itemsize) # <<<<<<<<<<<<<<
* data += stride
* else:
*/
memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize);
/* "View.MemoryView":1401
* for i in range(extent):
* memcpy(data, item, itemsize)
* data += stride # <<<<<<<<<<<<<<
* else:
* for i in range(extent):
*/
__pyx_v_data = (__pyx_v_data + __pyx_v_stride);
}
/* "View.MemoryView":1398
* cdef Py_ssize_t extent = shape[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* for i in range(extent):
* memcpy(data, item, itemsize)
*/
goto __pyx_L3;
}
/* "View.MemoryView":1403
* data += stride
* else:
* for i in range(extent): # <<<<<<<<<<<<<<
* _slice_assign_scalar(data, shape + 1, strides + 1,
* ndim - 1, itemsize, item)
*/
/*else*/ {
__pyx_t_2 = __pyx_v_extent;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1404
* else:
* for i in range(extent):
* _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<<
* ndim - 1, itemsize, item)
* data += stride
*/
__pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item);
/* "View.MemoryView":1406
* _slice_assign_scalar(data, shape + 1, strides + 1,
* ndim - 1, itemsize, item)
* data += stride # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_data = (__pyx_v_data + __pyx_v_stride);
}
}
__pyx_L3:;
/* "View.MemoryView":1391
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
/* function exit code */
}
static struct __pyx_vtabstruct_array __pyx_vtable_array;
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_array_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_array_obj *)o);
p->__pyx_vtab = __pyx_vtabptr_array;
p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None);
p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None);
if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad;
return o;
bad:
Py_DECREF(o); o = 0;
return NULL;
}
static void __pyx_tp_dealloc_array(PyObject *o) {
struct __pyx_array_obj *p = (struct __pyx_array_obj *)o;
#if PY_VERSION_HEX >= 0x030400a1
if (unlikely(Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
++Py_REFCNT(o);
__pyx_array___dealloc__(o);
--Py_REFCNT(o);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->mode);
Py_CLEAR(p->_format);
(*Py_TYPE(o)->tp_free)(o);
}
static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) {
PyObject *r;
PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;
r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
Py_DECREF(x);
return r;
}
static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) {
if (v) {
return __pyx_array___setitem__(o, i, v);
}
else {
PyErr_Format(PyExc_NotImplementedError,
"Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
return -1;
}
}
static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) {
PyObject *v = PyObject_GenericGetAttr(o, n);
if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
v = __pyx_array___getattr__(o, n);
}
return v;
}
static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o);
}
static PyMethodDef __pyx_methods_array[] = {
{"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets_array[] = {
{(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PySequenceMethods __pyx_tp_as_sequence_array = {
0, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
__pyx_sq_item_array, /*sq_item*/
0, /*sq_slice*/
0, /*sq_ass_item*/
0, /*sq_ass_slice*/
0, /*sq_contains*/
0, /*sq_inplace_concat*/
0, /*sq_inplace_repeat*/
};
static PyMappingMethods __pyx_tp_as_mapping_array = {
0, /*mp_length*/
__pyx_array___getitem__, /*mp_subscript*/
__pyx_mp_ass_subscript_array, /*mp_ass_subscript*/
};
static PyBufferProcs __pyx_tp_as_buffer_array = {
#if PY_MAJOR_VERSION < 3
0, /*bf_getreadbuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getwritebuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getsegcount*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getcharbuffer*/
#endif
__pyx_array_getbuffer, /*bf_getbuffer*/
0, /*bf_releasebuffer*/
};
static PyTypeObject __pyx_type___pyx_array = {
PyVarObject_HEAD_INIT(0, 0)
"bmtools.exact.moments.array", /*tp_name*/
sizeof(struct __pyx_array_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_array, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
0, /*tp_repr*/
0, /*tp_as_number*/
&__pyx_tp_as_sequence_array, /*tp_as_sequence*/
&__pyx_tp_as_mapping_array, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
__pyx_tp_getattro_array, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_array, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/
0, /*tp_doc*/
0, /*tp_traverse*/
0, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_array, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_array, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_array, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
};
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
struct __pyx_MemviewEnum_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_MemviewEnum_obj *)o);
p->name = Py_None; Py_INCREF(Py_None);
return o;
}
static void __pyx_tp_dealloc_Enum(PyObject *o) {
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
#if PY_VERSION_HEX >= 0x030400a1
if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
Py_CLEAR(p->name);
(*Py_TYPE(o)->tp_free)(o);
}
static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
if (p->name) {
e = (*v)(p->name, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear_Enum(PyObject *o) {
PyObject* tmp;
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
tmp = ((PyObject*)p->name);
p->name = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
return 0;
}
static PyMethodDef __pyx_methods_Enum[] = {
{0, 0, 0, 0}
};
static PyTypeObject __pyx_type___pyx_MemviewEnum = {
PyVarObject_HEAD_INIT(0, 0)
"bmtools.exact.moments.Enum", /*tp_name*/
sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_Enum, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
__pyx_MemviewEnum___repr__, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_Enum, /*tp_traverse*/
__pyx_tp_clear_Enum, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_Enum, /*tp_methods*/
0, /*tp_members*/
0, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
__pyx_MemviewEnum___init__, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_Enum, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
};
static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview;
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_memoryview_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_memoryview_obj *)o);
p->__pyx_vtab = __pyx_vtabptr_memoryview;
p->obj = Py_None; Py_INCREF(Py_None);
p->_size = Py_None; Py_INCREF(Py_None);
p->_array_interface = Py_None; Py_INCREF(Py_None);
p->view.obj = NULL;
if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad;
return o;
bad:
Py_DECREF(o); o = 0;
return NULL;
}
static void __pyx_tp_dealloc_memoryview(PyObject *o) {
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
#if PY_VERSION_HEX >= 0x030400a1
if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
++Py_REFCNT(o);
__pyx_memoryview___dealloc__(o);
--Py_REFCNT(o);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->obj);
Py_CLEAR(p->_size);
Py_CLEAR(p->_array_interface);
(*Py_TYPE(o)->tp_free)(o);
}
static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
if (p->obj) {
e = (*v)(p->obj, a); if (e) return e;
}
if (p->_size) {
e = (*v)(p->_size, a); if (e) return e;
}
if (p->_array_interface) {
e = (*v)(p->_array_interface, a); if (e) return e;
}
if (p->view.obj) {
e = (*v)(p->view.obj, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear_memoryview(PyObject *o) {
PyObject* tmp;
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
tmp = ((PyObject*)p->obj);
p->obj = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
tmp = ((PyObject*)p->_size);
p->_size = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
tmp = ((PyObject*)p->_array_interface);
p->_array_interface = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
Py_CLEAR(p->view.obj);
return 0;
}
static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) {
PyObject *r;
PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;
r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
Py_DECREF(x);
return r;
}
static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) {
if (v) {
return __pyx_memoryview___setitem__(o, i, v);
}
else {
PyErr_Format(PyExc_NotImplementedError,
"Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
return -1;
}
}
static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o);
}
static PyMethodDef __pyx_methods_memoryview[] = {
{"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0},
{"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0},
{"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0},
{"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets_memoryview[] = {
{(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0},
{(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0},
{(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0},
{(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0},
{(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0},
{(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0},
{(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0},
{(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0},
{(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PySequenceMethods __pyx_tp_as_sequence_memoryview = {
__pyx_memoryview___len__, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
__pyx_sq_item_memoryview, /*sq_item*/
0, /*sq_slice*/
0, /*sq_ass_item*/
0, /*sq_ass_slice*/
0, /*sq_contains*/
0, /*sq_inplace_concat*/
0, /*sq_inplace_repeat*/
};
static PyMappingMethods __pyx_tp_as_mapping_memoryview = {
__pyx_memoryview___len__, /*mp_length*/
__pyx_memoryview___getitem__, /*mp_subscript*/
__pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/
};
static PyBufferProcs __pyx_tp_as_buffer_memoryview = {
#if PY_MAJOR_VERSION < 3
0, /*bf_getreadbuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getwritebuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getsegcount*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getcharbuffer*/
#endif
__pyx_memoryview_getbuffer, /*bf_getbuffer*/
0, /*bf_releasebuffer*/
};
static PyTypeObject __pyx_type___pyx_memoryview = {
PyVarObject_HEAD_INIT(0, 0)
"bmtools.exact.moments.memoryview", /*tp_name*/
sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_memoryview, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
__pyx_memoryview___repr__, /*tp_repr*/
0, /*tp_as_number*/
&__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/
&__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
__pyx_memoryview___str__, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_memoryview, /*tp_traverse*/
__pyx_tp_clear_memoryview, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_memoryview, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_memoryview, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_memoryview, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
};
static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice;
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_memoryviewslice_obj *p;
PyObject *o = __pyx_tp_new_memoryview(t, a, k);
if (unlikely(!o)) return 0;
p = ((struct __pyx_memoryviewslice_obj *)o);
p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice;
p->from_object = Py_None; Py_INCREF(Py_None);
p->from_slice.memview = NULL;
return o;
}
static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) {
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
#if PY_VERSION_HEX >= 0x030400a1
if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
++Py_REFCNT(o);
__pyx_memoryviewslice___dealloc__(o);
--Py_REFCNT(o);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->from_object);
PyObject_GC_Track(o);
__pyx_tp_dealloc_memoryview(o);
}
static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e;
if (p->from_object) {
e = (*v)(p->from_object, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear__memoryviewslice(PyObject *o) {
PyObject* tmp;
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
__pyx_tp_clear_memoryview(o);
tmp = ((PyObject*)p->from_object);
p->from_object = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
__PYX_XDEC_MEMVIEW(&p->from_slice, 1);
return 0;
}
static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o);
}
static PyMethodDef __pyx_methods__memoryviewslice[] = {
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = {
{(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PyTypeObject __pyx_type___pyx_memoryviewslice = {
PyVarObject_HEAD_INIT(0, 0)
"bmtools.exact.moments._memoryviewslice", /*tp_name*/
sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
#if CYTHON_COMPILING_IN_PYPY
__pyx_memoryview___repr__, /*tp_repr*/
#else
0, /*tp_repr*/
#endif
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
#if CYTHON_COMPILING_IN_PYPY
__pyx_memoryview___str__, /*tp_str*/
#else
0, /*tp_str*/
#endif
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
"Internal class for passing memoryview slices to Python", /*tp_doc*/
__pyx_tp_traverse__memoryviewslice, /*tp_traverse*/
__pyx_tp_clear__memoryviewslice, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods__memoryviewslice, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets__memoryviewslice, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new__memoryviewslice, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
};
static PyMethodDef __pyx_methods[] = {
{0, 0, 0, 0}
};
#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef __pyx_moduledef = {
#if PY_VERSION_HEX < 0x03020000
{ PyObject_HEAD_INIT(NULL) NULL, 0, NULL },
#else
PyModuleDef_HEAD_INIT,
#endif
"moments",
__pyx_k_Boltzmann_machine_moment_calcula, /* m_doc */
-1, /* m_size */
__pyx_methods /* m_methods */,
NULL, /* m_reload */
NULL, /* m_traverse */
NULL, /* m_clear */
NULL /* m_free */
};
#endif
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1},
{&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0},
{&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0},
{&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1},
{&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0},
{&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1},
{&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0},
{&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0},
{&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0},
{&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1},
{&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0},
{&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0},
{&__pyx_kp_s_Number_of_threads_must_be_0, __pyx_k_Number_of_threads_must_be_0, sizeof(__pyx_k_Number_of_threads_must_be_0), 0, 0, 1, 0},
{&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1},
{&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0},
{&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1},
{&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0},
{&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
{&__pyx_n_s_all_in_place, __pyx_k_all_in_place, sizeof(__pyx_k_all_in_place), 0, 0, 1, 1},
{&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1},
{&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1},
{&__pyx_n_s_biases, __pyx_k_biases, sizeof(__pyx_k_biases), 0, 0, 1, 1},
{&__pyx_n_s_bmtools_exact_moments, __pyx_k_bmtools_exact_moments, sizeof(__pyx_k_bmtools_exact_moments), 0, 0, 1, 1},
{&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1},
{&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1},
{&__pyx_n_s_calculate_moments_parallel, __pyx_k_calculate_moments_parallel, sizeof(__pyx_k_calculate_moments_parallel), 0, 0, 1, 1},
{&__pyx_n_s_calculate_moments_sequential, __pyx_k_calculate_moments_sequential, sizeof(__pyx_k_calculate_moments_sequential), 0, 0, 1, 1},
{&__pyx_n_s_calculate_probs_parallel, __pyx_k_calculate_probs_parallel, sizeof(__pyx_k_calculate_probs_parallel), 0, 0, 1, 1},
{&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1},
{&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0},
{&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0},
{&__pyx_n_s_d, __pyx_k_d, sizeof(__pyx_k_d), 0, 0, 1, 1},
{&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1},
{&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1},
{&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1},
{&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1},
{&__pyx_n_s_first_mom, __pyx_k_first_mom, sizeof(__pyx_k_first_mom), 0, 0, 1, 1},
{&__pyx_n_s_first_moms, __pyx_k_first_moms, sizeof(__pyx_k_first_moms), 0, 0, 1, 1},
{&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1},
{&__pyx_n_s_force, __pyx_k_force, sizeof(__pyx_k_force), 0, 0, 1, 1},
{&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1},
{&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1},
{&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1},
{&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0},
{&__pyx_kp_s_home_matt_Projects_boltzmann_ma, __pyx_k_home_matt_Projects_boltzmann_ma, sizeof(__pyx_k_home_matt_Projects_boltzmann_ma), 0, 0, 1, 0},
{&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1},
{&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1},
{&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
{&__pyx_n_s_intervals, __pyx_k_intervals, sizeof(__pyx_k_intervals), 0, 0, 1, 1},
{&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1},
{&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0},
{&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1},
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
{&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1},
{&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1},
{&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
{&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1},
{&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1},
{&__pyx_n_s_norm_const, __pyx_k_norm_const, sizeof(__pyx_k_norm_const), 0, 0, 1, 1},
{&__pyx_n_s_norm_consts, __pyx_k_norm_consts, sizeof(__pyx_k_norm_consts), 0, 0, 1, 1},
{&__pyx_n_s_num_states, __pyx_k_num_states, sizeof(__pyx_k_num_states), 0, 0, 1, 1},
{&__pyx_n_s_num_threads, __pyx_k_num_threads, sizeof(__pyx_k_num_threads), 0, 0, 1, 1},
{&__pyx_n_s_num_units, __pyx_k_num_units, sizeof(__pyx_k_num_units), 0, 0, 1, 1},
{&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1},
{&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1},
{&__pyx_n_s_prob, __pyx_k_prob, sizeof(__pyx_k_prob), 0, 0, 1, 1},
{&__pyx_n_s_probs, __pyx_k_probs, sizeof(__pyx_k_probs), 0, 0, 1, 1},
{&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1},
{&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1},
{&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
{&__pyx_n_s_second_mom, __pyx_k_second_mom, sizeof(__pyx_k_second_mom), 0, 0, 1, 1},
{&__pyx_n_s_second_moms, __pyx_k_second_moms, sizeof(__pyx_k_second_moms), 0, 0, 1, 1},
{&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1},
{&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1},
{&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1},
{&__pyx_n_s_state, __pyx_k_state, sizeof(__pyx_k_state), 0, 0, 1, 1},
{&__pyx_n_s_state_index, __pyx_k_state_index, sizeof(__pyx_k_state_index), 0, 0, 1, 1},
{&__pyx_n_s_states, __pyx_k_states, sizeof(__pyx_k_states), 0, 0, 1, 1},
{&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1},
{&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1},
{&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0},
{&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0},
{&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0},
{&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1},
{&__pyx_n_s_t, __pyx_k_t, sizeof(__pyx_k_t), 0, 0, 1, 1},
{&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
{&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0},
{&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0},
{&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1},
{&__pyx_n_s_weights, __pyx_k_weights, sizeof(__pyx_k_weights), 0, 0, 1, 1},
{0, 0, 0, 0, 0, 0, 0}
};
static int __Pyx_InitCachedBuiltins(void) {
__pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(0, 133, __pyx_L1_error)
__pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 167, __pyx_L1_error)
__pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 146, __pyx_L1_error)
__pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 149, __pyx_L1_error)
__pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 396, __pyx_L1_error)
__pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 425, __pyx_L1_error)
__pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 599, __pyx_L1_error)
__pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 818, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
static int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
/* "bmtools/exact/moments.pyx":133
* cdef long num_states = 2**num_units
* if num_threads <= 0:
* raise ValueError('Number of threads must be > 0') # <<<<<<<<<<<<<<
* check_state_space_size(num_units, force)
* cdef state_t[:, :] states = array(
*/
__pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_Number_of_threads_must_be_0); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(0, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__4);
__Pyx_GIVEREF(__pyx_tuple__4);
/* "bmtools/exact/moments.pyx":219
* cdef long num_states = 2 ** num_units
* if num_threads <= 0:
* raise ValueError('Number of threads must be > 0') # <<<<<<<<<<<<<<
* check_state_space_size(num_units, force)
* cdef state_t[:, :] states = array(
*/
__pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_Number_of_threads_must_be_0); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(0, 219, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__5);
__Pyx_GIVEREF(__pyx_tuple__5);
/* "View.MemoryView":131
*
* if not self.ndim:
* raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
*
* if itemsize <= 0:
*/
__pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 131, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__6);
__Pyx_GIVEREF(__pyx_tuple__6);
/* "View.MemoryView":134
*
* if itemsize <= 0:
* raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
*
* if not isinstance(format, bytes):
*/
__pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 134, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__7);
__Pyx_GIVEREF(__pyx_tuple__7);
/* "View.MemoryView":137
*
* if not isinstance(format, bytes):
* format = format.encode('ASCII') # <<<<<<<<<<<<<<
* self._format = format # keep a reference to the byte string
* self.format = self._format
*/
__pyx_tuple__8 = PyTuple_Pack(1, __pyx_n_s_ASCII); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 137, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__8);
__Pyx_GIVEREF(__pyx_tuple__8);
/* "View.MemoryView":146
*
* if not self._shape:
* raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 146, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__9);
__Pyx_GIVEREF(__pyx_tuple__9);
/* "View.MemoryView":174
* self.data = <char *>malloc(self.len)
* if not self.data:
* raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 174, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__10);
__Pyx_GIVEREF(__pyx_tuple__10);
/* "View.MemoryView":190
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
* info.buf = self.data
* info.len = self.len
*/
__pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 190, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__11);
__Pyx_GIVEREF(__pyx_tuple__11);
/* "View.MemoryView":484
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
__pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 484, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__12);
__Pyx_GIVEREF(__pyx_tuple__12);
/* "View.MemoryView":556
* if self.view.strides == NULL:
*
* raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]])
*/
__pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 556, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__13);
__Pyx_GIVEREF(__pyx_tuple__13);
/* "View.MemoryView":563
* def suboffsets(self):
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
*/
__pyx_tuple__14 = PyTuple_New(1); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 563, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__14);
__Pyx_INCREF(__pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_int_neg_1);
PyTuple_SET_ITEM(__pyx_tuple__14, 0, __pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_tuple__14);
/* "View.MemoryView":668
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
__pyx_slice__15 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__15)) __PYX_ERR(1, 668, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice__15);
__Pyx_GIVEREF(__pyx_slice__15);
/* "View.MemoryView":671
* seen_ellipsis = True
* else:
* result.append(slice(None)) # <<<<<<<<<<<<<<
* have_slices = True
* else:
*/
__pyx_slice__16 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__16)) __PYX_ERR(1, 671, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice__16);
__Pyx_GIVEREF(__pyx_slice__16);
/* "View.MemoryView":682
* nslices = ndim - len(result)
* if nslices:
* result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<<
*
* return have_slices or nslices, tuple(result)
*/
__pyx_slice__17 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__17)) __PYX_ERR(1, 682, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice__17);
__Pyx_GIVEREF(__pyx_slice__17);
/* "View.MemoryView":689
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 689, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__18);
__Pyx_GIVEREF(__pyx_tuple__18);
/* "bmtools/exact/moments.pyx":24
* double log(double x) nogil
*
* def calculate_moments_sequential(double[:, :] weights, double[:] biases, # <<<<<<<<<<<<<<
* bint force=False):
* """Calculate Boltzmann machine distribution moments.
*/
__pyx_tuple__19 = PyTuple_Pack(13, __pyx_n_s_weights, __pyx_n_s_biases, __pyx_n_s_force, __pyx_n_s_state_index, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_num_units, __pyx_n_s_prob, __pyx_n_s_norm_const, __pyx_n_s_num_states, __pyx_n_s_state, __pyx_n_s_first_mom, __pyx_n_s_second_mom); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(0, 24, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__19);
__Pyx_GIVEREF(__pyx_tuple__19);
__pyx_codeobj__20 = (PyObject*)__Pyx_PyCode_New(3, 0, 13, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_matt_Projects_boltzmann_ma, __pyx_n_s_calculate_moments_sequential, 24, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__20)) __PYX_ERR(0, 24, __pyx_L1_error)
/* "bmtools/exact/moments.pyx":74
*
*
* def calculate_moments_parallel(double[:, :] weights, double[:] biases, # <<<<<<<<<<<<<<
* bint force=False, int num_threads=2,
* double[:] norm_consts=None,
*/
__pyx_tuple__21 = PyTuple_Pack(14, __pyx_n_s_weights, __pyx_n_s_biases, __pyx_n_s_force, __pyx_n_s_num_threads, __pyx_n_s_norm_consts, __pyx_n_s_first_moms, __pyx_n_s_second_moms, __pyx_n_s_t, __pyx_n_s_num_units, __pyx_n_s_prob, __pyx_n_s_num_states, __pyx_n_s_states, __pyx_n_s_all_in_place, __pyx_n_s_intervals); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(0, 74, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__21);
__Pyx_GIVEREF(__pyx_tuple__21);
__pyx_codeobj__22 = (PyObject*)__Pyx_PyCode_New(7, 0, 14, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_matt_Projects_boltzmann_ma, __pyx_n_s_calculate_moments_parallel, 74, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__22)) __PYX_ERR(0, 74, __pyx_L1_error)
/* "bmtools/exact/moments.pyx":184
*
*
* def calculate_probs_parallel( # <<<<<<<<<<<<<<
* double[:, :] weights, double[:] biases, bint force=False,
* int num_threads=2,):
*/
__pyx_tuple__23 = PyTuple_Pack(11, __pyx_n_s_weights, __pyx_n_s_biases, __pyx_n_s_force, __pyx_n_s_num_threads, __pyx_n_s_t, __pyx_n_s_num_units, __pyx_n_s_num_states, __pyx_n_s_states, __pyx_n_s_norm_consts, __pyx_n_s_probs, __pyx_n_s_intervals); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(0, 184, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__23);
__Pyx_GIVEREF(__pyx_tuple__23);
__pyx_codeobj__24 = (PyObject*)__Pyx_PyCode_New(4, 0, 11, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_matt_Projects_boltzmann_ma, __pyx_n_s_calculate_probs_parallel, 184, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__24)) __PYX_ERR(0, 184, __pyx_L1_error)
/* "View.MemoryView":282
* return self.name
*
* cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>")
*/
__pyx_tuple__25 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 282, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__25);
__Pyx_GIVEREF(__pyx_tuple__25);
/* "View.MemoryView":283
*
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
* cdef indirect = Enum("<strided and indirect>")
*
*/
__pyx_tuple__26 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(1, 283, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__26);
__Pyx_GIVEREF(__pyx_tuple__26);
/* "View.MemoryView":284
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__27 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__27)) __PYX_ERR(1, 284, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__27);
__Pyx_GIVEREF(__pyx_tuple__27);
/* "View.MemoryView":287
*
*
* cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
* cdef indirect_contiguous = Enum("<contiguous and indirect>")
*
*/
__pyx_tuple__28 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__28)) __PYX_ERR(1, 287, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__28);
__Pyx_GIVEREF(__pyx_tuple__28);
/* "View.MemoryView":288
*
* cdef contiguous = Enum("<contiguous and direct>")
* cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__29 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__29)) __PYX_ERR(1, 288, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__29);
__Pyx_GIVEREF(__pyx_tuple__29);
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
static int __Pyx_InitGlobals(void) {
/* InitThreads.init */
#ifdef WITH_THREAD
PyEval_InitThreads();
#endif
if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
__pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initmoments(void); /*proto*/
PyMODINIT_FUNC initmoments(void)
#else
PyMODINIT_FUNC PyInit_moments(void); /*proto*/
PyMODINIT_FUNC PyInit_moments(void)
#endif
{
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_memviewslice __pyx_t_4 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } };
static PyThread_type_lock __pyx_t_7[8];
__Pyx_RefNannyDeclarations
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_moments(void)", 0);
if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pyx_CyFunction_USED
if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Coroutine_USED
if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_StopAsyncIteration_USED
if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
#ifdef WITH_THREAD /* Python build with threading support? */
PyEval_InitThreads();
#endif
#endif
/*--- Module creation code ---*/
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4("moments", __pyx_methods, __pyx_k_Boltzmann_machine_moment_calcula, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
#if CYTHON_COMPILING_IN_PYPY
Py_INCREF(__pyx_b);
#endif
if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
/*--- Initialize various global constants etc. ---*/
if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
if (__pyx_module_is_main_bmtools__exact__moments) {
if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
}
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
if (!PyDict_GetItemString(modules, "bmtools.exact.moments")) {
if (unlikely(PyDict_SetItemString(modules, "bmtools.exact.moments", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
}
}
#endif
/*--- Builtin init code ---*/
if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Constants init code ---*/
if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Global init code ---*/
generic = Py_None; Py_INCREF(Py_None);
strided = Py_None; Py_INCREF(Py_None);
indirect = Py_None; Py_INCREF(Py_None);
contiguous = Py_None; Py_INCREF(Py_None);
indirect_contiguous = Py_None; Py_INCREF(Py_None);
/*--- Variable export code ---*/
/*--- Function export code ---*/
if (__Pyx_ExportFunction("calc_unnormed_probs_for_state_range", (void (*)(void))__pyx_f_7bmtools_5exact_7moments_calc_unnormed_probs_for_state_range, "void (__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, double *, __Pyx_memviewslice, long, long)") < 0) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_ExportFunction("normalise_probabilities", (void (*)(void))__pyx_f_7bmtools_5exact_7moments_normalise_probabilities, "void (__Pyx_memviewslice, double)") < 0) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_ExportFunction("accum_moments_for_state_range", (void (*)(void))__pyx_f_7bmtools_5exact_7moments_accum_moments_for_state_range, "void (__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, double *, __Pyx_memviewslice, __Pyx_memviewslice, long, long)") < 0) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_ExportFunction("calc_norm_const", (void (*)(void))__pyx_f_7bmtools_5exact_7moments_calc_norm_const, "double (__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, struct __pyx_opt_args_7bmtools_5exact_7moments_calc_norm_const *__pyx_optional_args)") < 0) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_ExportFunction("normalise_first_moment", (void (*)(void))__pyx_f_7bmtools_5exact_7moments_normalise_first_moment, "void (__Pyx_memviewslice, double)") < 0) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_ExportFunction("combine_and_normalise_first_moments", (void (*)(void))__pyx_f_7bmtools_5exact_7moments_combine_and_normalise_first_moments, "void (__Pyx_memviewslice, double)") < 0) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_ExportFunction("normalise_and_reflect_second_moment", (void (*)(void))__pyx_f_7bmtools_5exact_7moments_normalise_and_reflect_second_moment, "void (__Pyx_memviewslice, double)") < 0) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_ExportFunction("combine_normalise_and_reflect_second_moments", (void (*)(void))__pyx_f_7bmtools_5exact_7moments_combine_normalise_and_reflect_second_moments, "void (__Pyx_memviewslice, double)") < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Type init code ---*/
__pyx_vtabptr_array = &__pyx_vtable_array;
__pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview;
if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 103, __pyx_L1_error)
__pyx_type___pyx_array.tp_print = 0;
if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 103, __pyx_L1_error)
__pyx_array_type = &__pyx_type___pyx_array;
if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 275, __pyx_L1_error)
__pyx_type___pyx_MemviewEnum.tp_print = 0;
__pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum;
__pyx_vtabptr_memoryview = &__pyx_vtable_memoryview;
__pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer;
__pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice;
__pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment;
__pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar;
__pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed;
__pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object;
__pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object;
if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 326, __pyx_L1_error)
__pyx_type___pyx_memoryview.tp_print = 0;
if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 326, __pyx_L1_error)
__pyx_memoryview_type = &__pyx_type___pyx_memoryview;
__pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice;
__pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview;
__pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object;
__pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object;
__pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type;
if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 951, __pyx_L1_error)
__pyx_type___pyx_memoryviewslice.tp_print = 0;
if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 951, __pyx_L1_error)
__pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice;
/*--- Type import code ---*/
/*--- Variable import code ---*/
__pyx_t_1 = __Pyx_ImportModule("bmtools.exact.helpers"); if (!__pyx_t_1) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_ImportVoidPtr(__pyx_t_1, "state_t_code", (void **)&__pyx_vp_7bmtools_5exact_7helpers_state_t_code, "char *") < 0) __PYX_ERR(0, 1, __pyx_L1_error)
Py_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/*--- Function import code ---*/
__pyx_t_2 = __Pyx_ImportModule("bmtools.exact.helpers"); if (!__pyx_t_2) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_ImportFunction(__pyx_t_2, "neg_energy", (void (**)(void))&__pyx_f_7bmtools_5exact_7helpers_neg_energy, "double (__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice)") < 0) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_ImportFunction(__pyx_t_2, "check_state_space_size", (void (**)(void))&__pyx_f_7bmtools_5exact_7helpers_check_state_space_size, "void (int, int, int __pyx_skip_dispatch)") < 0) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_ImportFunction(__pyx_t_2, "partition_state_space", (void (**)(void))&__pyx_f_7bmtools_5exact_7helpers_partition_state_space, "__Pyx_memviewslice (long, int)") < 0) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_ImportFunction(__pyx_t_2, "index_to_state", (void (**)(void))&__pyx_f_7bmtools_5exact_7helpers_index_to_state, "void (long, __Pyx_memviewslice, int __pyx_skip_dispatch)") < 0) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_ImportFunction(__pyx_t_2, "next_state", (void (**)(void))&__pyx_f_7bmtools_5exact_7helpers_next_state, "void (__Pyx_memviewslice, long)") < 0) __PYX_ERR(0, 1, __pyx_L1_error)
Py_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/*--- Execution code ---*/
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/* "bmtools/exact/moments.pyx":24
* double log(double x) nogil
*
* def calculate_moments_sequential(double[:, :] weights, double[:] biases, # <<<<<<<<<<<<<<
* bint force=False):
* """Calculate Boltzmann machine distribution moments.
*/
__pyx_t_3 = PyCFunction_NewEx(&__pyx_mdef_7bmtools_5exact_7moments_1calculate_moments_sequential, NULL, __pyx_n_s_bmtools_exact_moments); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_calculate_moments_sequential, __pyx_t_3) < 0) __PYX_ERR(0, 24, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "bmtools/exact/moments.pyx":76
* def calculate_moments_parallel(double[:, :] weights, double[:] biases,
* bint force=False, int num_threads=2,
* double[:] norm_consts=None, # <<<<<<<<<<<<<<
* double[:, :] first_moms=None,
* double[:, :, :] second_moms=None):
*/
__pyx_t_4 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(Py_None);
if (unlikely(!__pyx_t_4.memview)) __PYX_ERR(0, 76, __pyx_L1_error)
__pyx_k_ = __pyx_t_4;
__pyx_t_4.memview = NULL;
__pyx_t_4.data = NULL;
/* "bmtools/exact/moments.pyx":77
* bint force=False, int num_threads=2,
* double[:] norm_consts=None,
* double[:, :] first_moms=None, # <<<<<<<<<<<<<<
* double[:, :, :] second_moms=None):
* """Calculate Boltzmann machine distribution moments.
*/
__pyx_t_5 = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(Py_None);
if (unlikely(!__pyx_t_5.memview)) __PYX_ERR(0, 77, __pyx_L1_error)
__pyx_k__2 = __pyx_t_5;
__pyx_t_5.memview = NULL;
__pyx_t_5.data = NULL;
/* "bmtools/exact/moments.pyx":78
* double[:] norm_consts=None,
* double[:, :] first_moms=None,
* double[:, :, :] second_moms=None): # <<<<<<<<<<<<<<
* """Calculate Boltzmann machine distribution moments.
*
*/
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(Py_None);
if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 78, __pyx_L1_error)
__pyx_k__3 = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "bmtools/exact/moments.pyx":74
*
*
* def calculate_moments_parallel(double[:, :] weights, double[:] biases, # <<<<<<<<<<<<<<
* bint force=False, int num_threads=2,
* double[:] norm_consts=None,
*/
__pyx_t_3 = PyCFunction_NewEx(&__pyx_mdef_7bmtools_5exact_7moments_3calculate_moments_parallel, NULL, __pyx_n_s_bmtools_exact_moments); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 74, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_calculate_moments_parallel, __pyx_t_3) < 0) __PYX_ERR(0, 74, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "bmtools/exact/moments.pyx":184
*
*
* def calculate_probs_parallel( # <<<<<<<<<<<<<<
* double[:, :] weights, double[:] biases, bint force=False,
* int num_threads=2,):
*/
__pyx_t_3 = PyCFunction_NewEx(&__pyx_mdef_7bmtools_5exact_7moments_5calculate_probs_parallel, NULL, __pyx_n_s_bmtools_exact_moments); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 184, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_calculate_probs_parallel, __pyx_t_3) < 0) __PYX_ERR(0, 184, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "bmtools/exact/moments.pyx":1
* # -*- coding: utf-8 -*- # <<<<<<<<<<<<<<
* """Boltzmann machine moment calculation.
*
*/
__pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_3) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":207
* info.obj = self
*
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
* def __dealloc__(array self):
*/
__pyx_t_3 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 207, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_3) < 0) __PYX_ERR(1, 207, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
PyType_Modified(__pyx_array_type);
/* "View.MemoryView":282
* return self.name
*
* cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>")
*/
__pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__25, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 282, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_XGOTREF(generic);
__Pyx_DECREF_SET(generic, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":283
*
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
* cdef indirect = Enum("<strided and indirect>")
*
*/
__pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__26, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 283, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_XGOTREF(strided);
__Pyx_DECREF_SET(strided, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":284
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__27, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 284, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_XGOTREF(indirect);
__Pyx_DECREF_SET(indirect, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":287
*
*
* cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
* cdef indirect_contiguous = Enum("<contiguous and indirect>")
*
*/
__pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__28, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 287, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_XGOTREF(contiguous);
__Pyx_DECREF_SET(contiguous, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":288
*
* cdef contiguous = Enum("<contiguous and direct>")
* cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__29, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 288, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_XGOTREF(indirect_contiguous);
__Pyx_DECREF_SET(indirect_contiguous, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":312
*
* DEF THREAD_LOCKS_PREALLOCATED = 8
* cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<<
* cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [
* PyThread_allocate_lock(),
*/
__pyx_memoryview_thread_locks_used = 0;
/* "View.MemoryView":313
* DEF THREAD_LOCKS_PREALLOCATED = 8
* cdef int __pyx_memoryview_thread_locks_used = 0
* cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<<
* PyThread_allocate_lock(),
* PyThread_allocate_lock(),
*/
__pyx_t_7[0] = PyThread_allocate_lock();
__pyx_t_7[1] = PyThread_allocate_lock();
__pyx_t_7[2] = PyThread_allocate_lock();
__pyx_t_7[3] = PyThread_allocate_lock();
__pyx_t_7[4] = PyThread_allocate_lock();
__pyx_t_7[5] = PyThread_allocate_lock();
__pyx_t_7[6] = PyThread_allocate_lock();
__pyx_t_7[7] = PyThread_allocate_lock();
memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_7, sizeof(__pyx_memoryview_thread_locks[0]) * (8));
/* "View.MemoryView":535
* info.obj = self
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 535, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_3) < 0) __PYX_ERR(1, 535, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
PyType_Modified(__pyx_memoryview_type);
/* "View.MemoryView":981
* return self.from_object
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 981, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_3) < 0) __PYX_ERR(1, 981, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
PyType_Modified(__pyx_memoryviewslice_type);
/* "View.MemoryView":1391
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
/*--- Wrapped vars code ---*/
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__PYX_XDEC_MEMVIEW(&__pyx_t_4, 1);
__PYX_XDEC_MEMVIEW(&__pyx_t_5, 1);
__PYX_XDEC_MEMVIEW(&__pyx_t_6, 1);
if (__pyx_m) {
if (__pyx_d) {
__Pyx_AddTraceback("init bmtools.exact.moments", __pyx_clineno, __pyx_lineno, __pyx_filename);
}
Py_DECREF(__pyx_m); __pyx_m = 0;
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init bmtools.exact.moments");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if PY_MAJOR_VERSION < 3
return;
#else
return __pyx_m;
#endif
}
/* --- Runtime support code --- */
/* Refnanny */
#if CYTHON_REFNANNY
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
PyObject *m = NULL, *p = NULL;
void *r = NULL;
m = PyImport_ImportModule((char *)modname);
if (!m) goto end;
p = PyObject_GetAttrString(m, (char *)"RefNannyAPI");
if (!p) goto end;
r = PyLong_AsVoidPtr(p);
end:
Py_XDECREF(p);
Py_XDECREF(m);
return (__Pyx_RefNannyAPIStruct *)r;
}
#endif
/* GetBuiltinName */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
if (unlikely(!result)) {
PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
"name '%U' is not defined", name);
#else
"name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
}
return result;
}
/* RaiseArgTupleInvalid */
static void __Pyx_RaiseArgtupleInvalid(
const char* func_name,
int exact,
Py_ssize_t num_min,
Py_ssize_t num_max,
Py_ssize_t num_found)
{
Py_ssize_t num_expected;
const char *more_or_less;
if (num_found < num_min) {
num_expected = num_min;
more_or_less = "at least";
} else {
num_expected = num_max;
more_or_less = "at most";
}
if (exact) {
more_or_less = "exactly";
}
PyErr_Format(PyExc_TypeError,
"%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
func_name, more_or_less, num_expected,
(num_expected == 1) ? "" : "s", num_found);
}
/* RaiseDoubleKeywords */
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name,
PyObject* kw_name)
{
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
"%s() got multiple values for keyword argument '%s'", func_name,
PyString_AsString(kw_name));
#endif
}
/* ParseKeywords */
static int __Pyx_ParseOptionalKeywords(
PyObject *kwds,
PyObject **argnames[],
PyObject *kwds2,
PyObject *values[],
Py_ssize_t num_pos_args,
const char* function_name)
{
PyObject *key = 0, *value = 0;
Py_ssize_t pos = 0;
PyObject*** name;
PyObject*** first_kw_arg = argnames + num_pos_args;
while (PyDict_Next(kwds, &pos, &key, &value)) {
name = first_kw_arg;
while (*name && (**name != key)) name++;
if (*name) {
values[name-argnames] = value;
continue;
}
name = first_kw_arg;
#if PY_MAJOR_VERSION < 3
if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) {
while (*name) {
if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
&& _PyString_Eq(**name, key)) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
if ((**argname == key) || (
(CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
&& _PyString_Eq(**argname, key))) {
goto arg_passed_twice;
}
argname++;
}
}
} else
#endif
if (likely(PyUnicode_Check(key))) {
while (*name) {
int cmp = (**name == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
PyUnicode_Compare(**name, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
int cmp = (**argname == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
PyUnicode_Compare(**argname, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) goto arg_passed_twice;
argname++;
}
}
} else
goto invalid_keyword_type;
if (kwds2) {
if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
} else {
goto invalid_keyword;
}
}
return 0;
arg_passed_twice:
__Pyx_RaiseDoubleKeywordsError(function_name, key);
goto bad;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%.200s() keywords must be strings", function_name);
goto bad;
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%.200s() got an unexpected keyword argument '%.200s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
bad:
return -1;
}
/* PyObjectCall */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
PyObject *result;
ternaryfunc call = func->ob_type->tp_call;
if (unlikely(!call))
return PyObject_Call(func, arg, kw);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = (*call)(func, arg, kw);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* BufferFormatCheck */
static CYTHON_INLINE int __Pyx_IsLittleEndian(void) {
unsigned int n = 1;
return *(unsigned char*)(&n) != 0;
}
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type) {
stack[0].field = &ctx->root;
stack[0].parent_offset = 0;
ctx->root.type = type;
ctx->root.name = "buffer dtype";
ctx->root.offset = 0;
ctx->head = stack;
ctx->head->field = &ctx->root;
ctx->fmt_offset = 0;
ctx->head->parent_offset = 0;
ctx->new_packmode = '@';
ctx->enc_packmode = '@';
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->is_complex = 0;
ctx->is_valid_array = 0;
ctx->struct_alignment = 0;
while (type->typegroup == 'S') {
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = 0;
type = type->fields->type;
}
}
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
int count;
const char* t = *ts;
if (*t < '0' || *t > '9') {
return -1;
} else {
count = *t++ - '0';
while (*t >= '0' && *t < '9') {
count *= 10;
count += *t++ - '0';
}
}
*ts = t;
return count;
}
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
int number = __Pyx_BufFmt_ParseNumber(ts);
if (number == -1)
PyErr_Format(PyExc_ValueError,\
"Does not understand character buffer dtype format string ('%c')", **ts);
return number;
}
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
PyErr_Format(PyExc_ValueError,
"Unexpected format string character: '%c'", ch);
}
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
switch (ch) {
case 'c': return "'char'";
case 'b': return "'signed char'";
case 'B': return "'unsigned char'";
case 'h': return "'short'";
case 'H': return "'unsigned short'";
case 'i': return "'int'";
case 'I': return "'unsigned int'";
case 'l': return "'long'";
case 'L': return "'unsigned long'";
case 'q': return "'long long'";
case 'Q': return "'unsigned long long'";
case 'f': return (is_complex ? "'complex float'" : "'float'");
case 'd': return (is_complex ? "'complex double'" : "'double'");
case 'g': return (is_complex ? "'complex long double'" : "'long double'");
case 'T': return "a struct";
case 'O': return "Python object";
case 'P': return "a pointer";
case 's': case 'p': return "a string";
case 0: return "end";
default: return "unparseable format string";
}
}
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return 2;
case 'i': case 'I': case 'l': case 'L': return 4;
case 'q': case 'Q': return 8;
case 'f': return (is_complex ? 8 : 4);
case 'd': return (is_complex ? 16 : 8);
case 'g': {
PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')..");
return 0;
}
case 'O': case 'P': return sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
switch (ch) {
case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(short);
case 'i': case 'I': return sizeof(int);
case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(float) * (is_complex ? 2 : 1);
case 'd': return sizeof(double) * (is_complex ? 2 : 1);
case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
case 'O': case 'P': return sizeof(void*);
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
/* These are for computing the padding at the end of the struct to align
on the first member of the struct. This will probably the same as above,
but we don't have any guarantees.
*/
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
switch (ch) {
case 'c':
return 'H';
case 'b': case 'h': case 'i':
case 'l': case 'q': case 's': case 'p':
return 'I';
case 'B': case 'H': case 'I': case 'L': case 'Q':
return 'U';
case 'f': case 'd': case 'g':
return (is_complex ? 'C' : 'R');
case 'O':
return 'O';
case 'P':
return 'P';
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
if (ctx->head == NULL || ctx->head->field == &ctx->root) {
const char* expected;
const char* quote;
if (ctx->head == NULL) {
expected = "end";
quote = "";
} else {
expected = ctx->head->field->type->name;
quote = "'";
}
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected %s%s%s but got %s",
quote, expected, quote,
__Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
} else {
__Pyx_StructField* field = ctx->head->field;
__Pyx_StructField* parent = (ctx->head - 1)->field;
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
parent->type->name, field->name);
}
}
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
char group;
size_t size, offset, arraysize = 1;
if (ctx->enc_type == 0) return 0;
if (ctx->head->field->type->arraysize[0]) {
int i, ndim = 0;
if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
ctx->is_valid_array = ctx->head->field->type->ndim == 1;
ndim = 1;
if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %zu",
ctx->head->field->type->arraysize[0], ctx->enc_count);
return -1;
}
}
if (!ctx->is_valid_array) {
PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
ctx->head->field->type->ndim, ndim);
return -1;
}
for (i = 0; i < ctx->head->field->type->ndim; i++) {
arraysize *= ctx->head->field->type->arraysize[i];
}
ctx->is_valid_array = 0;
ctx->enc_count = 1;
}
group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
do {
__Pyx_StructField* field = ctx->head->field;
__Pyx_TypeInfo* type = field->type;
if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
} else {
size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
}
if (ctx->enc_packmode == '@') {
size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
size_t align_mod_offset;
if (align_at == 0) return -1;
align_mod_offset = ctx->fmt_offset % align_at;
if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
if (ctx->struct_alignment == 0)
ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
ctx->is_complex);
}
if (type->size != size || type->typegroup != group) {
if (type->typegroup == 'C' && type->fields != NULL) {
size_t parent_offset = ctx->head->parent_offset + field->offset;
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = parent_offset;
continue;
}
if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
} else {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
}
offset = ctx->head->parent_offset + field->offset;
if (ctx->fmt_offset != offset) {
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
(Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
return -1;
}
ctx->fmt_offset += size;
if (arraysize)
ctx->fmt_offset += (arraysize - 1) * size;
--ctx->enc_count;
while (1) {
if (field == &ctx->root) {
ctx->head = NULL;
if (ctx->enc_count != 0) {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
break;
}
ctx->head->field = ++field;
if (field->type == NULL) {
--ctx->head;
field = ctx->head->field;
continue;
} else if (field->type->typegroup == 'S') {
size_t parent_offset = ctx->head->parent_offset + field->offset;
if (field->type->fields->type == NULL) continue;
field = field->type->fields;
++ctx->head;
ctx->head->field = field;
ctx->head->parent_offset = parent_offset;
break;
} else {
break;
}
}
} while (ctx->enc_count);
ctx->enc_type = 0;
ctx->is_complex = 0;
return 0;
}
static CYTHON_INLINE PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
const char *ts = *tsp;
int i = 0, number;
int ndim = ctx->head->field->type->ndim;
;
++ts;
if (ctx->new_count != 1) {
PyErr_SetString(PyExc_ValueError,
"Cannot handle repeated arrays in format string");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
while (*ts && *ts != ')') {
switch (*ts) {
case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue;
default: break;
}
number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
return PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %d",
ctx->head->field->type->arraysize[i], number);
if (*ts != ',' && *ts != ')')
return PyErr_Format(PyExc_ValueError,
"Expected a comma in format string, got '%c'", *ts);
if (*ts == ',') ts++;
i++;
}
if (i != ndim)
return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
ctx->head->field->type->ndim, i);
if (!*ts) {
PyErr_SetString(PyExc_ValueError,
"Unexpected end of format string, expected ')'");
return NULL;
}
ctx->is_valid_array = 1;
ctx->new_count = 1;
*tsp = ++ts;
return Py_None;
}
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
int got_Z = 0;
while (1) {
switch(*ts) {
case 0:
if (ctx->enc_type != 0 && ctx->head == NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
if (ctx->head != NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
return ts;
case ' ':
case '\r':
case '\n':
++ts;
break;
case '<':
if (!__Pyx_IsLittleEndian()) {
PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '>':
case '!':
if (__Pyx_IsLittleEndian()) {
PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '=':
case '@':
case '^':
ctx->new_packmode = *ts++;
break;
case 'T':
{
const char* ts_after_sub;
size_t i, struct_count = ctx->new_count;
size_t struct_alignment = ctx->struct_alignment;
ctx->new_count = 1;
++ts;
if (*ts != '{') {
PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
ctx->enc_count = 0;
ctx->struct_alignment = 0;
++ts;
ts_after_sub = ts;
for (i = 0; i != struct_count; ++i) {
ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
if (!ts_after_sub) return NULL;
}
ts = ts_after_sub;
if (struct_alignment) ctx->struct_alignment = struct_alignment;
}
break;
case '}':
{
size_t alignment = ctx->struct_alignment;
++ts;
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
if (alignment && ctx->fmt_offset % alignment) {
ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
}
}
return ts;
case 'x':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->fmt_offset += ctx->new_count;
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->enc_packmode = ctx->new_packmode;
++ts;
break;
case 'Z':
got_Z = 1;
++ts;
if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
__Pyx_BufFmt_RaiseUnexpectedChar('Z');
return NULL;
}
case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
case 'l': case 'L': case 'q': case 'Q':
case 'f': case 'd': case 'g':
case 'O': case 'p':
if (ctx->enc_type == *ts && got_Z == ctx->is_complex &&
ctx->enc_packmode == ctx->new_packmode) {
ctx->enc_count += ctx->new_count;
ctx->new_count = 1;
got_Z = 0;
++ts;
break;
}
case 's':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_count = ctx->new_count;
ctx->enc_packmode = ctx->new_packmode;
ctx->enc_type = *ts;
ctx->is_complex = got_Z;
++ts;
ctx->new_count = 1;
got_Z = 0;
break;
case ':':
++ts;
while(*ts != ':') ++ts;
++ts;
break;
case '(':
if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
break;
default:
{
int number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
ctx->new_count = (size_t)number;
}
}
}
}
static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) {
buf->buf = NULL;
buf->obj = NULL;
buf->strides = __Pyx_zeros;
buf->shape = __Pyx_zeros;
buf->suboffsets = __Pyx_minusones;
}
static CYTHON_INLINE int __Pyx_GetBufferAndValidate(
Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags,
int nd, int cast, __Pyx_BufFmt_StackElem* stack)
{
if (obj == Py_None || obj == NULL) {
__Pyx_ZeroBuffer(buf);
return 0;
}
buf->buf = NULL;
if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail;
if (buf->ndim != nd) {
PyErr_Format(PyExc_ValueError,
"Buffer has wrong number of dimensions (expected %d, got %d)",
nd, buf->ndim);
goto fail;
}
if (!cast) {
__Pyx_BufFmt_Context ctx;
__Pyx_BufFmt_Init(&ctx, stack, dtype);
if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
}
if ((unsigned)buf->itemsize != dtype->size) {
PyErr_Format(PyExc_ValueError,
"Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)",
buf->itemsize, (buf->itemsize > 1) ? "s" : "",
dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : "");
goto fail;
}
if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones;
return 0;
fail:;
__Pyx_ZeroBuffer(buf);
return -1;
}
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) {
if (info->buf == NULL) return;
if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL;
__Pyx_ReleaseBuffer(info);
}
/* MemviewSliceInit */
static int
__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference)
{
__Pyx_RefNannyDeclarations
int i, retval=-1;
Py_buffer *buf = &memview->view;
__Pyx_RefNannySetupContext("init_memviewslice", 0);
if (!buf) {
PyErr_SetString(PyExc_ValueError,
"buf is NULL.");
goto fail;
} else if (memviewslice->memview || memviewslice->data) {
PyErr_SetString(PyExc_ValueError,
"memviewslice is already initialized!");
goto fail;
}
if (buf->strides) {
for (i = 0; i < ndim; i++) {
memviewslice->strides[i] = buf->strides[i];
}
} else {
Py_ssize_t stride = buf->itemsize;
for (i = ndim - 1; i >= 0; i--) {
memviewslice->strides[i] = stride;
stride *= buf->shape[i];
}
}
for (i = 0; i < ndim; i++) {
memviewslice->shape[i] = buf->shape[i];
if (buf->suboffsets) {
memviewslice->suboffsets[i] = buf->suboffsets[i];
} else {
memviewslice->suboffsets[i] = -1;
}
}
memviewslice->memview = memview;
memviewslice->data = (char *)buf->buf;
if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) {
Py_INCREF(memview);
}
retval = 0;
goto no_fail;
fail:
memviewslice->memview = 0;
memviewslice->data = 0;
retval = -1;
no_fail:
__Pyx_RefNannyFinishContext();
return retval;
}
static CYTHON_INLINE void __pyx_fatalerror(const char *fmt, ...) {
va_list vargs;
char msg[200];
#ifdef HAVE_STDARG_PROTOTYPES
va_start(vargs, fmt);
#else
va_start(vargs);
#endif
vsnprintf(msg, 200, fmt, vargs);
Py_FatalError(msg);
va_end(vargs);
}
static CYTHON_INLINE int
__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock)
{
int result;
PyThread_acquire_lock(lock, 1);
result = (*acquisition_count)++;
PyThread_release_lock(lock);
return result;
}
static CYTHON_INLINE int
__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock)
{
int result;
PyThread_acquire_lock(lock, 1);
result = (*acquisition_count)--;
PyThread_release_lock(lock);
return result;
}
static CYTHON_INLINE void
__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno)
{
int first_time;
struct __pyx_memoryview_obj *memview = memslice->memview;
if (!memview || (PyObject *) memview == Py_None)
return;
if (__pyx_get_slice_count(memview) < 0)
__pyx_fatalerror("Acquisition count is %d (line %d)",
__pyx_get_slice_count(memview), lineno);
first_time = __pyx_add_acquisition_count(memview) == 0;
if (first_time) {
if (have_gil) {
Py_INCREF((PyObject *) memview);
} else {
PyGILState_STATE _gilstate = PyGILState_Ensure();
Py_INCREF((PyObject *) memview);
PyGILState_Release(_gilstate);
}
}
}
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice,
int have_gil, int lineno) {
int last_time;
struct __pyx_memoryview_obj *memview = memslice->memview;
if (!memview ) {
return;
} else if ((PyObject *) memview == Py_None) {
memslice->memview = NULL;
return;
}
if (__pyx_get_slice_count(memview) <= 0)
__pyx_fatalerror("Acquisition count is %d (line %d)",
__pyx_get_slice_count(memview), lineno);
last_time = __pyx_sub_acquisition_count(memview) == 1;
memslice->data = NULL;
if (last_time) {
if (have_gil) {
Py_CLEAR(memslice->memview);
} else {
PyGILState_STATE _gilstate = PyGILState_Ensure();
Py_CLEAR(memslice->memview);
PyGILState_Release(_gilstate);
}
} else {
memslice->memview = NULL;
}
}
/* PyErrFetchRestore */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
tmp_type = tstate->curexc_type;
tmp_value = tstate->curexc_value;
tmp_tb = tstate->curexc_traceback;
tstate->curexc_type = type;
tstate->curexc_value = value;
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
*type = tstate->curexc_type;
*value = tstate->curexc_value;
*tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
}
#endif
/* RaiseException */
#if PY_MAJOR_VERSION < 3
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
CYTHON_UNUSED PyObject *cause) {
__Pyx_PyThreadState_declare
Py_XINCREF(type);
if (!value || value == Py_None)
value = NULL;
else
Py_INCREF(value);
if (!tb || tb == Py_None)
tb = NULL;
else {
Py_INCREF(tb);
if (!PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto raise_error;
}
}
if (PyType_Check(type)) {
#if CYTHON_COMPILING_IN_PYPY
if (!value) {
Py_INCREF(Py_None);
value = Py_None;
}
#endif
PyErr_NormalizeException(&type, &value, &tb);
} else {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto raise_error;
}
value = type;
type = (PyObject*) Py_TYPE(type);
Py_INCREF(type);
if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto raise_error;
}
}
__Pyx_PyThreadState_assign
__Pyx_ErrRestore(type, value, tb);
return;
raise_error:
Py_XDECREF(value);
Py_XDECREF(type);
Py_XDECREF(tb);
return;
}
#else
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
PyObject* owned_instance = NULL;
if (tb == Py_None) {
tb = 0;
} else if (tb && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto bad;
}
if (value == Py_None)
value = 0;
if (PyExceptionInstance_Check(type)) {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto bad;
}
value = type;
type = (PyObject*) Py_TYPE(value);
} else if (PyExceptionClass_Check(type)) {
PyObject *instance_class = NULL;
if (value && PyExceptionInstance_Check(value)) {
instance_class = (PyObject*) Py_TYPE(value);
if (instance_class != type) {
int is_subclass = PyObject_IsSubclass(instance_class, type);
if (!is_subclass) {
instance_class = NULL;
} else if (unlikely(is_subclass == -1)) {
goto bad;
} else {
type = instance_class;
}
}
}
if (!instance_class) {
PyObject *args;
if (!value)
args = PyTuple_New(0);
else if (PyTuple_Check(value)) {
Py_INCREF(value);
args = value;
} else
args = PyTuple_Pack(1, value);
if (!args)
goto bad;
owned_instance = PyObject_Call(type, args, NULL);
Py_DECREF(args);
if (!owned_instance)
goto bad;
value = owned_instance;
if (!PyExceptionInstance_Check(value)) {
PyErr_Format(PyExc_TypeError,
"calling %R should have returned an instance of "
"BaseException, not %R",
type, Py_TYPE(value));
goto bad;
}
}
} else {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto bad;
}
#if PY_VERSION_HEX >= 0x03030000
if (cause) {
#else
if (cause && cause != Py_None) {
#endif
PyObject *fixed_cause;
if (cause == Py_None) {
fixed_cause = NULL;
} else if (PyExceptionClass_Check(cause)) {
fixed_cause = PyObject_CallObject(cause, NULL);
if (fixed_cause == NULL)
goto bad;
} else if (PyExceptionInstance_Check(cause)) {
fixed_cause = cause;
Py_INCREF(fixed_cause);
} else {
PyErr_SetString(PyExc_TypeError,
"exception causes must derive from "
"BaseException");
goto bad;
}
PyException_SetCause(value, fixed_cause);
}
PyErr_SetObject(type, value);
if (tb) {
#if CYTHON_COMPILING_IN_PYPY
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
Py_INCREF(tb);
PyErr_Restore(tmp_type, tmp_value, tb);
Py_XDECREF(tmp_tb);
#else
PyThreadState *tstate = PyThreadState_GET();
PyObject* tmp_tb = tstate->curexc_traceback;
if (tb != tmp_tb) {
Py_INCREF(tb);
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_tb);
}
#endif
}
bad:
Py_XDECREF(owned_instance);
return;
}
#endif
/* ArgTypeTest */
static void __Pyx_RaiseArgumentTypeInvalid(const char* name, PyObject *obj, PyTypeObject *type) {
PyErr_Format(PyExc_TypeError,
"Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
name, type->tp_name, Py_TYPE(obj)->tp_name);
}
static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
const char *name, int exact)
{
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
if (none_allowed && obj == Py_None) return 1;
else if (exact) {
if (likely(Py_TYPE(obj) == type)) return 1;
#if PY_MAJOR_VERSION == 2
else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
#endif
}
else {
if (likely(PyObject_TypeCheck(obj, type))) return 1;
}
__Pyx_RaiseArgumentTypeInvalid(name, obj, type);
return 0;
}
/* BytesEquals */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
return PyObject_RichCompareBool(s1, s2, equals);
#else
if (s1 == s2) {
return (equals == Py_EQ);
} else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) {
const char *ps1, *ps2;
Py_ssize_t length = PyBytes_GET_SIZE(s1);
if (length != PyBytes_GET_SIZE(s2))
return (equals == Py_NE);
ps1 = PyBytes_AS_STRING(s1);
ps2 = PyBytes_AS_STRING(s2);
if (ps1[0] != ps2[0]) {
return (equals == Py_NE);
} else if (length == 1) {
return (equals == Py_EQ);
} else {
int result = memcmp(ps1, ps2, (size_t)length);
return (equals == Py_EQ) ? (result == 0) : (result != 0);
}
} else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) {
return (equals == Py_NE);
} else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) {
return (equals == Py_NE);
} else {
int result;
PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
if (!py_result)
return -1;
result = __Pyx_PyObject_IsTrue(py_result);
Py_DECREF(py_result);
return result;
}
#endif
}
/* UnicodeEquals */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
return PyObject_RichCompareBool(s1, s2, equals);
#else
#if PY_MAJOR_VERSION < 3
PyObject* owned_ref = NULL;
#endif
int s1_is_unicode, s2_is_unicode;
if (s1 == s2) {
goto return_eq;
}
s1_is_unicode = PyUnicode_CheckExact(s1);
s2_is_unicode = PyUnicode_CheckExact(s2);
#if PY_MAJOR_VERSION < 3
if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) {
owned_ref = PyUnicode_FromObject(s2);
if (unlikely(!owned_ref))
return -1;
s2 = owned_ref;
s2_is_unicode = 1;
} else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) {
owned_ref = PyUnicode_FromObject(s1);
if (unlikely(!owned_ref))
return -1;
s1 = owned_ref;
s1_is_unicode = 1;
} else if (((!s2_is_unicode) & (!s1_is_unicode))) {
return __Pyx_PyBytes_Equals(s1, s2, equals);
}
#endif
if (s1_is_unicode & s2_is_unicode) {
Py_ssize_t length;
int kind;
void *data1, *data2;
if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0))
return -1;
length = __Pyx_PyUnicode_GET_LENGTH(s1);
if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) {
goto return_ne;
}
kind = __Pyx_PyUnicode_KIND(s1);
if (kind != __Pyx_PyUnicode_KIND(s2)) {
goto return_ne;
}
data1 = __Pyx_PyUnicode_DATA(s1);
data2 = __Pyx_PyUnicode_DATA(s2);
if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) {
goto return_ne;
} else if (length == 1) {
goto return_eq;
} else {
int result = memcmp(data1, data2, (size_t)(length * kind));
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_EQ) ? (result == 0) : (result != 0);
}
} else if ((s1 == Py_None) & s2_is_unicode) {
goto return_ne;
} else if ((s2 == Py_None) & s1_is_unicode) {
goto return_ne;
} else {
int result;
PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
if (!py_result)
return -1;
result = __Pyx_PyObject_IsTrue(py_result);
Py_DECREF(py_result);
return result;
}
return_eq:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_EQ);
return_ne:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_NE);
#endif
}
/* GetAttr */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) {
#if CYTHON_COMPILING_IN_CPYTHON
#if PY_MAJOR_VERSION >= 3
if (likely(PyUnicode_Check(n)))
#else
if (likely(PyString_Check(n)))
#endif
return __Pyx_PyObject_GetAttrStr(o, n);
#endif
return PyObject_GetAttr(o, n);
}
/* decode_c_string */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
Py_ssize_t length;
if (unlikely((start < 0) | (stop < 0))) {
size_t slen = strlen(cstring);
if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) {
PyErr_SetString(PyExc_OverflowError,
"c-string too long to convert to Python");
return NULL;
}
length = (Py_ssize_t) slen;
if (start < 0) {
start += length;
if (start < 0)
start = 0;
}
if (stop < 0)
stop += length;
}
length = stop - start;
if (unlikely(length <= 0))
return PyUnicode_FromUnicode(NULL, 0);
cstring += start;
if (decode_func) {
return decode_func(cstring, length, errors);
} else {
return PyUnicode_Decode(cstring, length, encoding, errors);
}
}
/* RaiseTooManyValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
PyErr_Format(PyExc_ValueError,
"too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
/* RaiseNeedMoreValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
PyErr_Format(PyExc_ValueError,
"need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
index, (index == 1) ? "" : "s");
}
/* RaiseNoneIterError */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* ExtTypeTest */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
if (likely(PyObject_TypeCheck(obj, type)))
return 1;
PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
Py_TYPE(obj)->tp_name, type->tp_name);
return 0;
}
/* SaveResetException */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
*type = tstate->exc_type;
*value = tstate->exc_value;
*tb = tstate->exc_traceback;
Py_XINCREF(*type);
Py_XINCREF(*value);
Py_XINCREF(*tb);
}
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = type;
tstate->exc_value = value;
tstate->exc_traceback = tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
#endif
/* PyErrExceptionMatches */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
PyObject *exc_type = tstate->curexc_type;
if (exc_type == err) return 1;
if (unlikely(!exc_type)) return 0;
return PyErr_GivenExceptionMatches(exc_type, err);
}
#endif
/* GetException */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) {
#endif
PyObject *local_type, *local_value, *local_tb;
#if CYTHON_FAST_THREAD_STATE
PyObject *tmp_type, *tmp_value, *tmp_tb;
local_type = tstate->curexc_type;
local_value = tstate->curexc_value;
local_tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
#else
PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
PyErr_NormalizeException(&local_type, &local_value, &local_tb);
#if CYTHON_FAST_THREAD_STATE
if (unlikely(tstate->curexc_type))
#else
if (unlikely(PyErr_Occurred()))
#endif
goto bad;
#if PY_MAJOR_VERSION >= 3
if (local_tb) {
if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
goto bad;
}
#endif
Py_XINCREF(local_tb);
Py_XINCREF(local_type);
Py_XINCREF(local_value);
*type = local_type;
*value = local_value;
*tb = local_tb;
#if CYTHON_FAST_THREAD_STATE
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = local_type;
tstate->exc_value = local_value;
tstate->exc_traceback = local_tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
#else
PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
return 0;
bad:
*type = 0;
*value = 0;
*tb = 0;
Py_XDECREF(local_type);
Py_XDECREF(local_value);
Py_XDECREF(local_tb);
return -1;
}
/* SwapException */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = *type;
tstate->exc_value = *value;
tstate->exc_traceback = *tb;
*type = tmp_type;
*value = tmp_value;
*tb = tmp_tb;
}
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb);
PyErr_SetExcInfo(*type, *value, *tb);
*type = tmp_type;
*value = tmp_value;
*tb = tmp_tb;
}
#endif
/* Import */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
PyObject *empty_list = 0;
PyObject *module = 0;
PyObject *global_dict = 0;
PyObject *empty_dict = 0;
PyObject *list;
#if PY_VERSION_HEX < 0x03030000
PyObject *py_import;
py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
if (!py_import)
goto bad;
#endif
if (from_list)
list = from_list;
else {
empty_list = PyList_New(0);
if (!empty_list)
goto bad;
list = empty_list;
}
global_dict = PyModule_GetDict(__pyx_m);
if (!global_dict)
goto bad;
empty_dict = PyDict_New();
if (!empty_dict)
goto bad;
{
#if PY_MAJOR_VERSION >= 3
if (level == -1) {
if (strchr(__Pyx_MODULE_NAME, '.')) {
#if PY_VERSION_HEX < 0x03030000
PyObject *py_level = PyInt_FromLong(1);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, NULL);
Py_DECREF(py_level);
#else
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, 1);
#endif
if (!module) {
if (!PyErr_ExceptionMatches(PyExc_ImportError))
goto bad;
PyErr_Clear();
}
}
level = 0;
}
#endif
if (!module) {
#if PY_VERSION_HEX < 0x03030000
PyObject *py_level = PyInt_FromLong(level);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, NULL);
Py_DECREF(py_level);
#else
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, level);
#endif
}
}
bad:
#if PY_VERSION_HEX < 0x03030000
Py_XDECREF(py_import);
#endif
Py_XDECREF(empty_list);
Py_XDECREF(empty_dict);
return module;
}
/* PyFunctionFastCall */
#if CYTHON_FAST_PYCALL
#include "frameobject.h"
static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
PyObject *globals) {
PyFrameObject *f;
PyThreadState *tstate = PyThreadState_GET();
PyObject **fastlocals;
Py_ssize_t i;
PyObject *result;
assert(globals != NULL);
/* XXX Perhaps we should create a specialized
PyFrame_New() that doesn't take locals, but does
take builtins without sanity checking them.
*/
assert(tstate != NULL);
f = PyFrame_New(tstate, co, globals, NULL);
if (f == NULL) {
return NULL;
}
fastlocals = f->f_localsplus;
for (i = 0; i < na; i++) {
Py_INCREF(*args);
fastlocals[i] = *args++;
}
result = PyEval_EvalFrameEx(f,0);
++tstate->recursion_depth;
Py_DECREF(f);
--tstate->recursion_depth;
return result;
}
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs) {
PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
PyObject *globals = PyFunction_GET_GLOBALS(func);
PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
PyObject *closure;
#if PY_MAJOR_VERSION >= 3
PyObject *kwdefs;
#endif
PyObject *kwtuple, **k;
PyObject **d;
Py_ssize_t nd;
Py_ssize_t nk;
PyObject *result;
assert(kwargs == NULL || PyDict_Check(kwargs));
nk = kwargs ? PyDict_Size(kwargs) : 0;
if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
return NULL;
}
if (
#if PY_MAJOR_VERSION >= 3
co->co_kwonlyargcount == 0 &&
#endif
likely(kwargs == NULL || nk == 0) &&
co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
if (argdefs == NULL && co->co_argcount == nargs) {
result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
goto done;
}
else if (nargs == 0 && argdefs != NULL
&& co->co_argcount == Py_SIZE(argdefs)) {
/* function called with no arguments, but all parameters have
a default value: use default values as arguments .*/
args = &PyTuple_GET_ITEM(argdefs, 0);
result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
goto done;
}
}
if (kwargs != NULL) {
Py_ssize_t pos, i;
kwtuple = PyTuple_New(2 * nk);
if (kwtuple == NULL) {
result = NULL;
goto done;
}
k = &PyTuple_GET_ITEM(kwtuple, 0);
pos = i = 0;
while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
Py_INCREF(k[i]);
Py_INCREF(k[i+1]);
i += 2;
}
nk = i / 2;
}
else {
kwtuple = NULL;
k = NULL;
}
closure = PyFunction_GET_CLOSURE(func);
#if PY_MAJOR_VERSION >= 3
kwdefs = PyFunction_GET_KW_DEFAULTS(func);
#endif
if (argdefs != NULL) {
d = &PyTuple_GET_ITEM(argdefs, 0);
nd = Py_SIZE(argdefs);
}
else {
d = NULL;
nd = 0;
}
#if PY_MAJOR_VERSION >= 3
result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
args, nargs,
k, (int)nk,
d, (int)nd, kwdefs, closure);
#else
result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
args, nargs,
k, (int)nk,
d, (int)nd, closure);
#endif
Py_XDECREF(kwtuple);
done:
Py_LeaveRecursiveCall();
return result;
}
#endif // CPython < 3.6
#endif // CYTHON_FAST_PYCALL
/* PyCFunctionFastCall */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
PyCFunction meth = PyCFunction_GET_FUNCTION(func);
PyObject *self = PyCFunction_GET_SELF(func);
PyObject *result;
int flags;
assert(PyCFunction_Check(func));
assert(METH_FASTCALL == PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST));
assert(nargs >= 0);
assert(nargs == 0 || args != NULL);
/* _PyCFunction_FastCallDict() must not be called with an exception set,
because it may clear it (directly or indirectly) and so the
caller loses its exception */
assert(!PyErr_Occurred());
return (*((__Pyx_PyCFunctionFast)meth)) (self, args, nargs, NULL);
}
#endif // CYTHON_FAST_PYCCALL
/* GetItemInt */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
PyObject *r;
if (!j) return NULL;
r = PyObject_GetItem(o, j);
Py_DECREF(j);
return r;
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
if (wraparound & unlikely(i < 0)) i += PyList_GET_SIZE(o);
if ((!boundscheck) || likely((0 <= i) & (i < PyList_GET_SIZE(o)))) {
PyObject *r = PyList_GET_ITEM(o, i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
if (wraparound & unlikely(i < 0)) i += PyTuple_GET_SIZE(o);
if ((!boundscheck) || likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
if (is_list || PyList_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) {
PyObject *r = PyList_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
}
else if (PyTuple_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
} else {
PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
if (likely(m && m->sq_item)) {
if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
Py_ssize_t l = m->sq_length(o);
if (likely(l >= 0)) {
i += l;
} else {
if (!PyErr_ExceptionMatches(PyExc_OverflowError))
return NULL;
PyErr_Clear();
}
}
return m->sq_item(o, i);
}
}
#else
if (is_list || PySequence_Check(o)) {
return PySequence_GetItem(o, i);
}
#endif
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
/* PyIntBinop */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED int inplace) {
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(op1))) {
const long b = intval;
long x;
long a = PyInt_AS_LONG(op1);
x = (long)((unsigned long)a + b);
if (likely((x^a) >= 0 || (x^b) >= 0))
return PyInt_FromLong(x);
return PyLong_Type.tp_as_number->nb_add(op1, op2);
}
#endif
#if CYTHON_USE_PYLONG_INTERNALS
if (likely(PyLong_CheckExact(op1))) {
const long b = intval;
long a, x;
#ifdef HAVE_LONG_LONG
const PY_LONG_LONG llb = intval;
PY_LONG_LONG lla, llx;
#endif
const digit* digits = ((PyLongObject*)op1)->ob_digit;
const Py_ssize_t size = Py_SIZE(op1);
if (likely(__Pyx_sst_abs(size) <= 1)) {
a = likely(size) ? digits[0] : 0;
if (size == -1) a = -a;
} else {
switch (size) {
case -2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
case 2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
case -3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
case 3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
case -4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
case 4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
default: return PyLong_Type.tp_as_number->nb_add(op1, op2);
}
}
x = a + b;
return PyLong_FromLong(x);
#ifdef HAVE_LONG_LONG
long_long:
llx = lla + llb;
return PyLong_FromLongLong(llx);
#endif
}
#endif
if (PyFloat_CheckExact(op1)) {
const long b = intval;
double a = PyFloat_AS_DOUBLE(op1);
double result;
PyFPE_START_PROTECT("add", return NULL)
result = ((double)a) + (double)b;
PyFPE_END_PROTECT(result)
return PyFloat_FromDouble(result);
}
return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2);
}
#endif
/* None */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}
/* WriteUnraisableException */
static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno,
CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename,
int full_traceback, CYTHON_UNUSED int nogil) {
PyObject *old_exc, *old_val, *old_tb;
PyObject *ctx;
__Pyx_PyThreadState_declare
#ifdef WITH_THREAD
PyGILState_STATE state;
if (nogil)
state = PyGILState_Ensure();
#ifdef _MSC_VER
else state = (PyGILState_STATE)-1;
#endif
#endif
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&old_exc, &old_val, &old_tb);
if (full_traceback) {
Py_XINCREF(old_exc);
Py_XINCREF(old_val);
Py_XINCREF(old_tb);
__Pyx_ErrRestore(old_exc, old_val, old_tb);
PyErr_PrintEx(1);
}
#if PY_MAJOR_VERSION < 3
ctx = PyString_FromString(name);
#else
ctx = PyUnicode_FromString(name);
#endif
__Pyx_ErrRestore(old_exc, old_val, old_tb);
if (!ctx) {
PyErr_WriteUnraisable(Py_None);
} else {
PyErr_WriteUnraisable(ctx);
Py_DECREF(ctx);
}
#ifdef WITH_THREAD
if (nogil)
PyGILState_Release(state);
#endif
}
/* PyObjectCallMethO */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
PyObject *self, *result;
PyCFunction cfunc;
cfunc = PyCFunction_GET_FUNCTION(func);
self = PyCFunction_GET_SELF(func);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = cfunc(self, arg);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyObjectCallOneArg */
#if CYTHON_COMPILING_IN_CPYTHON
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_New(1);
if (unlikely(!args)) return NULL;
Py_INCREF(arg);
PyTuple_SET_ITEM(args, 0, arg);
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(func)) {
return __Pyx_PyFunction_FastCall(func, &arg, 1);
}
#endif
#ifdef __Pyx_CyFunction_USED
if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) {
#else
if (likely(PyCFunction_Check(func))) {
#endif
if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
return __Pyx_PyObject_CallMethO(func, arg);
#if CYTHON_FAST_PYCCALL
} else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) {
return __Pyx_PyCFunction_FastCall(func, &arg, 1);
#endif
}
}
return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_Pack(1, arg);
if (unlikely(!args)) return NULL;
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
#endif
/* SetVTable */
static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
#if PY_VERSION_HEX >= 0x02070000
PyObject *ob = PyCapsule_New(vtable, 0, 0);
#else
PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
#endif
if (!ob)
goto bad;
if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0)
goto bad;
Py_DECREF(ob);
return 0;
bad:
Py_XDECREF(ob);
return -1;
}
/* CodeObjectCache */
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
int start = 0, mid = 0, end = count - 1;
if (end >= 0 && code_line > entries[end].code_line) {
return count;
}
while (start < end) {
mid = start + (end - start) / 2;
if (code_line < entries[mid].code_line) {
end = mid;
} else if (code_line > entries[mid].code_line) {
start = mid + 1;
} else {
return mid;
}
}
if (code_line <= entries[mid].code_line) {
return mid;
} else {
return mid + 1;
}
}
static PyCodeObject *__pyx_find_code_object(int code_line) {
PyCodeObject* code_object;
int pos;
if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
return NULL;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
return NULL;
}
code_object = __pyx_code_cache.entries[pos].code_object;
Py_INCREF(code_object);
return code_object;
}
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
int pos, i;
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
if (unlikely(!code_line)) {
return;
}
if (unlikely(!entries)) {
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
if (likely(entries)) {
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = 64;
__pyx_code_cache.count = 1;
entries[0].code_line = code_line;
entries[0].code_object = code_object;
Py_INCREF(code_object);
}
return;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
PyCodeObject* tmp = entries[pos].code_object;
entries[pos].code_object = code_object;
Py_DECREF(tmp);
return;
}
if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
int new_max = __pyx_code_cache.max_count + 64;
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
__pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry));
if (unlikely(!entries)) {
return;
}
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = new_max;
}
for (i=__pyx_code_cache.count; i>pos; i--) {
entries[i] = entries[i-1];
}
entries[pos].code_line = code_line;
entries[pos].code_object = code_object;
__pyx_code_cache.count++;
Py_INCREF(code_object);
}
/* AddTraceback */
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyObject *py_srcfile = 0;
PyObject *py_funcname = 0;
#if PY_MAJOR_VERSION < 3
py_srcfile = PyString_FromString(filename);
#else
py_srcfile = PyUnicode_FromString(filename);
#endif
if (!py_srcfile) goto bad;
if (c_line) {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#else
py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#endif
}
else {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromString(funcname);
#else
py_funcname = PyUnicode_FromString(funcname);
#endif
}
if (!py_funcname) goto bad;
py_code = __Pyx_PyCode_New(
0,
0,
0,
0,
0,
__pyx_empty_bytes, /*PyObject *code,*/
__pyx_empty_tuple, /*PyObject *consts,*/
__pyx_empty_tuple, /*PyObject *names,*/
__pyx_empty_tuple, /*PyObject *varnames,*/
__pyx_empty_tuple, /*PyObject *freevars,*/
__pyx_empty_tuple, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
py_line,
__pyx_empty_bytes /*PyObject *lnotab*/
);
Py_DECREF(py_srcfile);
Py_DECREF(py_funcname);
return py_code;
bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
return NULL;
}
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyFrameObject *py_frame = 0;
py_code = __pyx_find_code_object(c_line ? c_line : py_line);
if (!py_code) {
py_code = __Pyx_CreateCodeObjectForTraceback(
funcname, c_line, py_line, filename);
if (!py_code) goto bad;
__pyx_insert_code_object(c_line ? c_line : py_line, py_code);
}
py_frame = PyFrame_New(
PyThreadState_GET(), /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
__pyx_d, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
__Pyx_PyFrame_SetLineNumber(py_frame, py_line);
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
if (PyObject_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags);
if (PyObject_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags);
PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
return -1;
}
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
PyObject *obj = view->obj;
if (!obj) return;
if (PyObject_CheckBuffer(obj)) {
PyBuffer_Release(view);
return;
}
Py_DECREF(obj);
view->obj = NULL;
}
#endif
/* MemviewSliceIsContig */
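/* Checks whether the slice is contiguous in the given order by walking
   the axes from the fastest-varying one ('F': first axis first,
   otherwise last axis first) and requiring each stride to equal the
   running product of the item size and the extents already visited; any
   dimension with a suboffset makes the slice non-contiguous. */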
static int
__pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs,
char order, int ndim)
{
int i, index, step, start;
Py_ssize_t itemsize = mvs.memview->view.itemsize;
if (order == 'F') {
step = 1;
start = 0;
} else {
step = -1;
start = ndim - 1;
}
for (i = 0; i < ndim; i++) {
index = start + step * i;
if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize)
return 0;
itemsize *= mvs.shape[index];
}
return 1;
}
/* OverlappingSlices */
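/* Two slices overlap iff the half-open address ranges they touch
   intersect.  The extent of a slice is found by walking every axis from
   the data pointer: positive strides push the end address up, negative
   strides pull the start address down; a zero extent collapses the
   range to an empty one. */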
static void
__pyx_get_array_memory_extents(__Pyx_memviewslice *slice,
void **out_start, void **out_end,
int ndim, size_t itemsize)
{
char *start, *end;
int i;
start = end = slice->data;
for (i = 0; i < ndim; i++) {
Py_ssize_t stride = slice->strides[i];
Py_ssize_t extent = slice->shape[i];
if (extent == 0) {
*out_start = *out_end = start;
return;
} else {
if (stride > 0)
end += stride * (extent - 1);
else
start += stride * (extent - 1);
}
}
*out_start = start;
*out_end = end + itemsize;
}
static int
__pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize)
{
void *start1, *end1, *start2, *end2;
__pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize);
__pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize);
return (start1 < end2) && (start2 < end1);
}
/* Capsule */
static CYTHON_INLINE PyObject *
__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig)
{
PyObject *cobj;
#if PY_VERSION_HEX >= 0x02070000
cobj = PyCapsule_New(p, sig, NULL);
#else
cobj = PyCObject_FromVoidPtr(p, NULL);
#endif
return cobj;
}
/* TypeInfoCompare */
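/* Structural comparison of two Cython type descriptors: size, type
   group, signedness, ndim and per-axis array sizes must agree; struct
   types (group 'S') additionally compare flags and recurse into their
   field lists.  For group 'H' descriptors only the sizes have to match. */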
static int
__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b)
{
int i;
if (!a || !b)
return 0;
if (a == b)
return 1;
if (a->size != b->size || a->typegroup != b->typegroup ||
a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) {
if (a->typegroup == 'H' || b->typegroup == 'H') {
return a->size == b->size;
} else {
return 0;
}
}
if (a->ndim) {
for (i = 0; i < a->ndim; i++)
if (a->arraysize[i] != b->arraysize[i])
return 0;
}
if (a->typegroup == 'S') {
if (a->flags != b->flags)
return 0;
if (a->fields || b->fields) {
if (!(a->fields && b->fields))
return 0;
for (i = 0; a->fields[i].type && b->fields[i].type; i++) {
__Pyx_StructField *field_a = a->fields + i;
__Pyx_StructField *field_b = b->fields + i;
if (field_a->offset != field_b->offset ||
!__pyx_typeinfo_cmp(field_a->type, field_b->type))
return 0;
}
return !a->fields[i].type && !b->fields[i].type;
}
}
return 1;
}
/* MemviewSliceValidateAndInit */
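/* Validates a Py_buffer against the axis specs of the target memoryview
   type: per-dimension stride checks (contiguous / follow), suboffset
   checks (direct / indirect), and an overall C- or Fortran-contiguity
   pass when requested, before the slice struct is initialized. */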
static int
__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec)
{
if (buf->shape[dim] <= 1)
return 1;
if (buf->strides) {
if (spec & __Pyx_MEMVIEW_CONTIG) {
if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) {
if (buf->strides[dim] != sizeof(void *)) {
PyErr_Format(PyExc_ValueError,
"Buffer is not indirectly contiguous "
"in dimension %d.", dim);
goto fail;
}
} else if (buf->strides[dim] != buf->itemsize) {
PyErr_SetString(PyExc_ValueError,
"Buffer and memoryview are not contiguous "
"in the same dimension.");
goto fail;
}
}
if (spec & __Pyx_MEMVIEW_FOLLOW) {
Py_ssize_t stride = buf->strides[dim];
if (stride < 0)
stride = -stride;
if (stride < buf->itemsize) {
PyErr_SetString(PyExc_ValueError,
"Buffer and memoryview are not contiguous "
"in the same dimension.");
goto fail;
}
}
} else {
if (spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1) {
PyErr_Format(PyExc_ValueError,
"C-contiguous buffer is not contiguous in "
"dimension %d", dim);
goto fail;
} else if (spec & (__Pyx_MEMVIEW_PTR)) {
PyErr_Format(PyExc_ValueError,
"C-contiguous buffer is not indirect in "
"dimension %d", dim);
goto fail;
} else if (buf->suboffsets) {
PyErr_SetString(PyExc_ValueError,
"Buffer exposes suboffsets but no strides");
goto fail;
}
}
return 1;
fail:
return 0;
}
static int
__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec)
{
if (spec & __Pyx_MEMVIEW_DIRECT) {
if (buf->suboffsets && buf->suboffsets[dim] >= 0) {
PyErr_Format(PyExc_ValueError,
"Buffer not compatible with direct access "
"in dimension %d.", dim);
goto fail;
}
}
if (spec & __Pyx_MEMVIEW_PTR) {
if (!buf->suboffsets || (buf->suboffsets && buf->suboffsets[dim] < 0)) {
PyErr_Format(PyExc_ValueError,
"Buffer is not indirectly accessible "
"in dimension %d.", dim);
goto fail;
}
}
return 1;
fail:
return 0;
}
static int
__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag)
{
int i;
if (c_or_f_flag & __Pyx_IS_F_CONTIG) {
Py_ssize_t stride = 1;
for (i = 0; i < ndim; i++) {
if (stride * buf->itemsize != buf->strides[i] &&
buf->shape[i] > 1)
{
PyErr_SetString(PyExc_ValueError,
"Buffer not fortran contiguous.");
goto fail;
}
stride = stride * buf->shape[i];
}
} else if (c_or_f_flag & __Pyx_IS_C_CONTIG) {
Py_ssize_t stride = 1;
for (i = ndim - 1; i > -1; i--) {
if (stride * buf->itemsize != buf->strides[i] &&
buf->shape[i] > 1) {
PyErr_SetString(PyExc_ValueError,
"Buffer not C contiguous.");
goto fail;
}
stride = stride * buf->shape[i];
}
}
return 1;
fail:
return 0;
}
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj)
{
struct __pyx_memoryview_obj *memview, *new_memview;
__Pyx_RefNannyDeclarations
Py_buffer *buf;
int i, spec = 0, retval = -1;
__Pyx_BufFmt_Context ctx;
int from_memoryview = __pyx_memoryview_check(original_obj);
__Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0);
if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *)
original_obj)->typeinfo)) {
memview = (struct __pyx_memoryview_obj *) original_obj;
new_memview = NULL;
} else {
memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
original_obj, buf_flags, 0, dtype);
new_memview = memview;
if (unlikely(!memview))
goto fail;
}
buf = &memview->view;
if (buf->ndim != ndim) {
PyErr_Format(PyExc_ValueError,
"Buffer has wrong number of dimensions (expected %d, got %d)",
ndim, buf->ndim);
goto fail;
}
if (new_memview) {
__Pyx_BufFmt_Init(&ctx, stack, dtype);
if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
}
if ((unsigned) buf->itemsize != dtype->size) {
PyErr_Format(PyExc_ValueError,
"Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) "
"does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)",
buf->itemsize,
(buf->itemsize > 1) ? "s" : "",
dtype->name,
dtype->size,
(dtype->size > 1) ? "s" : "");
goto fail;
}
for (i = 0; i < ndim; i++) {
spec = axes_specs[i];
if (!__pyx_check_strides(buf, i, ndim, spec))
goto fail;
if (!__pyx_check_suboffsets(buf, i, ndim, spec))
goto fail;
}
if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))
goto fail;
if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice,
new_memview != NULL) == -1)) {
goto fail;
}
retval = 0;
goto no_fail;
fail:
Py_XDECREF(new_memview);
retval = -1;
no_fail:
__Pyx_RefNannyFinishContext();
return retval;
}
/* ObjectToMemviewSlice */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *obj) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0,
PyBUF_RECORDS, 2,
&__Pyx_TypeInfo_double, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
/* ObjectToMemviewSlice */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_double(PyObject *obj) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0,
PyBUF_RECORDS, 1,
&__Pyx_TypeInfo_double, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
/* CIntFromPyVerify */
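/* Shared by the __Pyx_PyInt_As_* conversions below: evaluate func_value
   once and, when the target type is narrower than the source type,
   verify that the round-trip cast preserves the value, branching to the
   raise_overflow / raise_neg_overflow labels of the enclosing function
   otherwise.  The _EXC variant also propagates a pending Python error
   signalled by a -1 return. */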
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
{\
func_type value = func_value;\
if (sizeof(target_type) < sizeof(func_type)) {\
if (unlikely(value != (func_type) (target_type) value)) {\
func_type zero = 0;\
if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
return (target_type) -1;\
if (is_unsigned && unlikely(value < zero))\
goto raise_neg_overflow;\
else\
goto raise_overflow;\
}\
}\
return (target_type) value;\
}
/* ObjectToMemviewSlice */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(PyObject *obj) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0,
PyBUF_RECORDS, 3,
&__Pyx_TypeInfo_double, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
/* None */
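/* Integer power by repeated squaring.  Exponents 0..3 are peeled off in
   the (deliberately fall-through) switch; a negative exponent makes the
   function return 0.  Inside the loop, (b * (e&1)) | ((~e)&1) evaluates
   to b when the low exponent bit is set and to 1 when it is clear,
   which avoids a branch on every iteration. */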
static CYTHON_INLINE long __Pyx_pow_long(long b, long e) {
long t = b;
switch (e) {
case 3:
t *= b;
case 2:
t *= b;
case 1:
return t;
case 0:
return 1;
}
#if 1
if (unlikely(e<0)) return 0;
#endif
t = 1;
while (likely(e)) {
t *= (b * (e&1)) | ((~e)&1);
b *= b;
e >>= 1;
}
return t;
}
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
const int neg_one = (int) -1, const_zero = (int) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(int) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(int) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(int) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(int),
little, !is_unsigned);
}
}
/* MemviewDtypeToObject */
static CYTHON_INLINE PyObject *__pyx_memview_get_double(const char *itemp) {
return (PyObject *) PyFloat_FromDouble(*(double *) itemp);
}
static CYTHON_INLINE int __pyx_memview_set_double(const char *itemp, PyObject *obj) {
double value = __pyx_PyFloat_AsDouble(obj);
if ((value == (double)-1) && PyErr_Occurred())
return 0;
*(double *) itemp = value;
return 1;
}
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
const long neg_one = (long) -1, const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(long) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(long) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(long) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(long),
little, !is_unsigned);
}
}
/* None */
static CYTHON_INLINE Py_ssize_t __Pyx_pow_Py_ssize_t(Py_ssize_t b, Py_ssize_t e) {
Py_ssize_t t = b;
switch (e) {
case 3:
t *= b;
case 2:
t *= b;
case 1:
return t;
case 0:
return 1;
}
#if 1
if (unlikely(e<0)) return 0;
#endif
t = 1;
while (likely(e)) {
t *= (b * (e&1)) | ((~e)&1);
b *= b;
e >>= 1;
}
return t;
}
/* MemviewSliceCopyTemplate */
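/* Copies an arbitrary strided memoryview slice into a freshly allocated
   C- or Fortran-contiguous array: build a shape tuple, allocate a new
   array object with the same format, wrap it in a memoryview, then
   delegate the element copy to __pyx_memoryview_copy_contents.
   Indirect (suboffset) dimensions cannot be copied this way. */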
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object)
{
__Pyx_RefNannyDeclarations
int i;
__Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } };
struct __pyx_memoryview_obj *from_memview = from_mvs->memview;
Py_buffer *buf = &from_memview->view;
PyObject *shape_tuple = NULL;
PyObject *temp_int = NULL;
struct __pyx_array_obj *array_obj = NULL;
struct __pyx_memoryview_obj *memview_obj = NULL;
__Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0);
for (i = 0; i < ndim; i++) {
if (from_mvs->suboffsets[i] >= 0) {
PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with "
"indirect dimensions (axis %d)", i);
goto fail;
}
}
shape_tuple = PyTuple_New(ndim);
if (unlikely(!shape_tuple)) {
goto fail;
}
__Pyx_GOTREF(shape_tuple);
for (i = 0; i < ndim; i++) {
temp_int = PyInt_FromSsize_t(from_mvs->shape[i]);
if (unlikely(!temp_int)) {
goto fail;
} else {
PyTuple_SET_ITEM(shape_tuple, i, temp_int);
temp_int = NULL;
}
}
array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL);
if (unlikely(!array_obj)) {
goto fail;
}
__Pyx_GOTREF(array_obj);
memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
(PyObject *) array_obj, contig_flag,
dtype_is_object,
from_mvs->memview->typeinfo);
if (unlikely(!memview_obj))
goto fail;
if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0))
goto fail;
if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim,
dtype_is_object) < 0))
goto fail;
goto no_fail;
fail:
__Pyx_XDECREF(new_mvs.memview);
new_mvs.memview = NULL;
new_mvs.data = NULL;
no_fail:
__Pyx_XDECREF(shape_tuple);
__Pyx_XDECREF(temp_int);
__Pyx_XDECREF(array_obj);
__Pyx_RefNannyFinishContext();
return new_mvs;
}
/* CIntFromPy */
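/* Converts a Python integer to a C int with overflow detection (the
   long and char variants below follow the same pattern).  When PyLong
   internals are available, small values are decoded digit by digit from
   ob_digit; otherwise the code falls back to PyLong_AsLong/AsLongLong
   or, for oversized values, to _PyLong_AsByteArray. */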
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
const int neg_one = (int) -1, const_zero = (int) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(int) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (int) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (int) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(int) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0])
case -2:
if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
}
#endif
if (sizeof(int) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
int val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (int) -1;
}
} else {
int val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (int) -1;
val = __Pyx_PyInt_As_int(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to int");
return (int) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to int");
return (int) -1;
}
/* CIntFromPy */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
const long neg_one = (long) -1, const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(long) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (long) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (long) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(long) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
case -2:
if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
}
#endif
if (sizeof(long) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
long val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (long) -1;
}
} else {
long val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (long) -1;
val = __Pyx_PyInt_As_long(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to long");
return (long) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long) -1;
}
/* CIntFromPy */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) {
const char neg_one = (char) -1, const_zero = (char) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(char) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (char) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (char) 0;
case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0])
case 2:
if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) {
return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) {
return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) {
return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (char) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(char) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (char) 0;
case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0])
case -2:
if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
}
#endif
if (sizeof(char) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(char) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
char val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (char) -1;
}
} else {
char val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (char) -1;
val = __Pyx_PyInt_As_char(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to char");
return (char) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to char");
return (char) -1;
}
/* ObjectToMemviewSlice */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_nn___pyx_t_7bmtools_5exact_7helpers_state_t(PyObject *obj) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0,
PyBUF_RECORDS, 1,
&__Pyx_TypeInfo_nn___pyx_t_7bmtools_5exact_7helpers_state_t, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
/* ObjectToMemviewSlice */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_nn___pyx_t_7bmtools_5exact_7helpers_state_t(PyObject *obj) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0,
PyBUF_RECORDS, 2,
&__Pyx_TypeInfo_nn___pyx_t_7bmtools_5exact_7helpers_state_t, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
/* CheckBinaryVersion */
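/* Compares the "major.minor" version string this module was compiled
   against with the running interpreter's, character by character around
   the dot (so only the first digit of each component is examined); on a
   mismatch it issues a warning rather than failing the import. */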
static int __Pyx_check_binary_version(void) {
char ctversion[4], rtversion[4];
PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
char message[200];
PyOS_snprintf(message, sizeof(message),
"compiletime version %s of module '%.100s' "
"does not match runtime version %s",
ctversion, __Pyx_MODULE_NAME, rtversion);
return PyErr_WarnEx(NULL, message, 1);
}
return 0;
}
/* FunctionExport */
static int __Pyx_ExportFunction(const char *name, void (*f)(void), const char *sig) {
PyObject *d = 0;
PyObject *cobj = 0;
union {
void (*fp)(void);
void *p;
} tmp;
d = PyObject_GetAttrString(__pyx_m, (char *)"__pyx_capi__");
if (!d) {
PyErr_Clear();
d = PyDict_New();
if (!d)
goto bad;
Py_INCREF(d);
if (PyModule_AddObject(__pyx_m, (char *)"__pyx_capi__", d) < 0)
goto bad;
}
tmp.fp = f;
#if PY_VERSION_HEX >= 0x02070000
cobj = PyCapsule_New(tmp.p, sig, 0);
#else
cobj = PyCObject_FromVoidPtrAndDesc(tmp.p, (void *)sig, 0);
#endif
if (!cobj)
goto bad;
if (PyDict_SetItemString(d, name, cobj) < 0)
goto bad;
Py_DECREF(cobj);
Py_DECREF(d);
return 0;
bad:
Py_XDECREF(cobj);
Py_XDECREF(d);
return -1;
}
/* ModuleImport */
#ifndef __PYX_HAVE_RT_ImportModule
#define __PYX_HAVE_RT_ImportModule
static PyObject *__Pyx_ImportModule(const char *name) {
PyObject *py_name = 0;
PyObject *py_module = 0;
py_name = __Pyx_PyIdentifier_FromString(name);
if (!py_name)
goto bad;
py_module = PyImport_Import(py_name);
Py_DECREF(py_name);
return py_module;
bad:
Py_XDECREF(py_name);
return 0;
}
#endif
/* VoidPtrImport */
#ifndef __PYX_HAVE_RT_ImportVoidPtr
#define __PYX_HAVE_RT_ImportVoidPtr
static int __Pyx_ImportVoidPtr(PyObject *module, const char *name, void **p, const char *sig) {
PyObject *d = 0;
PyObject *cobj = 0;
d = PyObject_GetAttrString(module, (char *)"__pyx_capi__");
if (!d)
goto bad;
cobj = PyDict_GetItemString(d, name);
if (!cobj) {
PyErr_Format(PyExc_ImportError,
"%.200s does not export expected C variable %.200s",
PyModule_GetName(module), name);
goto bad;
}
#if PY_VERSION_HEX >= 0x02070000
if (!PyCapsule_IsValid(cobj, sig)) {
PyErr_Format(PyExc_TypeError,
"C variable %.200s.%.200s has wrong signature (expected %.500s, got %.500s)",
PyModule_GetName(module), name, sig, PyCapsule_GetName(cobj));
goto bad;
}
*p = PyCapsule_GetPointer(cobj, sig);
#else
{const char *desc, *s1, *s2;
desc = (const char *)PyCObject_GetDesc(cobj);
if (!desc)
goto bad;
s1 = desc; s2 = sig;
while (*s1 != '\0' && *s1 == *s2) { s1++; s2++; }
if (*s1 != *s2) {
PyErr_Format(PyExc_TypeError,
"C variable %.200s.%.200s has wrong signature (expected %.500s, got %.500s)",
PyModule_GetName(module), name, sig, desc);
goto bad;
}
*p = PyCObject_AsVoidPtr(cobj);}
#endif
if (!(*p))
goto bad;
Py_DECREF(d);
return 0;
bad:
Py_XDECREF(d);
return -1;
}
#endif
/* FunctionImport */
#ifndef __PYX_HAVE_RT_ImportFunction
#define __PYX_HAVE_RT_ImportFunction
static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig) {
PyObject *d = 0;
PyObject *cobj = 0;
union {
void (*fp)(void);
void *p;
} tmp;
d = PyObject_GetAttrString(module, (char *)"__pyx_capi__");
if (!d)
goto bad;
cobj = PyDict_GetItemString(d, funcname);
if (!cobj) {
PyErr_Format(PyExc_ImportError,
"%.200s does not export expected C function %.200s",
PyModule_GetName(module), funcname);
goto bad;
}
#if PY_VERSION_HEX >= 0x02070000
if (!PyCapsule_IsValid(cobj, sig)) {
PyErr_Format(PyExc_TypeError,
"C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)",
PyModule_GetName(module), funcname, sig, PyCapsule_GetName(cobj));
goto bad;
}
tmp.p = PyCapsule_GetPointer(cobj, sig);
#else
{const char *desc, *s1, *s2;
desc = (const char *)PyCObject_GetDesc(cobj);
if (!desc)
goto bad;
s1 = desc; s2 = sig;
while (*s1 != '\0' && *s1 == *s2) { s1++; s2++; }
if (*s1 != *s2) {
PyErr_Format(PyExc_TypeError,
"C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)",
PyModule_GetName(module), funcname, sig, desc);
goto bad;
}
tmp.p = PyCObject_AsVoidPtr(cobj);}
#endif
*f = tmp.fp;
if (!(*f))
goto bad;
Py_DECREF(d);
return 0;
bad:
Py_XDECREF(d);
return -1;
}
#endif
/* InitStrings */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
++t;
}
return 0;
}
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
}
static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) {
Py_ssize_t ignore;
return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}
static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if CYTHON_COMPILING_IN_CPYTHON && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
__Pyx_sys_getdefaultencoding_not_ascii &&
#endif
PyUnicode_Check(o)) {
#if PY_VERSION_HEX < 0x03030000
char* defenc_c;
PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
if (!defenc) return NULL;
defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
{
char* end = defenc_c + PyBytes_GET_SIZE(defenc);
char* c;
for (c = defenc_c; c < end; c++) {
if ((unsigned char) (*c) >= 128) {
PyUnicode_AsASCIIString(o);
return NULL;
}
}
}
#endif
*length = PyBytes_GET_SIZE(defenc);
return defenc_c;
#else
if (__Pyx_PyUnicode_READY(o) == -1) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
if (PyUnicode_IS_ASCII(o)) {
*length = PyUnicode_GET_LENGTH(o);
return PyUnicode_AsUTF8(o);
} else {
PyUnicode_AsASCIIString(o);
return NULL;
}
#else
return PyUnicode_AsUTF8AndSize(o, length);
#endif
#endif
} else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
if (PyByteArray_Check(o)) {
*length = PyByteArray_GET_SIZE(o);
return PyByteArray_AS_STRING(o);
} else
#endif
{
char* result;
int r = PyBytes_AsStringAndSize(o, &result, length);
if (unlikely(r < 0)) {
return NULL;
} else {
return result;
}
}
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
int is_true = x == Py_True;
if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
else return PyObject_IsTrue(x);
}
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
PyNumberMethods *m;
#endif
const char *name = NULL;
PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
if (PyInt_Check(x) || PyLong_Check(x))
#else
if (PyLong_Check(x))
#endif
return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
if (m && m->nb_int) {
name = "int";
res = PyNumber_Int(x);
}
else if (m && m->nb_long) {
name = "long";
res = PyNumber_Long(x);
}
#else
if (m && m->nb_int) {
name = "int";
res = PyNumber_Long(x);
}
#endif
#else
res = PyNumber_Int(x);
#endif
if (res) {
#if PY_MAJOR_VERSION < 3
if (!PyInt_Check(res) && !PyLong_Check(res)) {
#else
if (!PyLong_Check(res)) {
#endif
PyErr_Format(PyExc_TypeError,
"__%.4s__ returned non-%.4s (type %.200s)",
name, name, Py_TYPE(res)->tp_name);
Py_DECREF(res);
return NULL;
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject *x;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(b))) {
if (sizeof(Py_ssize_t) >= sizeof(long))
return PyInt_AS_LONG(b);
else
return PyInt_AsSsize_t(b);
}
#endif
if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)b)->ob_digit;
const Py_ssize_t size = Py_SIZE(b);
if (likely(__Pyx_sst_abs(size) <= 1)) {
ival = likely(size) ? digits[0] : 0;
if (size == -1) ival = -ival;
return ival;
} else {
switch (size) {
case 2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
}
}
#endif
return PyLong_AsSsize_t(b);
}
x = PyNumber_Index(b);
if (!x) return -1;
ival = PyInt_AsSsize_t(x);
Py_DECREF(x);
return ival;
}
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
|
dz1z3.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
void Usage(char *prog_name);
#define ACCURACY 0.01
/*
* tasks
*/
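/*
 * Both solutions estimate pi with the Leibniz series
 *   pi = 4 * sum_{i >= 0} (-1)^i / (2*i + 1).
 * The task-based version splits the index range into num_tasks chunks;
 * each OpenMP task accumulates into its own slot of sums[], so the
 * partial sums need no synchronization, and only the final reduction
 * is done serially.
 */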
double sequential_solution(int argc, char *argv[])
{
long long n, i;
double factor = 0.0; /* initialized so the diagnostic printf below does not read an indeterminate value */
double sum = 0.0;
if (argc != 2)
Usage(argv[0]);
n = strtoll(argv[1], NULL, 10);
if (n < 1)
Usage(argv[0]);
printf("Before for loop, factor = %f.\n", factor);
for (i = 0; i < n; i++)
{
factor = (i % 2 == 0) ? 1.0 : -1.0;
sum += factor / (2 * i + 1);
}
printf("After for loop, factor = %f.\n", factor);
sum = 4.0 * sum;
printf("With n = %lld terms\n", n);
printf(" Our estimate of pi = %.14f\n", sum);
printf(" Ref estimate of pi = %.14f\n", 4.0 * atan(1.0));
return sum;
}
double parallel_solution(int argc, char *argv[])
{
long long n, i;
double factor = 0.0; /* initialized so the diagnostic printf below does not read an indeterminate value */
double sum = 0.0;
if (argc != 2)
Usage(argv[0]);
n = strtoll(argv[1], NULL, 10);
if (n < 1)
Usage(argv[0]);
printf("Before for loop, factor = %f.\n", factor);
int it;
int num_tasks = 8;
double sums[num_tasks];
double last_factor = 0.0;
/* A VLA cannot take an initializer, so zero the partial sums explicitly
   before the tasks accumulate into them. */
for (it = 0; it < num_tasks; it++)
sums[it] = 0.0;
#pragma omp parallel shared(it, num_tasks, sums)
{
#pragma omp single
{
long long int start_i, end_i;
for (it = 0; it < num_tasks; it++)
{
// init start_i, end_i
start_i = it * n / num_tasks;
end_i = (it + 1) * n / num_tasks;
#pragma omp task firstprivate(it, start_i, end_i) private(i, factor) shared(sums, last_factor)
{
for (i = start_i; i < end_i; i++)
{
factor = (i % 2 == 0) ? 1.0 : -1.0;
sums[it] += factor / (2 * i + 1);
}
if (it == num_tasks - 1)
last_factor = factor;
} // task
}
} // single
} // parallel
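/* The barrier at the end of the parallel region guarantees that all
   tasks have completed, so sums[] and last_factor can be read safely. */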
factor = last_factor;
for (it = 0; it < num_tasks; it++)
{
sum += sums[it];
}
printf("After for loop, factor = %f.\n", factor);
sum = 4.0 * sum;
printf("With n = %lld terms\n", n);
printf(" Our estimate of pi = %.14f\n", sum);
printf(" Ref estimate of pi = %.14f\n", 4.0 * atan(1.0));
return sum;
}
int main(int argc, char *argv[])
{
printf("---------------------Sequential execution---------------------\n");
double start_time_seq = omp_get_wtime();
double sum_seq = sequential_solution(argc, argv);
double end_time_seq = omp_get_wtime();
printf("----------------------Parallel execution----------------------\n");
double start_time_parallel = omp_get_wtime();
double sum_parallel = parallel_solution(argc, argv);
double end_time_parallel = omp_get_wtime();
printf("\nSequential elapsed time: %lfs\n", end_time_seq - start_time_seq);
printf("Parallel elapsed time: %lfs\n", end_time_parallel - start_time_parallel);
if (fabs(sum_seq - sum_parallel) < ACCURACY)
printf("Test PASSED\n");
else
printf("Test FAILED\n");
return 0;
}
void Usage(char *prog_name)
{
fprintf(stderr, "usage: %s <thread_count> <n>\n", prog_name);
fprintf(stderr, " n is the number of terms and should be >= 1\n");
exit(1);
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <memory>
#include <string>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class AttributeList;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OverloadCandidate;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
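// A hypothetical usage sketch of the single-element cache above; `FID` is
// illustrative. Repeated lookups for the same FileID stay in the cache, and
// switching to a different file flushes the cached entry back into the map:
//
//   FileNullabilityMap NullabilityMap;
//   FileNullability &FN = NullabilityMap[FID]; // miss: entry pulled from Map
//   FN.SawTypeNullability = true;              // mutates the cached entry
//   (void)NullabilityMap[FID];                 // hit: no DenseMap access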
/// Sema - This implements semantic analysis and AST building for C, C++,
/// and Objective-C.
class Sema {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
///\brief Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///\brief Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See the comment in the overload below for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// \brief Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// \brief Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// \brief Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// \brief Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// \brief Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// \brief \#pragma clang section kind.
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
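// A hypothetical sketch of how a parsed '#pragma clang section' might drive
// Act(); the section name is illustrative, and the assumption that an empty
// name maps to PCSA_Clear is ours, not stated by this header:
//
//   #pragma clang section bss = "MY_BSS" // -> Act(Loc, PCSA_Set, NameLit)
//   #pragma clang section bss = ""       // -> Act(Loc, PCSA_Clear, nullptr)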
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method() {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
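// A hypothetical usage sketch mirroring #pragma pack; the default value 8
// and the slot label are illustrative:
//
//   PragmaStack<unsigned> Pack(/*Default=*/8);
//   Pack.Act(Loc, PSK_Push_Set, "mine", 4u); // #pragma pack(push, mine, 4)
//   assert(Pack.CurrentValue == 4 && Pack.hasValue());
//   Pack.Act(Loc, PSK_Pop, "mine", 0u);      // #pragma pack(pop, mine)
//   Pack.Act(Loc, PSK_Reset, "", 0u);        // #pragma pack() -> default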
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// \brief Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispAttr::Mode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
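// A hypothetical sketch of the RAII pattern above; `S`, the slot label, and
// `IsCXXMethod` are illustrative:
//
//   {
//     Sema::PragmaStackSentinelRAII
//         Guard(S, "InternalPragmaState", /*ShouldAct=*/IsCXXMethod);
//     // ... parse the method body ...
//   } // destructor pops the sentinel slot from every MS #pragma stack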
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// \brief This represents the stack of attributes that were pushed by
/// \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
AttributeList *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
SmallVector<PragmaAttributeEntry, 2> PragmaAttributeStack;
/// \brief The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// \brief This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// \brief Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// \brief Store a list of either DeclRefExprs or MemberExprs
/// that contain a reference to a variable (constant) that may or may not
/// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue
/// and discarded value conversions have been applied to all subexpressions
/// of the enclosing full expression. This is cleared at the end of each
/// full expression.
llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs;
/// \brief Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
///
/// This array is never empty. Clients should ignore the first
/// element, which is used to cache a single FunctionScopeInfo
/// that's used to parse every top-level function.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType;
/// \brief Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// \brief Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// \brief Delete-expressions to be analyzed at the end of the translation
/// unit.
///
/// This list contains class members and the locations of delete-expressions
/// for which we could not prove whether they mismatch the new-expression
/// used in the initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations for which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// \brief Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// \brief All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// \brief The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// \brief All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// \brief All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedExceptionSpecChecks;
/// \brief All the members seen during a class definition which were both
/// explicitly defaulted and had explicitly-specified exception
/// specifications, along with the function type containing their
/// user-specified exception specification. Those exception specifications
/// were overridden with the default specifications, but we still need to
/// check whether they are compatible with the default specification, and
/// we can't do that until the nested set of class definitions is complete.
SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2>
DelayedDefaultedMemberExceptionSpecs;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// \brief Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// \brief The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
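// A hypothetical sketch of the delay/replay protocol; `Pool` is illustrative
// and emitting (or dropping) its contents is a separate, later decision:
//
//   sema::DelayedDiagnosticPool Pool(DelayedDiagnostics.getCurrentPool());
//   ParsingDeclState State = DelayedDiagnostics.push(Pool);
//   // ... parse a declaration; access/deprecation diags collect in Pool ...
//   DelayedDiagnostics.popWithoutEmitting(State);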
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
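// A hypothetical usage sketch; `S` and `RD` are illustrative:
//
//   {
//     Sema::ContextRAII SavedCtx(S, RD); // RD becomes S.CurContext
//     // ... process declarations that belong inside RD ...
//   } // ~ContextRAII() restores CurContext, this-type, and diag state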
/// \brief RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
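// A hypothetical sketch of synthesizing a defaulted special member's body;
// `Ctor` and `UseLoc` are illustrative:
//
//   SynthesizedFunctionScope Scope(S, Ctor);
//   Scope.addContextNote(UseLoc); // attach "while defining ..." notes
//   // ... build the body in a potentially-evaluated context ...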
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before being declared. Rare; such an identifier may alias
/// another identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// \brief Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle them separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// \brief The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// \brief The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// \brief The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// \brief The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library reside.
NamespaceDecl *StdExperimentalNamespaceCache;
/// \brief The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// \brief The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// \brief Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// \brief The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// \brief The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// \brief Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// \brief Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// \brief The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// \brief The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// \brief Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// \brief The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// \brief The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// \brief The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// \brief The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// \brief The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// \brief The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// \brief id<NSCopying> type.
QualType QIDNSCopying;
/// \brief will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields, such as the operand of the MS inline-asm SIZE
/// operator (see ExpressionEvaluationContext::UnevaluatedAbstract).
bool AllowAbstractFieldReference;
/// \brief Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// \brief The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// \brief The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// \brief The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// \brief The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// \brief The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// \brief The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// \brief The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
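// A few illustrative mappings from source constructs to the contexts above
// (a sketch; the actual choice is made wherever each construct is parsed):
//
//   sizeof(e)                          -> Unevaluated
//   case e:                            -> ConstantEvaluated
//   void f(T x = e);                   -> PotentiallyEvaluatedIfUsed (for e)
//   an ordinary statement's expression -> PotentiallyEvaluated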
/// \brief Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// \brief The expression evaluation context.
ExpressionEvaluationContext Context;
/// \brief Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// \brief Whether we are in a decltype expression.
bool IsDecltype;
/// \brief The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// \brief The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs;
/// \brief The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// \brief The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// \brief The context information used to mangle lambda expressions
/// and block literals within this context.
///
/// This mangling information is allocated lazily, since most contexts
/// do not have lambda expressions or block literals.
std::unique_ptr<MangleNumberingContext> MangleNumbering;
/// \brief If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// \brief If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
bool IsDecltype)
: Context(Context), ParentCleanup(ParentCleanup),
IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects),
NumTypos(0),
ManglingContextDecl(ManglingContextDecl), MangleNumbering() { }
/// \brief Retrieve the mangling numbering context, used to consistently
/// number constructs like lambdas for mangling.
MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// \brief Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
const DeclContext *DC,
Decl *&ManglingContextDecl);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// \brief A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// \brief A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// \brief The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// \brief The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// \brief A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of each unparsed default
// argument.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedButUsed - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// \brief Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
};
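// A hypothetical usage sketch; the guard saves FPFeatures on entry and
// restores it on scope exit:
//
//   {
//     FPContractStateRAII FPGuard(S);
//     // ... parse a compound statement that may contain
//     //     #pragma STDC FP_CONTRACT ON|OFF ...
//   } // previous FPOptions restored here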
void addImplicitTypedef(StringRef Name, QualType T);
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// \brief Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
///\brief Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// \brief Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case anyway.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// \brief Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
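// A hypothetical usage sketch; the diagnostic ID is illustrative. Streamed
// arguments are forwarded to the underlying DiagnosticBuilder:
//
//   Diag(D->getLocation(), diag::err_some_diag) << D->getDeclName() << T;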
/// \brief Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// \brief Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// \brief Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// \brief Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// \brief Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// \brief This is used to inform Sema what the current TemplateParameterDepth
/// is during parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD,
CapturedRegionKind K);
void
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
const BlockExpr *blkExpr = nullptr);
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const {
if (FunctionScopes.empty())
return nullptr;
for (int e = FunctionScopes.size()-1; e >= 0; --e) {
if (isa<sema::BlockScopeInfo>(FunctionScopes[e]))
continue;
return FunctionScopes[e];
}
return nullptr;
}
template <typename ExprT>
void recordUseOfEvaluatedWeak(const ExprT *E, bool IsRead=true) {
if (!isUnevaluatedContext())
getCurFunction()->recordUseOfWeak(E, IsRead);
}
void PushCompoundScope();
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// \brief Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// \brief Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// \brief Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// \brief Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
TypeSourceInfo *ReturnTypeInfo);
/// \brief Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// \brief The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// \brief Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
llvm::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, llvm::index_sequence_for<Ts...>());
DB << T;
}
};
private:
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser *Diagnoser);
struct ModuleScope {
clang::Module *Module = nullptr;
bool ModuleInterface = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// \brief Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }
/// \brief Make a merged definition of an existing hidden definition \p ND
/// visible.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M) { return VisibleModules.isVisible(M); }
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return !D->isHidden() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isCompleteType(SourceLocation Loc, QualType T) {
return !RequireCompleteTypeImpl(Loc, T, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
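// A hypothetical usage sketch; the diagnostic ID is illustrative. The extra
// arguments are bound into the diagnoser and streamed before the type itself:
//
//   if (RequireCompleteType(Loc, T, diag::err_some_incomplete_diag, Range))
//     return ExprError(); // T was incomplete; a diagnostic was emitted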
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it appeared in an
/// evaluated context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// \brief Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
NC_Unknown,
NC_Error,
NC_Keyword,
NC_Type,
NC_Expression,
NC_NestedNameSpecifier,
NC_TypeTemplate,
NC_VarTemplate,
NC_FunctionTemplate
};
class NameClassification {
NameClassificationKind Kind;
ExprResult Expr;
TemplateName Template;
ParsedType Type;
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification NestedNameSpecifier() {
return NameClassification(NC_NestedNameSpecifier);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
ExprResult getExpression() const {
assert(Kind == NC_Expression);
return Expr;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// \brief Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification
ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name,
SourceLocation NameLoc, const Token &NextToken,
bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);
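// A hypothetical sketch of consuming the classification; the handlers are
// illustrative:
//
//   NameClassification C =
//       ClassifyName(S, SS, Name, NameLoc, Next, /*IsAddressOfOperand=*/false);
//   switch (C.getKind()) {
//   case NC_Type:       return handleType(C.getType());
//   case NC_Expression: return handleExpr(C.getExpression());
//   case NC_TypeTemplate:
//   case NC_VarTemplate:
//   case NC_FunctionTemplate:
//                       return handleTemplate(C.getTemplateName());
//   default:            break;
//   }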
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
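// For illustration (a sketch, not from this header), the source pattern
// these two routines target: a non-template name followed by what looks
// like a template argument list.
//
//   int max;                // some non-template entity
//   void g(int a, int b) {
//     max<int>(a, b);       // 'max' is an expression here; plausibly the
//   }                       // user intended a template-name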
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name,
SourceLocation Loc);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// \brief Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
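// An illustrative case where delaying is impossible (a sketch, not from
// this header): the body of a constexpr member is needed while the
// enclosing class is still being parsed.
//
//   struct S {
//     static constexpr int size() { return 4; }
//     int buf[size()];   // must evaluate size() here, mid-parse
//   };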
/// \brief Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// \brief Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// \brief Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// \brief Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S,
AttributeList *AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
Partition, ///< 'module partition X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path);
/// \brief The parser has processed a module import declaration.
///
/// \param AtLoc The location of the '@' symbol, if any.
///
/// \param ImportLoc The location of the 'import' keyword.
///
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
ModuleIdPath Path);
/// \brief The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// \brief The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// \brief The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// \brief Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// \brief Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// \brief We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// \brief We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// \brief Retrieve a suitable printing policy.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// \brief Retrieve a suitable printing policy.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, AttributeList *Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
AttributeList *MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields,
SourceLocation LBrac, SourceLocation RBrac,
AttributeList *AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform an ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike in C++, the body is actually parsed, and an error is emitted in
/// case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// \brief Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// \brief Invoked when we must temporarily exit the Objective-C container
/// scope for parsing/looking up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy,
bool EnumUnderlyingIsImplicit,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
AttributeList *Attrs, SourceLocation EqualLoc,
Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl,
ArrayRef<Decl *> Elements,
Scope *S, AttributeList *Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// \brief Make the given externally-produced declaration visible at the
/// top level scope.
///
/// \param D The externally-produced declaration to push.
///
/// \param Name The name of the externally-produced declaration.
void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// \brief Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// \brief Don't merge availability attributes at all.
AMK_None,
/// \brief Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// \brief Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// \brief Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
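// For illustration (a sketch, not from this header), AMK_Redeclaration
// requires the attributes to match exactly, as in:
//
//   void f() __attribute__((availability(macos, introduced=10.10)));
//   void f() __attribute__((availability(macos, introduced=10.10))); // OK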
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
IdentifierInfo *Platform,
bool Implicit,
VersionTuple Introduced,
VersionTuple Deprecated,
VersionTuple Obsoleted,
bool IsUnavailable,
StringRef Message,
bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK,
unsigned AttrSpellingListIndex);
TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
TypeVisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
VisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex, StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
unsigned AttrSpellingListIndex,
MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
IdentifierInfo *Format, int FormatIdx,
int FirstArg, unsigned AttrSpellingListIndex);
SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, SourceRange Range,
IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
CommonAttr *mergeCommonAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation.
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
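// For illustration (not from this header), the three outcomes:
//
//   void f(int);
//   void f(double);   // Ovl_Overload: a distinct signature
//   void f(int);      // Ovl_Match: same signature, a redeclaration
//   int f;            // Ovl_NonFunction: lookup finds a non-function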
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true);
/// \brief Checks availability of the function depending on the current
/// function context. Inside an unavailable function, unavailability is ignored.
///
/// \returns true if \p FD is unavailable and current context is inside
/// an available function, false otherwise.
bool isFunctionConsideredUnavailable(FunctionDecl *FD);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf ///< Condition in a constexpr if statement.
};
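// For illustration (a sketch, not from this header), where each context
// arises, given some 'constexpr int x = ...;':
//
//   switch (n) { case x: break; }          // CCEK_CaseValue
//   enum E : unsigned { A = x };           // CCEK_Enumerator
//   template <int N> struct T; T<x> *p;    // CCEK_TemplateArg
//   new int[x];                            // CCEK_NewExpr
//   if constexpr (x) {}                    // CCEK_ConstexprIf (C++17)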
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// \brief Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// \brief Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// \brief Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// \brief Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// \brief Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// \brief Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// \brief Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
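// A minimal sketch of driving a contextual conversion through the diagnoser
// interface above (illustrative; the diagnostic ID and 'SemaRef' are
// assumptions, not code from this header):
//
//   struct SizeDiagnoser : Sema::ICEConvertDiagnoser {
//     SizeDiagnoser()
//         : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/false,
//                               /*Suppress=*/false,
//                               /*SuppressConversion=*/false) {}
//     Sema::SemaDiagnosticBuilder
//     diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) override {
//       return S.Diag(Loc, diag::err_size_not_integral) << T; // assumed ID
//     }
//     // ...the remaining diagnose*/note* overrides follow the same shape.
//   } Diagnoser;
//   ExprResult Converted =
//       SemaRef.PerformContextualImplicitConversion(Loc, E, Diagnoser);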
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
void AddOverloadCandidate(FunctionDecl *Function,
DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = false,
ConversionSequenceList EarlyConversions = None);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate,
ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions,
bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr,
QualType ObjectType = QualType(),
Expr::Classification
ObjectClassification = {});
void AddConversionCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet& CandidateSet,
bool AllowObjCConversionOnExplicit,
bool AllowResultConversion = true);
void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet,
bool AllowObjCConversionOnExplicit,
bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
QualType DestType = QualType(),
bool TakingAddress = false);
// Emit as a series of 'note's all templates and non-templates identified by
// the expression E.
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
///
/// \param AllowTopLevelCond Whether to allow the result to be the
/// complete top-level condition.
std::pair<Expr *, std::string>
findFailedBooleanCondition(Expr *Cond, bool AllowTopLevelCond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConversion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
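// For reference (per the C++ standard's range-for rewrite, not this
// header), the begin/end calls built here implement:
//
//   for (auto x : range) body;
//     =>
//   { auto &&__range = range;
//     auto __begin = begin-expr;  // member begin() or ADL begin(__range)
//     auto __end   = end-expr;
//     for (; __begin != __end; ++__begin) { auto x = *__begin; body; } }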
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria are specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// @brief Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// \brief Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// \brief Look up any declaration with any name.
LookupAnyName
};
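// For illustration (not from this header), the same identifier can resolve
// differently under different lookup kinds:
//
//   struct stat { int x; };    // found by LookupTagName
//   int stat(const char *);    // found by LookupOrdinaryName
//   void f() {
//     struct stat s;           // tag lookup finds the struct
//     stat("p");               // ordinary lookup finds the function
//   }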
/// \brief Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// \brief The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// \brief The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// \brief The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
/// \brief The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// \brief The lookup resulted in an error.
LOLR_Error,
/// \brief The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// \brief The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// \brief The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// \brief The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// \brief The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
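// For illustration (not from this header), declarations matching each
// non-error outcome for a suffix such as _u:
//
//   unsigned operator""_u(unsigned long long);   // LOLR_Cooked
//   unsigned operator""_u(const char *);         // LOLR_Raw
//   template <char...>
//   unsigned operator""_u();                     // LOLR_Template
//   template <typename T, T...>                  // LOLR_StringTemplate
//   unsigned operator""_u();                     //   (Clang extension)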
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// \brief The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// \brief Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
/// \brief The set of known/encountered (unique, canonicalized) NamespaceDecls.
///
/// The boolean value will be true to indicate that the namespace was loaded
/// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// \brief Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// \brief Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// \brief Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// \brief Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
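// For illustration (not from this header), the classic ADL case this
// routine implements:
//
//   namespace N { struct X {}; void f(X); }
//   void g() {
//     N::X x;
//     f(x);   // no 'f' in scope; found in namespace N via the type of 'x'
//   }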
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non-error-recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// \brief Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
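// A minimal sketch of the Filter idiom (illustrative; 'isUsableHere' is an
// assumed caller-side predicate, not part of this interface):
//
//   ExprResult Res = CorrectDelayedTyposInExpr(
//       E, [&](Expr *Candidate) -> ExprResult {
//         // Rejecting forces another combination of corrections to be tried.
//         return isUsableHere(Candidate) ? ExprResult(Candidate) : ExprError();
//       });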
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D, const AttributeList *AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const AttributeList *AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for the nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const AttributeList &attr, unsigned &value);
bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckNoReturnAttr(const AttributeList &attr);
bool CheckNoCallerSavedRegsAttr(const AttributeList &attr);
bool checkStringLiteralArgumentAttr(const AttributeList &Attr,
unsigned ArgNum, StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType &T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Check whether a nullability type specifier can be added to the given
/// type.
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param nullabilityLoc The location of the nullability specifier.
///
/// \param isContextSensitive Whether this nullability specifier was
/// written as a context-sensitive keyword (in an Objective-C
/// method) or an Objective-C property attribute, rather than as an
/// underscored type specifier.
///
/// \param allowArrayTypes Whether to accept nullability specifiers on an
/// array type (e.g., because it will decay to a pointer).
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability,
SourceLocation nullabilityLoc,
bool isContextSensitive,
bool allowArrayTypes);
/// \brief Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implelementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which
/// backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If method is a property setter/getter and
/// it property has a backing ivar, returns this ivar; otherwise, returns NULL.
/// It also returns ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declaraed in interface or
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// \brief Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See descriptoin of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// \brief - Returns instance or factory methods in global method pool for
/// given selector. It checks the desired kind first, if none is found, and
/// parameter checkTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, function returns false; otherwise, it
/// returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// \brief - Returns a selector which best matches given argument list or
/// nullptr if none could be found
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// \brief Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaraation for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(ActOnFinishFullExpr(Arg, CC).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
StmtResult ActOnExprStmt(ExprResult Arg);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// \brief A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S): S(S) {
S.ActOnStartOfCompoundStmt();
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
/// An RAII helper that pops function a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal,
SourceLocation DotDotDotLoc, Expr *RHSVal,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
bool AllowParamOrMoveConstructible);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
bool AllowParamOrMoveConstructible);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// \brief If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// \brief Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// \brief Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
bool IsDecltype = false);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
ReuseLambdaContextDecl_t,
bool IsDecltype = false);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void UpdateMarkingForLValueToRValue(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// \brief Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely check whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// \brief Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// \brief Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// \brief Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// \brief Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// \brief Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// \brief Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
std::unique_ptr<CorrectionCandidateCallback> CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
ExprResult
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentType IT);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
SourceLocation LParenLoc,
ArrayRef<Expr *> Arg,
SourceLocation RParenLoc,
Expr *Config = nullptr,
bool IsExecConfig = false);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// \brief Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation Loc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of a the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// \brief Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// \brief The symbol exists.
IER_Exists,
/// \brief The symbol does not exist.
IER_DoesNotExist,
/// \brief The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// \brief An error occurred.
IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc,
IdentifierInfo *Ident,
SourceLocation LBrace,
AttributeList *AttrList,
UsingDirectiveDecl * &UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
/// \brief Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// \brief Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// \brief Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope,
SourceLocation UsingLoc,
SourceLocation NamespcLoc,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
AttributeList *AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
SourceLocation UsingLoc,
bool HasTypenameKeyword,
SourceLocation TypenameLoc,
CXXScopeSpec &SS,
DeclarationNameInfo NameInfo,
SourceLocation EllipsisLoc,
AttributeList *AttrList,
bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// correspnding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope,
AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc,
CXXScopeSpec &SS,
UnqualifiedId &Name,
SourceLocation EllipsisLoc,
AttributeList *AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope,
AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc,
UnqualifiedId &Name,
AttributeList *AttrList,
TypeResult Type,
Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// \brief Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// \brief Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(ComputedEST != EST_ComputedNoexcept &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// \brief The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// \brief The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// \brief Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// \brief Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// \brief Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_ComputedNoexcept;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
/// \brief Determine what sort of exception specification a defaulted
/// copy constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// default constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defautled
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// \brief Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// \brief Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// \brief Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// \brief Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// \brief Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// \brief Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// \brief Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl,
CXXDestructorDecl *Destructor);
/// \brief Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// \brief Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// \brief Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// \brief Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// \brief Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// \brief Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// \brief Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// \brief Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// \brief Whether this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// \brief Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// \brief Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
//// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// \brief Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// \brief When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// \brief RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// \brief Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class) along with the given qualifiers.
/// along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
/// \brief Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \return returns 'true' if failed, 'false' if success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// \brief Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
//// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Expr *ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
bool UseGlobal, QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// \brief Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr) {
return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc()
: SourceLocation());
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue = false,
bool IsConstexpr = false,
bool IsLambdaInitCaptureInitializer = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// \brief The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// \brief The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// \brief Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// \brief The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// \brief The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// \brief The location of the identifier.
SourceLocation IdentifierLoc;
/// \brief The location of the '::'.
SourceLocation CCLoc;
/// \brief Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// \brief The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case do not emit error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// \brief The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// \brief Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// \brief Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// \brief Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// \brief Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
bool IsConstexprSpecified);
/// \brief Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// \brief Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, IdentifierInfo *Id,
LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef,
IdentifierInfo *Id,
bool DirectInit, Expr *&Init);
/// \brief Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// \brief Build the implicit field for an init-capture.
FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// \brief Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief Introduce the lambda parameters into scope.
void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope);
/// \brief Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// \brief Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::LambdaScopeInfo::Capture &From);
/// \brief Diagnose if an explicit lambda capture is unused.
void DiagnoseUnusedLambdaCapture(const sema::LambdaScopeInfo::Capture &From);
/// \brief Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// \brief Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// \brief Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access,
SourceLocation ASLoc,
SourceLocation ColonLoc,
AttributeList *Attrs = nullptr);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// \brief The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// \brief The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// \brief The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// \brief Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// \brief Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// \brief Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc,
const CXXRecordDecl *RD);
/// \brief Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// \brief Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
void CheckCompletedCXXClass(CXXRecordDecl *Record);
void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc,
Decl *TagDecl,
SourceLocation LBrac,
SourceLocation RBrac,
AttributeList *AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass(Decl *D);
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD,
const FunctionProtoType *T);
void CheckDelayedMemberExceptionSpecs();
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// \brief When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// \brief The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr,
TemplateParameterList *TemplateParams,
AccessSpecifier AS,
SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc,
unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false,
bool IsClassName = false);
/// \brief Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult
ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc,
SourceLocation ModulePrivateLoc,
TemplateIdAnnotation &TemplateId,
AttributeList *Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(FunctionDecl *FD,
TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult
ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec,
SourceLocation KWLoc,
const CXXScopeSpec &SS,
TemplateTy Template,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
AttributeList *Attr);
DeclResult
ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec,
SourceLocation KWLoc,
CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
AttributeList *Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// \brief Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// \brief The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// \brief The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// \brief The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// \brief Check that the given template arguments can be be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateArgumentLoc &Arg,
unsigned ArgumentPackIndex);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// \brief Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// \brief We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// \brief We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// \brief We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// \brief Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// \brief Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// \brief The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// \brief An arbitrary expression.
UPPC_Expression = 0,
/// \brief The base type of a class type.
UPPC_BaseType,
/// \brief The type of an arbitrary declaration.
UPPC_DeclarationType,
/// \brief The type of a data member.
UPPC_DataMemberType,
/// \brief The size of a bit-field.
UPPC_BitFieldWidth,
/// \brief The expression in a static assertion.
UPPC_StaticAssertExpression,
/// \brief The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// \brief The enumerator value.
UPPC_EnumeratorValue,
/// \brief A using declaration.
UPPC_UsingDeclaration,
/// \brief A friend declaration.
UPPC_FriendDeclaration,
/// \brief A declaration qualifier.
UPPC_DeclarationQualifier,
/// \brief An initializer.
UPPC_Initializer,
/// \brief A default argument.
UPPC_DefaultArgument,
/// \brief The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// \brief The type of an exception.
UPPC_ExceptionType,
/// \brief Partial specialization.
UPPC_PartialSpecialization,
/// \brief Microsoft __if_exists.
UPPC_IfExists,
/// \brief Microsoft __if_not_exists.
UPPC_IfNotExists,
/// \brief Lambda expression.
UPPC_Lambda,
/// \brief Block expression,
UPPC_Block
};
/// \brief Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// \brief If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostc should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// \brief If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// \brief If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// \brief If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// \brief If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// \brief If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// \brief Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// \brief Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// \brief Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// \brief Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// \brief Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// \brief Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// \brief Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// \brief Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// \brief Template argument deduction was successful.
TDK_Success = 0,
/// \brief The declaration was invalid; do nothing.
TDK_Invalid,
/// \brief Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// \brief Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// \brief Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// \brief Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// \brief Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// \brief After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// \brief After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// \brief A non-depnedent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// \brief When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// \brief When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// \brief The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// \brief Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// \brief Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// \brief CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// brief A function argument from which we performed template argument
// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// \brief Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// \brief Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// \brief Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// \brief Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// \brief Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// \brief The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template argument determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
} Kind;
/// \brief Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// \brief The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// \brief The entity that is being synthesized.
Decl *Entity;
/// \brief The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// \brief The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// \brief The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// \brief The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// \brief The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// \brief The source range that covers the construct that cause
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation), Entity(nullptr), Template(nullptr),
TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {}
/// \brief Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// \brief List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// \brief Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// \brief Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// \brief Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instanting a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// \brief Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// \brief Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// \brief The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// \brief The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// \brief The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// \brief RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
/// \brief For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// \brief A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// \brief Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// \brief Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// \brief Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// \brief Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// \brief Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// \brief Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// \brief RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// \brief Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
/// \brief RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
/// \brief The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// \brief Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// \brief The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// \brief A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// \brief Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// \brief An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation),
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// \brief The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
/// \brief The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index,
///
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
unsigned ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// \brief Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange,
Decl * const *ProtoRefs,
unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc,
AttributeList *AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc,
IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc,
Decl * const *ProtoRefNames, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc,
AttributeList *AttrList);
Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName,
SourceLocation CategoryLoc,
Decl * const *ProtoRefs,
unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc,
AttributeList *AttrList);
Decl *ActOnStartClassImplementation(
SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName, SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
ArrayRef<IdentifierLocPair> IdentList,
AttributeList *attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build a an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Check the application of the Objective-C '__kindof' qualifier to
/// the given type.
bool checkObjCKindOfType(QualType &type, SourceLocation loc);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
AttributeList *ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType,
ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from the Sel.getNumArgs().
ObjCArgInfo *ArgInfo,
DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args
AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// \brief Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// \brief The message is sent to 'super'.
ObjCSuperMessage,
/// \brief The message is an instance message.
ObjCInstanceMessage,
/// \brief The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// \brief Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// \brief Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// \brief Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispAttr::Mode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// \brief Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// \brief Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// \brief Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// \brief Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// a the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
/// \brief Called on well-formed '\#pragma clang attribute push'.
void ActOnPragmaAttributePush(AttributeList &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
/// \brief Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc);
/// \brief Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// \brief Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// \brief Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// \brief Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// \brief Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
unsigned SpellingListIndex, bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
unsigned SpellingListIndex);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
unsigned SpellingListIndex);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
Expr *MinBlocks, unsigned SpellingListIndex);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
unsigned SpellingListIndex, bool InInstantiation = false);
void AddParameterABIAttr(SourceRange AttrRange, Decl *D,
ParameterABI ABI, unsigned SpellingListIndex);
void AddNSConsumedAttr(SourceRange AttrRange, Decl *D,
unsigned SpellingListIndex, bool isNSConsumed,
bool isTemplateInstantiation);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = Ext;
}
/// \brief Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// \brief Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// \brief Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// \brief Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// \brief Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// \brief Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Set to true inside '#pragma omp declare target' region.
bool IsInOpenMPDeclareTargetContext = false;
/// \brief Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
public:
/// \brief Return true if the provided declaration \a VD should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool IsOpenMPCapturedByRef(ValueDecl *D, unsigned Level);
/// \brief Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *IsOpenMPCapturedDecl(ValueDecl *D);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// \brief Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPPrivateDecl(ValueDecl *D, unsigned Level);
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, ValueDecl *D, unsigned Level);
/// \brief Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPTargetCapturedDecl(ValueDecl *D, unsigned Level);
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// \brief Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// \brief Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// \brief End analysis of clauses.
void EndOpenMPClause();
/// \brief Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// \brief Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// \brief Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope,
CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id);
/// \brief Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// \brief Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// \brief Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// \brief Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// \brief Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// \brief Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// \brief Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// \brief Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// \brief Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Called on the start of target region i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of target region i.e. '#pragme omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
NamedDeclSetType &SameDirectiveDecls);
/// Check declaration inside target region.
void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return IsInOpenMPDeclareTargetContext;
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return true if (un)supported features for the current target should be
/// diagnosed if OpenMP (offloading) is enabled.
bool shouldDiagnoseTargetSupportFromOpenMP() const {
return !getLangOpts().OpenMPIsDevice || isInOpenMPDeclareTargetContext() ||
isInOpenMPTargetExecutionDirective();
}
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// \brief Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// \brief End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// \brief Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// \brief Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// \brief Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// \brief Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// \brief Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type);
/// \brief Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// \brief Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind,
OpenMPLinearClauseKind LinKind, OpenMPMapClauseKind MapTypeModifier,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation DepLinMapLoc);
/// \brief Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// \brief Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// \brief Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(OpenMPMapClauseKind MapTypeModifier,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// \brief Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// \brief Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// \brief Called on well-formed 'to' clause.
OMPClause *ActOnOpenMPToClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief The kind of conversion being performed.
enum CheckedConversionKind {
/// \brief An implicit conversion.
CCK_ImplicitConversion,
/// \brief A C-style cast.
CCK_CStyleCast,
/// \brief A functional-style cast.
CCK_FunctionalCast,
/// \brief A cast other than a C-style cast.
CCK_OtherCast
};
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of an unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collector argument expressions for various
/// form of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
// UsualArithmeticConversions - performs the UsualUnaryConversions on it's
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointers types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointers types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointers types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// \brief If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool isRelational);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool isRelational);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
QualType T1, QualType T2,
bool &DerivedToBase,
bool &ObjCConversion,
bool &ObjCLifetimeConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// \brief Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// \brief Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType &paramType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// \brief Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// \brief Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage,
SourceLocation lbrac, SourceLocation rbrac,
SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// \brief Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(QualType ReceiverType,
ObjCMethodDecl *Method,
bool isClassMessage, bool isSuperMessage);
/// \brief If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// \brief Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
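// A sketch of the intended flow (hypothetical call site, not part of this
// header): the parser hands the condition of an 'if constexpr' to
// ActOnCondition(S, Loc, CondExpr, ConditionKind::ConstexprIf) and then
// consults ConditionResult::getKnownValue() to see whether the branch can
// be folded to a known true/false at compile time.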
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return The converted expression, or ExprError() if there were errors.
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// \brief Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Checks conversion of the condition expression
/// to bool; returns ExprError() if the conversion is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// \brief Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns the (possibly converted)
/// expression on success, or ExprError() on failure.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns the bit-width expression on success, or ExprError() on failure.
/// Can optionally report whether the bit-field is of width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
CUDADeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
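// Note: the llvm::DenseMapInfo specialization that makes FunctionDeclAndLoc
// usable as a DenseSet/DenseMap key is defined at the end of this file, in
// namespace llvm.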
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
CUDAKnownEmittedFns;
/// A partial call graph maintained during CUDA compilation to support
/// deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to CUDAKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
/* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
SourceLocation>>
CUDACallGraph;
/// Diagnostic builder for CUDA errors which may or may not be deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class CUDADiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
CUDADiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
~CUDADiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (CUDADiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a CUDADiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
template <typename T>
friend const CUDADiagBuilder &operator<<(const CUDADiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiag.hasValue())
*Diag.PartialDiag << Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<PartialDiagnostic> PartialDiag;
};
/// Creates a CUDADiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
CUDADiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a CUDADiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
CUDADiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const AttributeList *Attr);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
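// For example (a sketch of the ordering above): a __device__ function calling
// another __device__ function is CFP_Native; a __host__ __device__ function
// calling a __device__ function is CFP_SameSide when compiling for the device,
// but CFP_WrongSide when compiling for the host, in which case the call is
// only legal if it is never codegen'ed (see CheckCUDACall below).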
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// \name Code completion
//@{
/// \brief Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// \brief Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// \brief Code completion occurs within a class, struct, or union.
PCC_Class,
/// \brief Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// \brief Code completion occurs within an Objective-C implementation or
/// category implementation.
PCC_ObjCImplementation,
/// \brief Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// \brief Code completion occurs following one or more template
/// headers.
PCC_Template,
/// \brief Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// \brief Code completion occurs within an expression.
PCC_Expression,
/// \brief Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// \brief Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// \brief Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// \brief Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// \brief Code completion occurs where only a type is permitted.
PCC_Type,
/// \brief Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// \brief Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args);
void CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc,
ArrayRef<Expr *> Args);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteReturn(Scope *S);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDecl.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
int Low, int High);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
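// For example, a function declared with __attribute__((format(printf, 1, 2)))
// is checked as FST_Printf, and one with format(strftime, ...) as
// FST_Strftime; GetFormatStringType() below performs this mapping from the
// FormatAttr.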
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS);
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// \brief Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD);
/// \brief Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// \brief Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// \brief Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// \brief A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// \brief Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// \brief Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// \brief The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// \brief The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// \brief Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// \brief To be used for checking whether the number of arguments being
/// passed to a function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
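// Worked example for the code-completion special case above: with
// NumParams == 2, a call that already has NumArgs == 2 and is being
// partially overloaded (i.e. we are completing just after a comma) is
// treated as having 2 + 1 = 3 arguments, so TooManyArguments() returns true.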
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
private:
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedDefaultedMemberExceptionSpecs.empty() &&
"there shouldn't be any pending delayed defaulted member "
"exception specs");
assert(S.DelayedDllExportClasses.empty() &&
"there shouldn't be any pending delayed DLL export classes");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedExceptionSpecChecks) SavedExceptionSpecChecks;
decltype(DelayedDefaultedMemberExceptionSpecs)
SavedDefaultedMemberExceptionSpecs;
decltype(DelayedDllExportClasses) SavedDllExportClasses;
void swapSavedState() {
SavedExceptionSpecChecks.swap(S.DelayedExceptionSpecChecks);
SavedDefaultedMemberExceptionSpecs.swap(
S.DelayedDefaultedMemberExceptionSpecs);
SavedDllExportClasses.swap(S.DelayedDllExportClasses);
}
};
/// \brief Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// \brief Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// \brief Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// \brief Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// \brief This function checks whether the expression is in the set of
/// potentially misaligned members and is being converted to a pointer type T
/// with alignment requirements no stricter than the member's. If so, it
/// removes the expression from the set. This is used when we do not want to
/// diagnose such misaligned access (e.g. in conversions to void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// \brief This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
};
/// \brief RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(Sema &Actions,
Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
bool IsDecltype = false,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
IsDecltype);
}
EnterExpressionEvaluationContext(Sema &Actions,
Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
bool IsDecltype = false)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(NewContext,
Sema::ReuseLambdaContextDecl,
IsDecltype);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList, nullptr, false);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// \brief Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// \brief The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
GB_unop__expm1_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__expm1_fc32_fc32)
// op(A') function: GB (_unop_tran__expm1_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = GB_cexpm1f (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_cexpm1f (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = GB_cexpm1f (z) ; \
}
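// For example, GB_CAST_OP (pC, pA) expands (up to its comments) to:
//     GxB_FC32_t aij = Ax [pA] ;
//     GxB_FC32_t z = aij ;
//     Cx [pC] = GB_cexpm1f (z) ;
// which matches the loop bodies written out in
// GB (_unop_apply__expm1_fc32_fc32) below.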
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EXPM1 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__expm1_fc32_fc32)
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = GB_cexpm1f (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = GB_cexpm1f (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__expm1_fc32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
srad.c | long rows, cols, size_I, size_R, niter, iter, k;
double *I, *J, q0sqr, sum, sum2, tmp, meanROI, varROI ;
double Jc, G2, L, num, den, qsqr;
long *iN, *iS, *jE, *jW;
double *dN, *dS, *dW, *dE;
long r1, r2, c1, c2;
double cN, cS, cW, cE;
double *c, D;
double lambda;
long nthreads;
long _pMalloc;
long _lastRand;
void srand(long seed)
{
_lastRand = seed;
}
long rand()
{
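// Minimal multiplicative congruential PRNG: deterministic and low quality,
// but sufficient for filling the test image with reproducible values.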
_lastRand = _lastRand * 0xfef3f6f4f3f2f1;
return _lastRand;
}
void *malloc_sr(long size)
{
long p;
// Align to the next page, so the memory allocated using 'malloc_sr' is always 'standalone'
if((_pMalloc & 0xFFF) != 0)
{
_pMalloc = (_pMalloc & 0xFFFFFFFFFFFFF000) + 0x1000;
}
p = _pMalloc;
_pMalloc += size;
return (void *)p;
}
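// Usage sketch: thanks to the page-alignment step above, successive
// allocations are disjoint and each starts on a page boundary, e.g.
//     double *a = (double *)malloc_sr(8 * n);
//     double *b = (double *)malloc_sr(8 * n);
// yields two non-overlapping, page-aligned buffers; nothing is ever freed.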
void random_matrix(double *I, long rows, long cols)
{
long i, j;
srand(7);
for(i = 0 ; i < rows ; i = i + 1)
{
for(j = 0 ; j < cols ; j = j + 1)
{
I[i * cols + j] = (double)(rand()) / 0xFFFFFFFFFFFFFFFF;
}
}
}
double exp(double x)
{
return 1;
}
long main()
{
long i, j;
_pMalloc = 0x500000000;
niter = 10;
rows = 1024;
cols = 1024;
r1 = 200; //y1 position of the speckle
r2 = 500; //y2 position of the speckle
c1 = 800; //x1 position of the speckle
c2 = 1000; //x2 position of the speckle
lambda = 0.5; //Lambda value
niter = 100; //number of iterations
size_I = cols * rows;
size_R = (r2 - r1 + 1) * (c2 - c1 + 1);
I = (double *)malloc_sr(size_I * 8);
J = (double *)malloc_sr(size_I * 8);
c = (double *)malloc_sr(8 * size_I) ;
iN = (long *)malloc_sr(8 * rows) ;
iS = (long *)malloc_sr(8 * rows) ;
jW = (long *)malloc_sr(8 * cols) ;
jE = (long *)malloc_sr(8 * cols) ;
dN = (double *)malloc_sr(8 * size_I) ;
dS = (double *)malloc_sr(8 * size_I) ;
dW = (double *)malloc_sr(8 * size_I) ;
dE = (double *)malloc_sr(8 * size_I) ;
for(i = 0; i < rows; i+=1)
{
iN[i] = i - 1;
iS[i] = i + 1;
}
for(j = 0; j < cols; j+=1)
{
jW[j] = j - 1;
jE[j] = j + 1;
}
iN[0] = 0;
iS[rows - 1] = rows - 1;
jW[0] = 0;
jE[cols - 1] = cols - 1;
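// The boundary entries above clamp each out-of-range neighbor to the pixel
// itself (replicate-style padding), so the derivative stencils in the main
// loop never index outside the image.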
random_matrix(I, rows, cols);
for(k = 0; k < size_I; k+=1)
{
J[k] = exp(I[k]) ;
}
for(iter = 0; iter < niter; iter+=1)
{
sum = 0;
sum2 = 0;
for(i = r1; i <= r2; i = i + 1)
{
for(j = c1; j <= c2; j = j + 1)
{
tmp = J[i * cols + j];
sum += tmp ;
sum2 += tmp * tmp;
}
}
meanROI = sum / size_R;
varROI = (sum2 / size_R) - meanROI * meanROI;
q0sqr = varROI / (meanROI * meanROI);
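// meanROI and varROI are the sample mean and variance of J over the speckle
// ROI; q0sqr, their ratio, is the squared coefficient of variation used as
// the noise estimate that drives the diffusion coefficient below.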
// #pragma omp parallel for shared(J, dN, dS, dW, dE, c, rows, cols, iN, iS, jW, jE) private(i, j, k, Jc, G2, L, num, den, qsqr)
for(i = 0 ; i < rows ; i = i + 1)
{
for(j = 0; j < cols; j = j + 1)
{
k = i * cols + j;
Jc = J[k];
// directional derivatives
dN[k] = J[iN[i] * cols + j] - Jc;
dS[k] = J[iS[i] * cols + j] - Jc;
dW[k] = J[i * cols + jW[j]] - Jc;
dE[k] = J[i * cols + jE[j]] - Jc;
G2 = (dN[k] * dN[k] + dS[k] * dS[k] + dW[k] * dW[k] + dE[k] * dE[k]) / (Jc * Jc);
L = (dN[k] + dS[k] + dW[k] + dE[k]) / Jc;
num = (0.5 * G2) - ((1.0 / 16.0) * (L * L)) ;
den = 1 + (0.25 * L);
qsqr = num / (den * den);
// diffusion coefficient (equ 33)
den = (qsqr - q0sqr) / (q0sqr * (1 + q0sqr)) ;
c[k] = 1.0 / (1.0 + den) ;
// saturate diffusion coefficient
if(c[k] < 0)
{
c[k] = 0;
}
else if(c[k] > 1)
{
c[k] = 1;
}
}
}
// #pragma omp parallel for shared(J, c, rows, cols, lambda) private(i, j, k, D, cS, cN, cW, cE)
for(i = 0; i < rows; i = i + 1)
{
for(j = 0; j < cols; j = j + 1)
{
// current index
k = i * cols + j;
// diffusion coefficient
cN = c[k];
cS = c[iS[i] * cols + j];
cW = c[k];
cE = c[i * cols + jE[j]];
// divergence (equ 58)
D = cN * dN[k] + cS * dS[k] + cW * dW[k] + cE * dE[k];
// image update (equ 61)
J[k] = J[k] + 0.25 * lambda * D;
}
}
}
return 0;
}
|