source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
GB_unaryop__lnot_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp64_fp64
// op(A') function: GB_tran__lnot_fp64_fp64
// C type: double
// A type: double
// cast: double cij = (double) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
double z = (double) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = lnot (cast (Ax)): apply the logical-NOT unary operator entry-wise.
// The GB_CAST_OP macro used by the generated template is expanded inline
// here for readability; the double -> double cast is a no-op but is kept to
// mirror the generated code's cast step.
GrB_Info GB_unop__lnot_fp64_fp64
(
double *Cx, // Cx and Ax may be aliased
double *Ax,
int64_t anz, // number of entries to process
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time;
// the caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// expansion of GB_CAST_OP (k, k):
double aij = Ax [k] ; // aij = Ax [k]
double z = (double) aij ; // cast (a no-op for fp64 -> fp64)
Cx [k] = !(z != 0) ; // Cx [k] = lnot (z)
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lnot (cast (A')): transpose A, typecast, and apply the unary operator.
// The loop nest lives in the shared template GB_unaryop_transpose.c, which is
// specialized here through the GB_* macros defined earlier in this file.
GrB_Info GB_tran__lnot_fp64_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
// Rowcounts, Iter, and A_slice describe how A is partitioned across the
// naslice tasks; their exact semantics are defined by the included
// template (GB_unaryop_transpose.c), not here.
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// operator/type combination disabled at compile time; caller uses the
// generic transpose kernel instead
return (GrB_NO_VALUE) ;
#else
// select phase 2 (the numerical phase) of the transpose template
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
dynamic_module_load.c | // RUN: %libomptarget-compile-aarch64-unknown-linux-gnu -DSHARED -fPIC -shared -o %t.so && %clang %flags %s -o %t-aarch64-unknown-linux-gnu -ldl && %libomptarget-run-aarch64-unknown-linux-gnu %t.so 2>&1 | %fcheck-aarch64-unknown-linux-gnu
// RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu -DSHARED -fPIC -shared -o %t.so && %clang %flags %s -o %t-powerpc64-ibm-linux-gnu -ldl && %libomptarget-run-powerpc64-ibm-linux-gnu %t.so 2>&1 | %fcheck-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu -DSHARED -fPIC -shared -o %t.so && %clang %flags %s -o %t-powerpc64le-ibm-linux-gnu -ldl && %libomptarget-run-powerpc64le-ibm-linux-gnu %t.so 2>&1 | %fcheck-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-x86_64-pc-linux-gnu -DSHARED -fPIC -shared -o %t.so && %clang %flags %s -o %t-x86_64-pc-linux-gnu -ldl && %libomptarget-run-x86_64-pc-linux-gnu %t.so 2>&1 | %fcheck-x86_64-pc-linux-gnu
#ifdef SHARED
#include <stdio.h>
// Entry point exported by the shared library built with -DSHARED.  Runs an
// empty OpenMP target region (exercising libomptarget initialization from a
// dlopen'ed image), then prints the marker the FileCheck lines below expect.
int foo() {
#pragma omp target
;
printf("%s\n", "DONE.");
return 0;
}
#else
#include <dlfcn.h>
#include <stdio.h>
// Host driver (built without -DSHARED): dlopen the test library given as
// argv[1], look up its "foo" entry point, and run it.  Returns nonzero if
// either dynamic-loading step fails.
int main(int argc, char **argv) {
void *Handle = dlopen(argv[1], RTLD_NOW);
int (*Foo)(void);
if (Handle == NULL) {
printf("dlopen() failed: %s\n", dlerror());
return 1;
}
Foo = (int (*)(void)) dlsym(Handle, "foo");
/* Bug fix: the original re-tested Handle here, so a failed dlsym() fell
* through and called a NULL function pointer.  Check Foo instead. */
if (Foo == NULL) {
printf("dlsym() failed: %s\n", dlerror());
return 1;
}
// CHECK: DONE.
// CHECK-NOT: {{abort|fault}}
return Foo();
}
#endif
|
mlp_mnist_f32.c | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/hfp/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Evangelos Georganas, Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include <libxsmm.h>
#include "../common/mnist.h"
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#if defined(_OPENMP)
# include <omp.h>
#endif
/* include c-based dnn library */
#include "../common/dnn_common.h"
#define CHKERR_LIBXSMM_DNN(A) { const int chkerr_libxsmm_dnn_ = A; if (LIBXSMM_DNN_SUCCESS != chkerr_libxsmm_dnn_) { \
fprintf(stderr, "%s\n", libxsmm_dnn_get_error(chkerr_libxsmm_dnn_)); global_status = chkerr_libxsmm_dnn_; } \
}
#define TEST_ACCURACY
int main(int argc, char* argv[])
{
float **act_libxsmm, **fil_libxsmm, **delact_libxsmm, **delfil_libxsmm;
float **bias_libxsmm, **delbias_libxsmm;
unsigned char **relumask_libxsmm;
int *label_libxsmm;
void* scratch = NULL;
size_t scratch_size = 0;
/* some parameters we can overwrite via cli,
default is some inner layer of overfeat */
int iters = 10; /* repetitions of benchmark */
int MB = 32; /* mini-batch size, "N" */
int fuse_type = 0; /* 0: nothing fused, 1: relu fused, 2: elementwise fused, 3: relu and elementwise fused */
char type = 'A'; /* 'A': ALL, 'F': FP, 'B': BP */
int bn = 64;
int bk = 64;
int bc = 64;
int *C; /* number of input feature maps, "C" */
int num_layers = 0;
const char *const env_check = getenv("CHECK");
const double check = LIBXSMM_ABS(0 == env_check ? 1 : atof(env_check));
#if defined(_OPENMP)
int nThreads = omp_get_max_threads(); /* number of threads */
#else
int nThreads = 1; /* number of threads */
#endif
unsigned long long l_start, l_end;
double l_total = 0.0;
double gflop = 0.0;
int i, j;
double fil_size = 0.0;
double act_size = 0.0;
libxsmm_dnn_fullyconnected_desc fullyconnected_desc;
libxsmm_dnn_fullyconnected** libxsmm_fc_layer;
libxsmm_dnn_optimizer_desc optimizer_desc;
libxsmm_dnn_optimizer** libxsmm_opt;
libxsmm_dnn_softmaxloss_desc softmaxloss_desc;
libxsmm_dnn_softmaxloss* libxsmm_softmax;
libxsmm_dnn_tensor** libxsmm_act;
libxsmm_dnn_tensor** libxsmm_delact;
libxsmm_dnn_tensor** libxsmm_fil;
libxsmm_dnn_tensor** libxsmm_delfil;
libxsmm_dnn_tensor** libxsmm_bias;
libxsmm_dnn_tensor** libxsmm_delbias;
libxsmm_dnn_tensor** libxsmm_relumask;
libxsmm_dnn_tensor* libxsmm_label;
libxsmm_dnn_tensor_datalayout* libxsmm_layout;
libxsmm_dnn_tensor_datalayout* libxsmm_layout2;
libxsmm_dnn_err_t status;
libxsmm_dnn_err_t global_status = LIBXSMM_DNN_SUCCESS;
if (argc > 1 && !strncmp(argv[1], "-h", 3)) {
printf("Usage: %s iters MB fuse_type type bn bk bc C1 C2 ... CN\n", argv[0]);
return 0;
}
libxsmm_rng_set_seed(1);
/* reading new values from cli */
i = 1;
num_layers = argc - 9;
if (argc > i) iters = atoi(argv[i++]);
if (argc > i) MB = atoi(argv[i++]);
if (argc > i) fuse_type = atoi(argv[i++]);
if (argc > i) type = *(argv[i++]);
if (argc > i) bn = atoi(argv[i++]);
if (argc > i) bk = atoi(argv[i++]);
if (argc > i) bc = atoi(argv[i++]);
/* allocate the number of channles buffer */
if ( num_layers < 1 ) {
printf("Usage: %s iters MB fuse_type type bn bk bc C1 C2 ... CN\n", argv[0]);
return 0;
}
C = (int*)malloc((num_layers+2)*sizeof(int));
for (j = 0 ; i < argc; ++i, ++j ) {
C[j] = atoi(argv[i]);
}
/* handle softmax config */
C[num_layers+1] = C[num_layers];
if (type != 'A' && type != 'F' && type != 'B') {
printf("type needs to be 'A' (All), 'F' (FP only), 'B' (BP only)\n");
return -1;
}
if ( (fuse_type < 0) || (fuse_type > 5) ) {
printf("fuse type needs to be 0 (None), 1 (Bias), 2 (ReLU), 3 (Sigmoid), 4 (Bias+ReLU), 5 (Bias+Sigmoid)\n");
return -1;
}
#if defined(__SSE3__)
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
_MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
_MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
#endif
/* print some summary */
printf("##########################################\n");
printf("# Setting Up (Common) #\n");
printf("##########################################\n");
printf("PARAMS: N:%d\n", MB);
printf("PARAMS: Layers: %d\n", num_layers);
printf("PARAMS: ITERS:%d", iters); if (LIBXSMM_FEQ(0, check)) printf(" Threads:%d\n", nThreads); else printf("\n");
for (i = 0; i < num_layers; ++i ) {
if (i == 0) {
act_size += (double)(MB*C[i]*sizeof(float))/(1024.0*1024.0);
printf("SIZE Activations %i (%dx%d): %10.2f MiB\n", i, MB, C[i], (double)(MB*C[i]*sizeof(float))/(1024.0*1024.0) );
}
act_size += (double)(MB*C[i+1]*sizeof(float))/(1024.0*1024.0);
fil_size += (double)(C[i]*C[i+1]*sizeof(float))/(1024.0*1024.0);
printf("SIZE Filter %i (%dx%d): %10.2f MiB\n", i, C[i], C[i+1], (double)(C[i]*C[i+1]*sizeof(float))/(1024.0*1024.0) );
printf("SIZE Activations %i (%dx%d): %10.2f MiB\n", i+1, MB, C[i+1], (double)(MB*C[i+1]*sizeof(float))/(1024.0*1024.0) );
}
act_size += (double)(MB*C[num_layers+1]*sizeof(float))/(1024.0*1024.0);
printf("SIZE Activations softmax (%dx%d): %10.2f MiB\n", MB, C[num_layers+1], (double)(MB*C[num_layers+1]*sizeof(float))/(1024.0*1024.0) );
printf("\nTOTAL SIZE Activations: %10.2f MiB\n", act_size );
printf("TOTAL SIZE Filter: %10.2f MiB\n", fil_size );
printf("TOTAL SIZE delActivations: %10.2f MiB\n", act_size );
printf("TOTAL SIZE delFilter: %10.2f MiB\n", fil_size );
printf("TOTAL SIZE MLP: %10.2f MiB\n", (2.0*fil_size) + (2.0*act_size) );
/* allocate data */
/* +2 because of the softwax layer */
act_libxsmm = (float**)malloc( (num_layers+2)*sizeof(float*) );
delact_libxsmm = (float**)malloc( (num_layers+1)*sizeof(float*) );
for ( i = 0 ; i < num_layers+2; ++i ) {
act_libxsmm[i] = (float*)libxsmm_aligned_malloc( MB*C[i]*sizeof(float), 2097152);
/* softmax has no incoming gradients */
if ( i < num_layers+1 ) {
delact_libxsmm[i] = (float*)libxsmm_aligned_malloc( MB*C[i]*sizeof(float), 2097152);
}
}
fil_libxsmm = (float**)malloc( num_layers*sizeof(float*) );
delfil_libxsmm = (float**)malloc( num_layers*sizeof(float*) );
for ( i = 0 ; i < num_layers; ++i ) {
fil_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(float), 2097152);
delfil_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(float), 2097152);
}
bias_libxsmm = (float**)malloc( num_layers*sizeof(float*) );
delbias_libxsmm = (float**)malloc( num_layers*sizeof(float*) );
for ( i = 0 ; i < num_layers; ++i ) {
bias_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i+1]*sizeof(float), 2097152);
delbias_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i+1]*sizeof(float), 2097152);
}
relumask_libxsmm = (unsigned char**)malloc( num_layers*sizeof(unsigned char*) );
for ( i = 0 ; i < num_layers; ++i ) {
relumask_libxsmm[i] = (unsigned char*)libxsmm_aligned_malloc( MB*C[i+1]*sizeof(unsigned char), 2097152);
}
label_libxsmm = (int*)libxsmm_aligned_malloc( MB*sizeof(int), 2097152 );
/* init data */
for ( i = 0 ; i < num_layers+2; ++i ) {
init_buf( act_libxsmm[i], MB*C[i], 0, 0 );
}
for ( i = 0 ; i < num_layers+1; ++i ) {
init_buf( delact_libxsmm[i], MB*C[i], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
init_buf( fil_libxsmm[i], C[i]*C[i+1], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
init_buf( delfil_libxsmm[i], C[i]*C[i+1], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
init_buf( bias_libxsmm[i], C[i+1], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
init_buf( delbias_libxsmm[i], C[i+1], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
zero_buf_uint8( relumask_libxsmm[i], MB*C[i+1] );
}
zero_buf_int32( label_libxsmm, MB );
printf("\n");
printf("##########################################\n");
printf("# Setting Up (custom-Storage) #\n");
printf("##########################################\n");
libxsmm_fc_layer = (libxsmm_dnn_fullyconnected**) malloc( num_layers*sizeof(libxsmm_dnn_fullyconnected*) );
libxsmm_opt = (libxsmm_dnn_optimizer**) malloc( num_layers*sizeof(libxsmm_dnn_optimizer*) );
libxsmm_act = (libxsmm_dnn_tensor**) malloc( (num_layers+2)*sizeof(libxsmm_dnn_tensor*) );
libxsmm_delact = (libxsmm_dnn_tensor**) malloc( (num_layers+1)*sizeof(libxsmm_dnn_tensor*) );
libxsmm_fil = (libxsmm_dnn_tensor**) malloc( num_layers*sizeof(libxsmm_dnn_tensor*) );
libxsmm_delfil = (libxsmm_dnn_tensor**) malloc( num_layers*sizeof(libxsmm_dnn_tensor*) );
libxsmm_bias = (libxsmm_dnn_tensor**) malloc( num_layers*sizeof(libxsmm_dnn_tensor*) );
libxsmm_delbias = (libxsmm_dnn_tensor**) malloc( num_layers*sizeof(libxsmm_dnn_tensor*) );
libxsmm_relumask = (libxsmm_dnn_tensor**) malloc( num_layers*sizeof(libxsmm_dnn_tensor*) );
for ( i = 0; i < num_layers; ++i ) {
fullyconnected_desc.N = MB;
fullyconnected_desc.C = C[i];
fullyconnected_desc.K = C[i+1];
fullyconnected_desc.bn = (MB % bn == 0) ? bn : MB;
fullyconnected_desc.bc = (C[i ] % bc == 0) ? bc : C[i ];
fullyconnected_desc.bk = (C[i+1] % bk == 0) ? bk : C[i+1];
fullyconnected_desc.threads = nThreads;
fullyconnected_desc.compressed_A = 0;
fullyconnected_desc.sparsity_factor_A = 1;
fullyconnected_desc.datatype_in = LIBXSMM_DNN_DATATYPE_F32;
fullyconnected_desc.datatype_out = LIBXSMM_DNN_DATATYPE_F32;
fullyconnected_desc.buffer_format = LIBXSMM_DNN_TENSOR_FORMAT_NCPACKED;
fullyconnected_desc.filter_format = LIBXSMM_DNN_TENSOR_FORMAT_CKPACKED;
/* MNIST Specific where everywhere we use relu act except the last layer */
if (i < num_layers - 1) {
fullyconnected_desc.fuse_ops = LIBXSMM_DNN_FULLYCONNECTED_FUSE_RELU;
} else {
fullyconnected_desc.fuse_ops = LIBXSMM_DNN_FULLYCONNECTED_FUSE_NONE;
}
libxsmm_fc_layer[i] = libxsmm_dnn_create_fullyconnected( fullyconnected_desc, &status );
CHKERR_LIBXSMM_DNN( status );
optimizer_desc.C = C[i];
optimizer_desc.K = C[i+1];
optimizer_desc.bc = (C[i ] % bc == 0) ? bc : C[i ];
optimizer_desc.bk = (C[i+1] % bk == 0) ? bk : C[i+1];
optimizer_desc.learning_rate = 0.1f;
optimizer_desc.threads = nThreads;
optimizer_desc.opt_type = LIBXSMM_DNN_OPTIMIZER_SGD;
optimizer_desc.datatype = LIBXSMM_DNN_DATATYPE_F32;
optimizer_desc.datatype_master = LIBXSMM_DNN_DATATYPE_F32;
optimizer_desc.filter_format = LIBXSMM_DNN_TENSOR_FORMAT_CKPACKED;
libxsmm_opt[i] = libxsmm_dnn_create_optimizer( optimizer_desc, &status );
CHKERR_LIBXSMM_DNN( status );
/* setup LIBXSMM buffers */
if ( i == 0 ) {
libxsmm_layout = libxsmm_dnn_fullyconnected_create_tensor_datalayout( libxsmm_fc_layer[i], LIBXSMM_DNN_REGULAR_INPUT, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_act[i] = libxsmm_dnn_link_tensor( libxsmm_layout, act_libxsmm[i], &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_fullyconnected_create_tensor_datalayout( libxsmm_fc_layer[i], LIBXSMM_DNN_GRADIENT_INPUT, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_delact[i] = libxsmm_dnn_link_tensor( libxsmm_layout, delact_libxsmm[i], &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
}
libxsmm_layout = libxsmm_dnn_fullyconnected_create_tensor_datalayout( libxsmm_fc_layer[i], LIBXSMM_DNN_REGULAR_OUTPUT, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_act[i+1] = libxsmm_dnn_link_tensor( libxsmm_layout, act_libxsmm[i+1], &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_fullyconnected_create_tensor_datalayout( libxsmm_fc_layer[i], LIBXSMM_DNN_GRADIENT_OUTPUT, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_delact[i+1] = libxsmm_dnn_link_tensor( libxsmm_layout, delact_libxsmm[i+1], &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_fullyconnected_create_tensor_datalayout( libxsmm_fc_layer[i], LIBXSMM_DNN_REGULAR_FILTER, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_fil[i] = libxsmm_dnn_link_tensor( libxsmm_layout, fil_libxsmm[i], &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_fullyconnected_create_tensor_datalayout( libxsmm_fc_layer[i], LIBXSMM_DNN_GRADIENT_FILTER, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_delfil[i] = libxsmm_dnn_link_tensor( libxsmm_layout, delfil_libxsmm[i], &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_fullyconnected_create_tensor_datalayout( libxsmm_fc_layer[i], LIBXSMM_DNN_REGULAR_CHANNEL_BIAS, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_bias[i] = libxsmm_dnn_link_tensor( libxsmm_layout, bias_libxsmm[i], &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_fullyconnected_create_tensor_datalayout( libxsmm_fc_layer[i], LIBXSMM_DNN_GRADIENT_CHANNEL_BIAS, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_delbias[i] = libxsmm_dnn_link_tensor( libxsmm_layout, delbias_libxsmm[i], &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_fullyconnected_create_tensor_datalayout( libxsmm_fc_layer[i], LIBXSMM_DNN_RELU_MASK, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_relumask[i] = libxsmm_dnn_link_tensor( libxsmm_layout, relumask_libxsmm[i], &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
/* bind buffers and filter to fc layer */
CHKERR_LIBXSMM_DNN( libxsmm_dnn_fullyconnected_bind_tensor( libxsmm_fc_layer[i], libxsmm_act[ i], LIBXSMM_DNN_REGULAR_INPUT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_fullyconnected_bind_tensor( libxsmm_fc_layer[i], libxsmm_delact[i ], LIBXSMM_DNN_GRADIENT_INPUT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_fullyconnected_bind_tensor( libxsmm_fc_layer[i], libxsmm_act[i+1], LIBXSMM_DNN_REGULAR_OUTPUT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_fullyconnected_bind_tensor( libxsmm_fc_layer[i], libxsmm_delact[i+1], LIBXSMM_DNN_GRADIENT_OUTPUT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_fullyconnected_bind_tensor( libxsmm_fc_layer[i], libxsmm_fil[i], LIBXSMM_DNN_REGULAR_FILTER ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_fullyconnected_bind_tensor( libxsmm_fc_layer[i], libxsmm_delfil[i], LIBXSMM_DNN_GRADIENT_FILTER ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_fullyconnected_bind_tensor( libxsmm_fc_layer[i], libxsmm_bias[i], LIBXSMM_DNN_REGULAR_CHANNEL_BIAS ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_fullyconnected_bind_tensor( libxsmm_fc_layer[i], libxsmm_delbias[i], LIBXSMM_DNN_GRADIENT_CHANNEL_BIAS ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_fullyconnected_bind_tensor( libxsmm_fc_layer[i], libxsmm_relumask[i], LIBXSMM_DNN_RELU_MASK ) );
/* bind filters to optimizer */
CHKERR_LIBXSMM_DNN( libxsmm_dnn_optimizer_bind_tensor( libxsmm_opt[i], libxsmm_fil[i], LIBXSMM_DNN_REGULAR_FILTER ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_optimizer_bind_tensor( libxsmm_opt[i], libxsmm_delfil[i], LIBXSMM_DNN_GRADIENT_FILTER ) );
/* let's allocate and bind scratch */
if ( libxsmm_dnn_fullyconnected_get_scratch_size( libxsmm_fc_layer[i], &status ) > scratch_size ) {
scratch_size = libxsmm_dnn_fullyconnected_get_scratch_size( libxsmm_fc_layer[i], &status );
CHKERR_LIBXSMM_DNN( status );
if ( scratch != NULL ) {
libxsmm_free( scratch );
}
scratch = libxsmm_aligned_malloc( scratch_size, 2097152 );
init_buf( (float*)scratch, scratch_size/4, 0, 0 );
}
if ( libxsmm_dnn_optimizer_get_scratch_size( libxsmm_opt[i], &status ) > scratch_size ) {
scratch_size = libxsmm_dnn_optimizer_get_scratch_size( libxsmm_opt[i], &status );
CHKERR_LIBXSMM_DNN( status );
if ( scratch != NULL ) {
libxsmm_free( scratch );
}
scratch = libxsmm_aligned_malloc( scratch_size, 2097152 );
init_buf( (float*)scratch, scratch_size/4, 0, 0 );
}
}
/* create softmax layer */
softmaxloss_desc.N = MB;
softmaxloss_desc.C = C[num_layers];
softmaxloss_desc.bn = (MB % bn == 0) ? bn : MB;
softmaxloss_desc.bc = (C[num_layers] % bc == 0) ? bc : C[num_layers];
softmaxloss_desc.loss_weight = 1.0;
softmaxloss_desc.threads = nThreads;
softmaxloss_desc.datatype = LIBXSMM_DNN_DATATYPE_F32;
softmaxloss_desc.buffer_format = LIBXSMM_DNN_TENSOR_FORMAT_NCPACKED;
libxsmm_softmax = libxsmm_dnn_create_softmaxloss( softmaxloss_desc, &status );
CHKERR_LIBXSMM_DNN( status );
libxsmm_layout = libxsmm_dnn_softmaxloss_create_tensor_datalayout( libxsmm_softmax, LIBXSMM_DNN_REGULAR_OUTPUT, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_act[num_layers+1] = libxsmm_dnn_link_tensor( libxsmm_layout, act_libxsmm[num_layers+1], &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_softmaxloss_create_tensor_datalayout( libxsmm_softmax, LIBXSMM_DNN_LABEL, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_label = libxsmm_dnn_link_tensor( libxsmm_layout, label_libxsmm, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_softmaxloss_bind_tensor( libxsmm_softmax, libxsmm_act[num_layers], LIBXSMM_DNN_REGULAR_INPUT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_softmaxloss_bind_tensor( libxsmm_softmax, libxsmm_delact[num_layers], LIBXSMM_DNN_GRADIENT_INPUT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_softmaxloss_bind_tensor( libxsmm_softmax, libxsmm_act[num_layers+1], LIBXSMM_DNN_REGULAR_OUTPUT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_softmaxloss_bind_tensor( libxsmm_softmax, libxsmm_label, LIBXSMM_DNN_LABEL ) );
if ( libxsmm_dnn_softmaxloss_get_scratch_size( libxsmm_softmax, &status ) > scratch_size ) {
scratch_size = libxsmm_dnn_softmaxloss_get_scratch_size( libxsmm_softmax, &status );
CHKERR_LIBXSMM_DNN( status );
if ( scratch != NULL ) {
libxsmm_free( scratch );
}
scratch = libxsmm_aligned_malloc( scratch_size, 2097152 );
init_buf( (float*)scratch, scratch_size/4, 0, 0 );
}
/* bind scratch to all layers */
for ( i = 0; i < num_layers; ++i ) {
CHKERR_LIBXSMM_DNN( libxsmm_dnn_fullyconnected_bind_scratch( libxsmm_fc_layer[i], scratch ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_optimizer_bind_scratch( libxsmm_opt[i], scratch ) );
}
CHKERR_LIBXSMM_DNN( libxsmm_dnn_softmaxloss_bind_scratch( libxsmm_softmax, scratch ) );
int n_batches = NUM_TRAIN/MB, batch_id = 0;
int n_epochs = iters, epoch_id = 0;
float *input_acts = (float*)libxsmm_aligned_malloc( NUM_TRAIN * C[0] * sizeof(float), 2097152);
/* Read in input data */
char *train_image_path = "./mnist_data/train-images.idx3-ubyte";
char *train_label_path = "./mnist_data/train-labels.idx1-ubyte";
char *test_image_path = "./mnist_data/t10k-images.idx3-ubyte";
char *test_label_path = "./mnist_data/t10k-labels.idx1-ubyte";
load_mnist(train_image_path, train_label_path, test_image_path, test_label_path);
/* Format the input layer in NCNC blocked format */
int _i, _j;
for (_i = 0; _i < n_batches*MB; _i++) {
for (_j = 0; _j < C[0]; _j++) {
float val = (float) train_image[_i][_j];
int batchid = _i/MB;
int mb = _i % MB;
int _bn = (MB % bn == 0) ? bn : MB;
int _bc = (C[0] % bc == 0) ? bc : C[0];
float *cur_pos = input_acts + batchid * MB *C[0] + (mb / _bn) * C[0] * _bn + (_j / _bc) * _bn * _bc + (mb % _bn) * _bc + (_j % _bc);
*cur_pos = val;
}
}
libxsmm_layout = libxsmm_dnn_fullyconnected_create_tensor_datalayout( libxsmm_fc_layer[0], LIBXSMM_DNN_REGULAR_INPUT, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_layout2 = libxsmm_dnn_softmaxloss_create_tensor_datalayout( libxsmm_softmax, LIBXSMM_DNN_LABEL, &status ); CHKERR_LIBXSMM_DNN( status );
n_batches = NUM_TRAIN/MB;
printf("###########################################\n");
printf("# Training MNIST with %d training samples #\n", n_batches*MB);
printf("###########################################\n");
l_start = libxsmm_timer_tick();
#if defined(_OPENMP)
# pragma omp parallel private(i,j,epoch_id,batch_id)
#endif
{
#if defined(_OPENMP)
const int tid = omp_get_thread_num();
#else
const int tid = 0;
#endif
for (epoch_id = 0; epoch_id < n_epochs; epoch_id++) {
for (batch_id = 0; batch_id < n_batches; batch_id++) {
/* Initialize input activations with the correct minibatch */
if (tid == 0) {
libxsmm_act[0] = libxsmm_dnn_link_tensor( libxsmm_layout, input_acts + batch_id * MB * C[0], &status ); CHKERR_LIBXSMM_DNN( status );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_fullyconnected_bind_tensor( libxsmm_fc_layer[0], libxsmm_act[0], LIBXSMM_DNN_REGULAR_INPUT ) );
libxsmm_label = libxsmm_dnn_link_tensor( libxsmm_layout2, train_label + batch_id * MB, &status ); CHKERR_LIBXSMM_DNN( status );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_softmaxloss_bind_tensor( libxsmm_softmax, libxsmm_label, LIBXSMM_DNN_LABEL ) );
}
#pragma omp barrier
for ( i = 0; i < num_layers; ++i) {
libxsmm_dnn_fullyconnected_execute_st( libxsmm_fc_layer[i], LIBXSMM_DNN_COMPUTE_KIND_FWD, 0, tid );
}
libxsmm_dnn_softmaxloss_execute_st( libxsmm_softmax, LIBXSMM_DNN_COMPUTE_KIND_FWD, 0, tid );
if ((tid == 0) && (batch_id == 0) && (epoch_id % 10 == 0 || epoch_id == n_epochs - 1 )) {
float new_loss;
new_loss = libxsmm_dnn_softmaxloss_get_loss(libxsmm_softmax, &status); CHKERR_LIBXSMM_DNN( status );
printf("Loss for epoch %d batch_id %d is %f\n", epoch_id, batch_id, new_loss);
}
libxsmm_dnn_softmaxloss_execute_st( libxsmm_softmax, LIBXSMM_DNN_COMPUTE_KIND_BWD, 0, tid );
for ( i = (num_layers-1); i > 0; --i) {
libxsmm_dnn_fullyconnected_execute_st( libxsmm_fc_layer[i], LIBXSMM_DNN_COMPUTE_KIND_BWDUPD, 0, tid );
libxsmm_dnn_optimizer_execute_st( libxsmm_opt[i], 0, tid );
}
libxsmm_dnn_fullyconnected_execute_st( libxsmm_fc_layer[0], LIBXSMM_DNN_COMPUTE_KIND_UPD, 0, tid );
libxsmm_dnn_optimizer_execute_st( libxsmm_opt[i], 0, tid );
}
}
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
gflop = 0.0;
for ( i = num_layers-1; i > 0; --i) {
gflop += (6.0*(double)MB*(double)C[i]*(double)C[i+1]*(double)n_epochs *(double)n_batches) / (1000.0*1000.0*1000.0);
}
gflop += (4.0*(double)MB*(double)C[0]*(double)C[1]*(double)n_epochs *(double)n_batches) / (1000.0*1000.0*1000.0);
printf("GFLOP = %.5g\n", gflop/((double)n_epochs *(double)n_batches));
printf("fp time = %.5g\n", ((double)(l_total/((double)n_epochs *(double)n_batches))));
printf("GFLOPS = %.5g\n", gflop/l_total);
printf("PERFDUMP,BP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB );
for ( i = 0; i < num_layers; ++i ) {
printf("%i,", C[i] );
}
printf("%f,%f\n", ((double)(l_total/((double)n_epochs *(double)n_batches))), gflop/l_total);
#ifdef TEST_ACCURACY
/* Test accuracy */
n_batches = NUM_TEST/MB;
for (_i = 0; _i < n_batches * MB; _i++) {
for (_j = 0; _j < C[0]; _j++) {
float val = (float) test_image[_i][_j];
int batchid = _i/MB;
int mb = _i % MB;
int _bn = (MB % bn == 0) ? bn : MB;
int _bc = (C[0] % bc == 0) ? bc : C[0];
float *cur_pos = input_acts + batchid * MB *C[0] + (mb / _bn) * C[0] * _bn + (_j / _bc) * _bn * _bc + (mb % _bn) * _bc + (_j % _bc);
*cur_pos = val;
}
}
n_batches = NUM_TEST/MB;
unsigned int hits = 0;
unsigned int samples = 0;
#if defined(_OPENMP)
# pragma omp parallel private(i,j,batch_id)
#endif
{
#if defined(_OPENMP)
const int tid = omp_get_thread_num();
#else
const int tid = 0;
#endif
for (batch_id = 0; batch_id < n_batches; batch_id++) {
/* Initialize input activations with the correct minibatch */
if (tid == 0) {
libxsmm_act[0] = libxsmm_dnn_link_tensor( libxsmm_layout, input_acts + batch_id * MB * C[0], &status ); CHKERR_LIBXSMM_DNN( status );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_fullyconnected_bind_tensor( libxsmm_fc_layer[0], libxsmm_act[0], LIBXSMM_DNN_REGULAR_INPUT ) );
libxsmm_label = libxsmm_dnn_link_tensor( libxsmm_layout2, test_label + batch_id * MB, &status ); CHKERR_LIBXSMM_DNN( status );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_softmaxloss_bind_tensor( libxsmm_softmax, libxsmm_label, LIBXSMM_DNN_LABEL ) );
}
#pragma omp barrier
for ( i = 0; i < num_layers; ++i) {
libxsmm_dnn_fullyconnected_execute_st( libxsmm_fc_layer[i], LIBXSMM_DNN_COMPUTE_KIND_FWD, 0, tid );
}
libxsmm_dnn_softmaxloss_execute_st( libxsmm_softmax, LIBXSMM_DNN_COMPUTE_KIND_FWD, 0, tid );
if (tid == 0) {
for (_i = 0; _i < MB; _i++) {
int label = *(test_label + batch_id * MB + _i);
int max_id = 0;
float max_val = 0.0;
max_val = *(act_libxsmm[num_layers+1] + _i * 10);
float sum = max_val;
/* Find predicted label */
for (_j = 1; _j < 10; _j++) {
float val = *(act_libxsmm[num_layers+1] + _i * 10 + _j);
sum += val;
if (val > max_val) {
max_id = _j;
max_val = val;
}
}
/* Compare with true label */
if (max_id == label) {
hits++;
}
samples++;
}
}
#pragma omp barrier
}
}
printf("Accuracy is %f %% (%d test samples)\n", (1.0*hits)/(1.0*samples)*100.0, samples);
#endif
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout2 );
for ( i = 0; i < num_layers; ++i ) {
/* clean-up */
CHKERR_LIBXSMM_DNN( libxsmm_dnn_fullyconnected_release_scratch( libxsmm_fc_layer[i] ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_fullyconnected_release_tensor( libxsmm_fc_layer[i], LIBXSMM_DNN_REGULAR_INPUT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_fullyconnected_release_tensor( libxsmm_fc_layer[i], LIBXSMM_DNN_GRADIENT_INPUT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_fullyconnected_release_tensor( libxsmm_fc_layer[i], LIBXSMM_DNN_REGULAR_OUTPUT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_fullyconnected_release_tensor( libxsmm_fc_layer[i], LIBXSMM_DNN_GRADIENT_OUTPUT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_fullyconnected_release_tensor( libxsmm_fc_layer[i], LIBXSMM_DNN_REGULAR_FILTER ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_fullyconnected_release_tensor( libxsmm_fc_layer[i], LIBXSMM_DNN_GRADIENT_FILTER ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_fullyconnected_release_tensor( libxsmm_fc_layer[i], LIBXSMM_DNN_REGULAR_CHANNEL_BIAS ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_fullyconnected_release_tensor( libxsmm_fc_layer[i], LIBXSMM_DNN_GRADIENT_CHANNEL_BIAS ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_fullyconnected_release_tensor( libxsmm_fc_layer[i], LIBXSMM_DNN_RELU_MASK ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_fullyconnected( libxsmm_fc_layer[i] ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_optimizer_release_scratch( libxsmm_opt[i] ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_optimizer_release_tensor( libxsmm_opt[i], LIBXSMM_DNN_REGULAR_FILTER ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_optimizer_release_tensor( libxsmm_opt[i], LIBXSMM_DNN_GRADIENT_FILTER ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_optimizer( libxsmm_opt[i] ) );
}
CHKERR_LIBXSMM_DNN( libxsmm_dnn_softmaxloss_release_scratch( libxsmm_softmax ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_softmaxloss_release_tensor( libxsmm_softmax, LIBXSMM_DNN_REGULAR_INPUT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_softmaxloss_release_tensor( libxsmm_softmax, LIBXSMM_DNN_GRADIENT_INPUT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_softmaxloss_release_tensor( libxsmm_softmax, LIBXSMM_DNN_REGULAR_OUTPUT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_softmaxloss_release_tensor( libxsmm_softmax, LIBXSMM_DNN_LABEL ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_softmaxloss( libxsmm_softmax ) );
for ( i = 0; i < num_layers; ++i ) {
if ( i == 0 ) {
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_act[i] ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_delact[i] ) );
}
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_act[i+1] ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_delact[i+1] ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_fil[i] ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_delfil[i] ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_bias[i] ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_delbias[i] ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_relumask[i] ) );
}
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_act[num_layers+1] ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_label ) );
/* deallocate data */
libxsmm_free(scratch);
for ( i = 0; i < num_layers; ++i ) {
if ( i == 0 ) {
libxsmm_free(act_libxsmm[i]);
libxsmm_free(delact_libxsmm[i]);
}
libxsmm_free(act_libxsmm[i+1]);
libxsmm_free(delact_libxsmm[i+1]);
libxsmm_free(fil_libxsmm[i]);
libxsmm_free(delfil_libxsmm[i]);
libxsmm_free(bias_libxsmm[i]);
libxsmm_free(delbias_libxsmm[i]);
libxsmm_free(relumask_libxsmm[i]);
}
libxsmm_free(act_libxsmm[num_layers+1]);
libxsmm_free(label_libxsmm);
libxsmm_free(input_acts);
free( libxsmm_act );
free( libxsmm_delact );
free( libxsmm_fil );
free( libxsmm_delfil );
free( libxsmm_bias );
free( libxsmm_delbias );
free( libxsmm_relumask );
free( libxsmm_fc_layer );
free( libxsmm_opt );
free( act_libxsmm );
free( delact_libxsmm );
free( fil_libxsmm );
free( delfil_libxsmm );
free( bias_libxsmm );
free( delbias_libxsmm );
free( relumask_libxsmm );
free( C );
/* some empty lines at the end */
printf("\n\n\n");
return global_status;
}
|
DRB063-outeronly1-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Only the outermost loop can be safely parallelized: the inner loop in
foo() carries a loop-carried dependence (b[i][j] reads b[i][j+1]).
*/
#include <omp.h>
#include <stdio.h>
int n = 100;
int m = 100;
double b[100][100];
#define N 100

/* Initialize b with b[i][j] = i*j.  Returns 0.
   Fix: dropped the unused local `k`.
   NOTE(review): the inner `#pragma omp parallel for` opens a *nested*
   parallel region; with nested parallelism disabled (the default) it is a
   no-op, and when enabled it only adds team-spawn overhead — every
   iteration is independent either way.  Kept to preserve the original
   parallelization structure. */
int init()
{
  int i;
  int j;
#pragma omp parallel for private (i,j)
  for (i = 0; i <= 99; i += 1) {
#pragma omp parallel for private (j)
    for (j = 0; j <= 99; j += 1) {
      b[i][j] = (i * j);
    }
  }
  return 0;
}
/* Shift every row of b one element to the left:
   b[row][col] = b[row][col+1] for col in [0, m-2].
   Only the outer loop is parallelized; the inner loop carries a
   dependence (each element reads its right neighbor), so it must run
   sequentially within a row. */
void foo()
{
  int row;
  int col;
#pragma omp parallel for private (row,col) firstprivate (n,m)
  for (row = 0; row < n; row++) {
    /* last valid destination is col == m-2, since col+1 is read */
    for (col = 0; col < m - 1; col++) {
      b[row][col] = b[row][col + 1];
    }
  }
}
/* Print every element of b, one value per line, in row-major order.
   Returns 0.
   Fixes: dropped the unused local `k`; printf requires <stdio.h>,
   added in the include block (calling it without a prototype is
   undefined behavior / an error in modern C). */
int print()
{
  int i;
  int j;
  for (i = 0; i <= 99; i += 1) {
    for (j = 0; j <= 99; j += 1) {
      printf("%lf\n",b[i][j]);
    }
  }
  return 0;
}
/* Driver: populate b, apply the left-shift kernel, dump the result. */
int main()
{
  (void) init();
  foo();
  (void) print();
  return 0;
}
|
THTensorMath.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "internal/THTensorMath.c"
#else
#ifndef NAN
#define NAN (nan(NULL))
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
/* Below this element count, spawning an OpenMP team costs more than it saves. */
#define TH_OMP_OVERHEAD_THRESHOLD 100000
#ifdef _OPENMP

/* PRAGMA(x): emit `#pragma x` from inside a macro body
   (_Pragma for gcc/clang, __pragma for MSVC). */
#ifndef _WIN32
#define PRAGMA(P) _Pragma(#P)
#else
#ifdef __GNUC__
#define PRAGMA(P) _Pragma(#P)
#else
#define PRAGMA(P) __pragma(P) //for msvc
#endif
#endif

/* TH_TENSOR_APPLY_CONTIG(TYPE, TENSOR, CODE): run CODE over a contiguous
   TENSOR, splitting the flat element range evenly across OpenMP threads
   (the last thread also takes the division remainder).  CODE sees
   TENSOR_data / TENSOR_len describing its thread's slice only. */
#define TH_TENSOR_APPLY_CONTIG(TYPE, TENSOR, CODE) \
{ \
  ptrdiff_t TH_TENSOR_size = THTensor_(nElement)(TENSOR); \
  PRAGMA(omp parallel if (TH_TENSOR_size > TH_OMP_OVERHEAD_THRESHOLD)) \
  { \
    size_t num_threads = omp_get_num_threads(); \
    size_t tid = omp_get_thread_num(); \
    ptrdiff_t TH_TENSOR_offset = tid * (TH_TENSOR_size / num_threads); \
    ptrdiff_t TH_TENSOR_end = tid == num_threads - 1 ? TH_TENSOR_size : \
      TH_TENSOR_offset + TH_TENSOR_size / num_threads; \
    ptrdiff_t TENSOR##_len = TH_TENSOR_end - TH_TENSOR_offset; \
    TYPE *TENSOR##_data = THTensor_(data)(TENSOR) + TH_TENSOR_offset; \
    CODE \
  } \
}
#else
/* Serial fallback: CODE sees the whole tensor. */
#define TH_TENSOR_APPLY_CONTIG(TYPE, TENSOR, CODE) \
{ \
  TYPE *TENSOR##_data = THTensor_(data)(TENSOR); \
  ptrdiff_t TENSOR##_len = THTensor_(nElement)(TENSOR); \
  CODE \
}
#endif
#ifdef _OPENMP

/* Two-tensor variant: both tensors are assumed contiguous with the same
   element count; the slice bounds come from TENSOR1 and are applied to
   both data pointers. */
#define TH_TENSOR_APPLY2_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, CODE) \
{ \
  ptrdiff_t TH_TENSOR_size = THTensor_(nElement)(TENSOR1); \
  PRAGMA(omp parallel if (TH_TENSOR_size > TH_OMP_OVERHEAD_THRESHOLD)) \
  { \
    size_t num_threads = omp_get_num_threads(); \
    size_t tid = omp_get_thread_num(); \
    ptrdiff_t TH_TENSOR_offset = tid * (TH_TENSOR_size / num_threads); \
    ptrdiff_t TH_TENSOR_end = tid == num_threads - 1 ? TH_TENSOR_size : \
      TH_TENSOR_offset + TH_TENSOR_size / num_threads; \
    ptrdiff_t TENSOR1##_len = TH_TENSOR_end - TH_TENSOR_offset; \
    TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1) + TH_TENSOR_offset; \
    TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2) + TH_TENSOR_offset; \
    CODE \
  } \
}
#else
#define TH_TENSOR_APPLY2_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, CODE) \
{ \
  TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1); \
  TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2); \
  ptrdiff_t TENSOR1##_len = THTensor_(nElement)(TENSOR1); \
  CODE \
}
#endif
#ifdef _OPENMP

/* Three-tensor variant: same contract as APPLY2_CONTIG, sized by TENSOR1. */
#define TH_TENSOR_APPLY3_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, TYPE3, TENSOR3, CODE) \
{ \
  ptrdiff_t TH_TENSOR_size = THTensor_(nElement)(TENSOR1); \
  PRAGMA(omp parallel if (TH_TENSOR_size > TH_OMP_OVERHEAD_THRESHOLD)) \
  { \
    size_t num_threads = omp_get_num_threads(); \
    size_t tid = omp_get_thread_num(); \
    ptrdiff_t TH_TENSOR_offset = tid * (TH_TENSOR_size / num_threads); \
    ptrdiff_t TH_TENSOR_end = tid == num_threads - 1 ? TH_TENSOR_size : \
      TH_TENSOR_offset + TH_TENSOR_size / num_threads; \
    ptrdiff_t TENSOR1##_len = TH_TENSOR_end - TH_TENSOR_offset; \
    TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1) + TH_TENSOR_offset; \
    TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2) + TH_TENSOR_offset; \
    TYPE3 *TENSOR3##_data = THTensor_(data)(TENSOR3) + TH_TENSOR_offset; \
    CODE \
  } \
}
#else
#define TH_TENSOR_APPLY3_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, TYPE3, TENSOR3, CODE) \
{ \
  TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1); \
  TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2); \
  TYPE3 *TENSOR3##_data = THTensor_(data)(TENSOR3); \
  ptrdiff_t TENSOR1##_len = THTensor_(nElement)(TENSOR1); \
  CODE \
}
#endif
/* Set every element of r_ to `value`. */
void THTensor_(fill)(THTensor *r_, real value)
{
  if (THTensor_(isContiguous)(r_) || THTensor_(isTransposed)(r_)) {
    /* contiguous storage: one (possibly multi-threaded) vectorized fill */
    TH_TENSOR_APPLY_CONTIG(real, r_, THVector_(fill)(r__data, value, r__len););
  } else {
    TH_TENSOR_APPLY(real, r_,
      if (r__stride == 1) {
        /* innermost run is contiguous: fill it in one vector call, then
           manually advance the APPLY cursor past it and restart dispatch */
        THVector_(fill)(r__data, value, r__size);
        r__i = r__size;
        r__data += r__stride * r__size;
        break;
      } else {
        *r__data = value;
      }
      );
  }
}

/* Set every element of r_ to zero. */
void THTensor_(zero)(THTensor *r_)
{
  THTensor_(fill)(r_, 0);
}
/* Set tensor elements to `value` wherever mask is 1.
   mask must contain only 0/1; any other value raises an error. */
void THTensor_(maskedFill)(THTensor *tensor, THByteTensor *mask, real value)
{
  TH_TENSOR_APPLY2(real, tensor, unsigned char, mask,
    if (*mask_data > 1)
    {
      /* THError does not return: free the APPLY bookkeeping first */
      THFree(mask_counter);
      THFree(tensor_counter);
      THError("Mask tensor can take 0 and 1 values only");
    }
    else if (*mask_data == 1)
    {
      *tensor_data = value;
    });
}

/* Copy elements of src, in order, into the positions of tensor where
   mask is 1.  src must supply at least as many elements as mask has ones. */
void THTensor_(maskedCopy)(THTensor *tensor, THByteTensor *mask, THTensor* src )
{
  THTensor *srct = THTensor_(newContiguous)(src);  /* flat, ordered source view */
  real *src_data = THTensor_(data)(srct);
  ptrdiff_t cntr = 0;                              /* source elements consumed */
  ptrdiff_t nelem = THTensor_(nElement)(srct);
  if (THTensor_(nElement)(tensor) != THByteTensor_nElement(mask))
  {
    THTensor_(free)(srct);
    THError("Number of elements of destination tensor != Number of elements in mask");
  }
  TH_TENSOR_APPLY2(real, tensor, unsigned char, mask,
    if (*mask_data > 1)
    {
      THTensor_(free)(srct);
      THFree(mask_counter);
      THFree(tensor_counter);
      THError("Mask tensor can take 0 and 1 values only");
    }
    else if (*mask_data == 1)
    {
      if (cntr == nelem)
      {
        /* ran out of source elements before running out of mask ones */
        THTensor_(free)(srct);
        THFree(mask_counter);
        THFree(tensor_counter);
        THError("Number of elements of src < number of ones in mask");
      }
      *tensor_data = *src_data;
      src_data++;
      cntr++;
    });
  THTensor_(free)(srct);
}

/* Gather the elements of src where mask is 1 into tensor, resized to a
   1-D tensor of that length (traversal order). */
void THTensor_(maskedSelect)(THTensor *tensor, THTensor *src, THByteTensor *mask)
{
  ptrdiff_t numel = THByteTensor_sumall(mask);  /* count of ones (mask must be 0/1) */
  real *tensor_data;
#ifdef DEBUG
  THAssert(numel <= LONG_MAX);
#endif
  THTensor_(resize1d)(tensor,numel);
  tensor_data = THTensor_(data)(tensor);
  TH_TENSOR_APPLY2(real, src, unsigned char, mask,
    if (*mask_data > 1)
    {
      THFree(mask_counter);
      THFree(src_counter);
      THError("Mask tensor can take 0 and 1 values only");
    }
    else if (*mask_data == 1)
    {
      *tensor_data = *src_data;
      tensor_data++;
    });
}
// Finds non-zero elements of a tensor and returns their subscripts
// (a numel x nDimension long tensor, one row of coordinates per hit).
void THTensor_(nonzero)(THLongTensor *subscript, THTensor *tensor)
{
  ptrdiff_t numel = 0;   /* number of non-zero elements, counted in pass 1 */
  long *subscript_data;
  long i = 0;            /* running linear (row-major) index of the element */
  long dim;
  long div = 1;
#ifdef TH_REAL_IS_HALF
  /* half: test exponent/mantissa bits only, so -0.0 counts as zero */
#define IS_NONZERO(val) ((val.x & 0x7fff) != 0)
#else
#define IS_NONZERO(val) ((val)!=0)
#endif
  /* First Pass to determine size of subscripts */
  TH_TENSOR_APPLY(real, tensor,
    if IS_NONZERO(*tensor_data) {    /* macro supplies its own parentheses */
      ++numel;
    });
#ifdef DEBUG
  THAssert(numel <= LONG_MAX);
#endif
  THLongTensor_resize2d(subscript, numel, tensor->nDimension);
  /* Second pass populates subscripts */
  subscript_data = THLongTensor_data(subscript);
  TH_TENSOR_APPLY(real, tensor,
    if IS_NONZERO(*tensor_data) {
      /* decode linear index i into one subscript per dimension,
         working from the innermost dimension outwards */
      div = 1;
      for (dim = tensor->nDimension - 1; dim >= 0; dim--) {
        *(subscript_data + dim) = (i/div) % tensor->size[dim];
        div *= tensor->size[dim];
      }
      subscript_data += tensor->nDimension;
    }
    ++i;);
}
/* tensor = src selected by `index` along dimension `dim`:
   tensor gets src's shape with size[dim] replaced by numel(index), and
   tensor[..., i, ...] = src[..., index[i] - TH_INDEX_BASE, ...]. */
void THTensor_(indexSelect)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index)
{
  ptrdiff_t i, numel;
  THLongStorage *newSize;
  THTensor *tSlice, *sSlice;
  long *index_data;
  real *tensor_data, *src_data;
  THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
  THArgCheck(dim < src->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE);
  THArgCheck(src->nDimension > 0,2,"Source tensor is empty");
  numel = THLongTensor_nElement(index);
  /* output shape = src shape with size[dim] := numel */
  newSize = THLongStorage_newWithSize(src->nDimension);
  THLongStorage_rawCopy(newSize,src->size);
#ifdef DEBUG
  THAssert(numel <= LONG_MAX);
#endif
  newSize->data[dim] = numel;
  THTensor_(resize)(tensor,newSize,NULL);
  THLongStorage_free(newSize);
  index = THLongTensor_newContiguous(index);
  index_data = THLongTensor_data(index);
  if (dim == 0 && THTensor_(isContiguous)(src) && THTensor_(isContiguous)(tensor))
  {
    /* fast path: selecting whole contiguous rows */
    tensor_data = THTensor_(data)(tensor);
    src_data = THTensor_(data)(src);
    ptrdiff_t rowsize = THTensor_(nElement)(src) / src->size[0];
    // check that the indices are within range
    long max = src->size[0] - 1 + TH_INDEX_BASE;
    for (i=0; i<numel; i++) {
      if (index_data[i] < TH_INDEX_BASE || index_data[i] > max) {
        THLongTensor_free(index);
        THError("index out of range");
      }
    }
    if (src->nDimension == 1) {
      #pragma omp parallel for if(numel > TH_OMP_OVERHEAD_THRESHOLD) private(i)
      for (i=0; i<numel; i++)
        tensor_data[i] = src_data[index_data[i] - TH_INDEX_BASE];
    } else {
      #pragma omp parallel for if(numel*rowsize > TH_OMP_OVERHEAD_THRESHOLD) private(i)
      for (i=0; i<numel; i++)
        memcpy(tensor_data + i*rowsize, src_data + (index_data[i] - TH_INDEX_BASE)*rowsize, rowsize*sizeof(real));
    }
  }
  else if (src->nDimension == 1)
  {
    /* 1-D but non-contiguous: element-by-element accessor path */
    for (i=0; i<numel; i++)
      THTensor_(set1d)(tensor,i,THTensor_(get1d)(src,index_data[i] - TH_INDEX_BASE));
  }
  else
  {
    /* general path: copy one slice view per index */
    for (i=0; i<numel; i++)
    {
      tSlice = THTensor_(new)();
      sSlice = THTensor_(new)();
      THTensor_(select)(tSlice, tensor, dim, i);
      THTensor_(select)(sSlice, src, dim, index_data[i] - TH_INDEX_BASE);
      THTensor_(copy)(tSlice, sSlice);
      THTensor_(free)(tSlice);
      THTensor_(free)(sSlice);
    }
  }
  THLongTensor_free(index);
}
/* Copy the slices of src (along dim, in order) into tensor at the
   positions given by index: tensor[..., index[i], ...] = src[..., i, ...].
   NOTE(review): unlike indexSelect, index values are not range-checked
   here — out-of-range indices are undefined behavior; verify at callers. */
void THTensor_(indexCopy)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src)
{
  ptrdiff_t i, numel;
  THTensor *tSlice, *sSlice;
  long *index_data;
  numel = THLongTensor_nElement(index);
  THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
  THArgCheck(dim < src->nDimension, 4, "Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE);
  THArgCheck(numel == src->size[dim],4,"Number of indices should be equal to source:size(dim)");
  index = THLongTensor_newContiguous(index);
  index_data = THLongTensor_data(index);
  if (tensor->nDimension > 1 )
  {
    /* reuse one pair of slice views across all iterations */
    tSlice = THTensor_(new)();
    sSlice = THTensor_(new)();
    for (i=0; i<numel; i++)
    {
      THTensor_(select)(tSlice, tensor, dim, index_data[i] - TH_INDEX_BASE);
      THTensor_(select)(sSlice, src, dim, i);
      THTensor_(copy)(tSlice, sSlice);
    }
    THTensor_(free)(tSlice);
    THTensor_(free)(sSlice);
  }
  else
  {
    for (i=0; i<numel; i++)
    {
      THTensor_(set1d)(tensor, index_data[i] - TH_INDEX_BASE, THTensor_(get1d)(src,i));
    }
  }
  THLongTensor_free(index);
}

/* Accumulate slices of src into tensor at the positions given by index:
   tensor[..., index[i], ...] += src[..., i, ...].  Duplicate indices
   accumulate (processed sequentially). */
void THTensor_(indexAdd)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src)
{
  ptrdiff_t i, numel;
  THTensor *tSlice, *sSlice;
  long *index_data;
  numel = THLongTensor_nElement(index);
  THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
  THArgCheck(dim < src->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE);
  THArgCheck(numel == src->size[dim],4,"Number of indices should be equal to source:size(dim)");
  index = THLongTensor_newContiguous(index);
  index_data = THLongTensor_data(index);
  if (tensor->nDimension > 1)
  {
    tSlice = THTensor_(new)();
    sSlice = THTensor_(new)();
    for (i=0; i<numel; i++)
    {
      THTensor_(select)(tSlice, tensor, dim, index_data[i] - TH_INDEX_BASE);
      THTensor_(select)(sSlice, src, dim, i);
      THTensor_(cadd)(tSlice, tSlice, 1.0, sSlice);  /* tSlice += sSlice */
    }
    THTensor_(free)(tSlice);
    THTensor_(free)(sSlice);
  }
  else
  {
    for (i=0; i<numel; i++)
    {
      THTensor_(set1d)(tensor,
        index_data[i] - TH_INDEX_BASE,
        THTensor_(get1d)(src,i) + THTensor_(get1d)(tensor,index_data[i] - TH_INDEX_BASE));
    }
  }
  THLongTensor_free(index);
}

/* Fill the slices of tensor selected by index (along dim) with val. */
void THTensor_(indexFill)(THTensor *tensor, int dim, THLongTensor *index, real val)
{
  ptrdiff_t i, numel;
  THTensor *tSlice;
  long *index_data;
  numel = THLongTensor_nElement(index);
  THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
  THArgCheck(dim < tensor->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE);
  index = THLongTensor_newContiguous(index);
  index_data = THLongTensor_data(index);
  for (i=0; i<numel; i++)
  {
    if (tensor->nDimension > 1)
    {
      tSlice = THTensor_(new)();
      THTensor_(select)(tSlice, tensor,dim,index_data[i] - TH_INDEX_BASE);
      THTensor_(fill)(tSlice, val);
      THTensor_(free)(tSlice);
    }
    else
    {
      THTensor_(set1d)(tensor, index_data[i] - TH_INDEX_BASE, val);
    }
  }
  THLongTensor_free(index);
}
/* Along dim: tensor[..., i, ...] = src[..., index[..., i, ...], ...].
   tensor, src and index must have equal dimensionality; index values
   are range-checked against src's size along dim. */
void THTensor_(gather)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index)
{
  long elems_per_row, i, idx;
  THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 2,
             "Input tensor must have same dimensions as output tensor");
  THArgCheck(dim < THTensor_(nDimension)(tensor), 3, "Index dimension is out of bounds");
  THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(src), 4,
             "Index tensor must have same dimensions as input tensor");
  elems_per_row = THLongTensor_size(index, dim);
  TH_TENSOR_DIM_APPLY3(real, tensor, real, src, long, index, dim,
    for (i = 0; i < elems_per_row; ++i)
    {
      idx = *(index_data + i*index_stride);
      if (idx < TH_INDEX_BASE || idx >= src_size + TH_INDEX_BASE)
      {
        /* THError does not return: free the APPLY bookkeeping first */
        THFree(TH_TENSOR_DIM_APPLY_counter);
        THError("Invalid index in gather");
      }
      *(tensor_data + i*tensor_stride) = src_data[(idx - TH_INDEX_BASE) * src_stride];
    })
}

/* Inverse of gather along dim:
   tensor[..., index[..., i, ...], ...] = src[..., i, ...].
   Duplicate indices: last write wins (traversal order). */
void THTensor_(scatter)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src)
{
  long elems_per_row, i, idx;
  THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds");
  THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3,
             "Index tensor must have same dimensions as output tensor");
  THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 4,
             "Input tensor must have same dimensions as output tensor");
  elems_per_row = THLongTensor_size(index, dim);
  TH_TENSOR_DIM_APPLY3(real, tensor, real, src, long, index, dim,
    for (i = 0; i < elems_per_row; ++i)
    {
      idx = *(index_data + i*index_stride);
      if (idx < TH_INDEX_BASE || idx >= tensor_size + TH_INDEX_BASE)
      {
        THFree(TH_TENSOR_DIM_APPLY_counter);
        THError("Invalid index in scatter");
      }
      tensor_data[(idx - TH_INDEX_BASE) * tensor_stride] = *(src_data + i*src_stride);
    })
}

/* Like scatter, but accumulates instead of overwriting:
   tensor[..., index[..., i, ...], ...] += src[..., i, ...]. */
void THTensor_(scatterAdd)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src)
{
  long elems_per_row, i, idx;
  THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds");
  THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3,
             "Index tensor must have same dimensions as output tensor");
  THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 4,
             "Input tensor must have same dimensions as output tensor");
  elems_per_row = THLongTensor_size(index, dim);
  TH_TENSOR_DIM_APPLY3(real, tensor, real, src, long, index, dim,
    for (i = 0; i < elems_per_row; ++i)
    {
      idx = *(index_data + i*index_stride);
      if (idx < TH_INDEX_BASE || idx >= tensor_size + TH_INDEX_BASE)
      {
        THFree(TH_TENSOR_DIM_APPLY_counter);
        THError("Invalid index in scatterAdd");
      }
      tensor_data[(idx - TH_INDEX_BASE) * tensor_stride] += *(src_data + i*src_stride);
    })
}

/* Scatter a constant: tensor[..., index[..., i, ...], ...] = val. */
void THTensor_(scatterFill)(THTensor *tensor, int dim, THLongTensor *index, real val)
{
  long elems_per_row, i, idx;
  THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds");
  THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3,
             "Index tensor must have same dimensions as output tensor");
  elems_per_row = THLongTensor_size(index, dim);
  TH_TENSOR_DIM_APPLY2(real, tensor, long, index, dim,
    for (i = 0; i < elems_per_row; ++i)
    {
      idx = *(index_data + i*index_stride);
      if (idx < TH_INDEX_BASE || idx >= tensor_size + TH_INDEX_BASE)
      {
        THFree(TH_TENSOR_DIM_APPLY_counter);
        THError("Invalid index in scatter");
      }
      tensor_data[(idx - TH_INDEX_BASE) * tensor_stride] = val;
    })
}
/* Dot product of tensor and src, accumulated in accreal.
   Each APPLY2 step hands the current runs of both tensors to BLAS,
   manually advances both cursors by the consumed length, and `break`s
   back to the dispatcher — hence the "trick" warning below. */
accreal THTensor_(dot)(THTensor *tensor, THTensor *src)
{
  accreal sum = 0;
  /* we use a trick here. careful with that. */
  TH_TENSOR_APPLY2(real, tensor, real, src,
    long sz = (tensor_size-tensor_i < src_size-src_i ? tensor_size-tensor_i : src_size-src_i);
    sum += THBlas_(dot)(sz, src_data, src_stride, tensor_data, tensor_stride);
    tensor_i += sz;
    src_i += sz;
    tensor_data += sz*tensor_stride;
    src_data += sz*src_stride;
    break;);
  return sum;
}

/* th_isnan(val): true iff val is NaN; always false for integer types. */
#undef th_isnan
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
#define th_isnan(val) \
(isnan(val))
#else
#define th_isnan(val) (0)
#endif

/* th_isnan_break(val): abort the enclosing APPLY loop when val is NaN. */
#undef th_isnan_break
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
#define th_isnan_break(val) \
if (isnan(val)) break;
#else
#define th_isnan_break(val)
#endif

/* Minimum over all elements.  NaN-aware: any NaN becomes the result
   (the `!(value >= theMin)` test is true for NaN) and stops the scan. */
real THTensor_(minall)(THTensor *tensor)
{
  real theMin;
  real value;
  THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension");
  theMin = THTensor_(data)(tensor)[0];
  TH_TENSOR_APPLY(real, tensor,
    value = *tensor_data;
    /* This is not the same as value<theMin in the case of NaNs */
    if(!(value >= theMin))
    {
      theMin = value;
      th_isnan_break(value)
    });
  return theMin;
}

/* Maximum over all elements; NaN handling mirrors minall. */
real THTensor_(maxall)(THTensor *tensor)
{
  real theMax;
  real value;
  THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension");
  theMax = THTensor_(data)(tensor)[0];
  TH_TENSOR_APPLY(real, tensor,
    value = *tensor_data;
    /* This is not the same as value>theMax in the case of NaNs */
    if(!(value <= theMax))
    {
      theMax = value;
      th_isnan_break(value)
    });
  return theMax;
}

/* In-place k-th order statistic; defined later in this file. */
static void THTensor_(quickselectnoidx)(real *arr, long k, long elements, long stride);

/* Median of all elements (lower median for even counts).  Works on a
   clone, so `tensor` is left untouched. */
real THTensor_(medianall)(THTensor *tensor)
{
  THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension");
  real theMedian;
  ptrdiff_t numel;
  long k;
  THTensor *temp_;
  real *temp__data;
  numel = THTensor_(nElement)(tensor);
  k = (numel-1) >> 1;  /* lower-median rank */
  temp_ = THTensor_(newClone)(tensor);
  temp__data = THTensor_(data)(temp_);
  THTensor_(quickselectnoidx)(temp__data, k, numel, 1);
  theMedian = temp__data[k];
  THTensor_(free)(temp_);
  return theMedian;
}

/* Sum of all elements, accumulated in accreal. */
accreal THTensor_(sumall)(THTensor *tensor)
{
  accreal sum = 0;
  TH_TENSOR_APPLY(real, tensor, sum += *tensor_data;);
  return sum;
}

/* Product of all elements, accumulated in accreal. */
accreal THTensor_(prodall)(THTensor *tensor)
{
  accreal prod = 1;
  TH_TENSOR_APPLY(real, tensor, prod *= *tensor_data;);
  return prod;
}
/* r_ = t + value (elementwise).  Contiguous inputs of equal length take
   the vectorized path; anything else falls back to the generic APPLY. */
void THTensor_(add)(THTensor *r_, THTensor *t, real value)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(adds)(r__data, t_data, value, r__len););
  } else {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data + value;);
  }
}

/* r_ = t - value, implemented as add of the negation. */
void THTensor_(sub)(THTensor *r_, THTensor *t, real value)
{
  THTensor_(add)(r_, t, -value);
}

/* r_ = t * value (elementwise). */
void THTensor_(mul)(THTensor *r_, THTensor *t, real value)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(muls)(r__data, t_data, value, r__len););
  } else {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data * value;);
  }
}

/* r_ = t / value (elementwise); no divide-by-zero check here. */
void THTensor_(div)(THTensor *r_, THTensor *t, real value)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(divs)(r__data, t_data, value, r__len););
  } else {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data / value;);
  }
}
/* r_ = t << value (elementwise).
   float/double: implemented as multiplication by 2^value;
   half: unsupported; integer types: logical shift of the unsigned value.
   NOTE(review): `unsigned real` only compiles because `real` is a macro
   expanding to a basic integer type for these instantiations — confirm
   before adding new real types. */
void THTensor_(lshift)(THTensor *r_, THTensor *t, real value)
{
#if defined(TH_REAL_IS_FLOAT)
  return THTensor_(mul)(r_, t, powf(2, value));
#elif defined(TH_REAL_IS_DOUBLE)
  return THTensor_(mul)(r_, t, pow(2, value));
#elif defined(TH_REAL_IS_HALF)
  return THError("lshift is not supported for torch.HalfTensor");
#else
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) &&
      THTensor_(isContiguous)(t) &&
      THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    long sz = THTensor_(nElement)(t);
    long i;
    /* shifting is very cheap, so the parallel cutoff is 100x higher */
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i)
    for (i=0; i<sz; i++) {
#if defined(TH_REAL_IS_BYTE)
      rp[i] = ((real) tp[i]) << value;
#else
      rp[i] = ((unsigned real) tp[i]) << value;
#endif
    }
  } else {
#if defined(TH_REAL_IS_BYTE)
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((real) *t_data) << value););
#else
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((unsigned real) *t_data) << value););
#endif
  }
#endif
}

/* r_ = t >> value (elementwise); mirrors lshift (division by 2^value
   for float/double, logical shift for integer types). */
void THTensor_(rshift)(THTensor *r_, THTensor *t, real value)
{
#if defined(TH_REAL_IS_FLOAT)
  return THTensor_(div)(r_, t, powf(2, value));
#elif defined(TH_REAL_IS_DOUBLE)
  return THTensor_(div)(r_, t, pow(2, value));
#elif defined(TH_REAL_IS_HALF)
  return THError("rshift is not supported for torch.HalfTensor");
#else
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) &&
      THTensor_(isContiguous)(t) &&
      THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    long sz = THTensor_(nElement)(t);
    long i;
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i)
    for (i=0; i<sz; i++) {
#if defined(TH_REAL_IS_BYTE)
      rp[i] = ((real) tp[i]) >> value;
#else
      rp[i] = ((unsigned real) tp[i]) >> value;
#endif
    }
  } else {
#if defined(TH_REAL_IS_BYTE)
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((real) *t_data) >> value););
#else
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((unsigned real) *t_data) >> value););
#endif
  }
#endif
}
/* r_ = t mod value using *truncated* division semantics (C fmod / %):
   the result takes the sign of the dividend. */
void THTensor_(fmod)(THTensor *r_, THTensor *t, real value)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    ptrdiff_t sz = THTensor_(nElement)(t);
    ptrdiff_t i;
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
      rp[i] = fmod(tp[i], value);
#else
      rp[i] = tp[i] % value;
#endif
    }
  } else {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = fmod(*t_data, value););
#else
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (*t_data % value););
#endif
  }
}

/* r_ = t mod value using *floored* division semantics: the result takes
   the sign of the divisor.  For float/double, value == 0 yields NAN. */
void THTensor_(remainder)(THTensor *r_, THTensor *t, real value)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    ptrdiff_t sz = THTensor_(nElement)(t);
    ptrdiff_t i;
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
      rp[i] = (value == 0)? NAN : tp[i] - value * floor(tp[i] / value);
#else
      // There is no NAN for integers
      /* convert the truncated % result to floored modulo by shifting
         sign-mismatched remainders toward the divisor */
      rp[i] = tp[i] % value;
      if (rp[i] * value < 0)
        rp[i] += value;
#endif
    }
  } else {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (value == 0)? NAN : *t_data - value * floor(*t_data / value););
#else
    // There is no NAN for integers
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data % value;
      if (*r__data * value < 0) *r__data += value;);
#endif
  }
}
/* r_ = t & value (elementwise); integer tensor types only. */
void THTensor_(bitand)(THTensor *r_, THTensor *t, real value)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
  return THError("bitand is only supported for integer type tensors");
#else
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) &&
      THTensor_(isContiguous)(t) &&
      THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    long sz = THTensor_(nElement)(t);
    long i;
    /* bit ops are very cheap, so the parallel cutoff is 100x higher */
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i)
    for (i=0; i<sz; i++) {
      rp[i] = tp[i] & value;
    }
  } else {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data & value;);
  }
#endif
}

/* r_ = t | value (elementwise); integer tensor types only. */
void THTensor_(bitor)(THTensor *r_, THTensor *t, real value)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
  return THError("bitor is only supported for integer type tensors");
#else
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) &&
      THTensor_(isContiguous)(t) &&
      THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    long sz = THTensor_(nElement)(t);
    long i;
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i)
    for (i=0; i<sz; i++) {
      rp[i] = tp[i] | value;
    }
  } else {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data | value;);
  }
#endif
}

/* r_ = t ^ value (elementwise); integer tensor types only. */
void THTensor_(bitxor)(THTensor *r_, THTensor *t, real value)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
  return THError("bitxor is only supported for integer type tensors");
#else
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) &&
      THTensor_(isContiguous)(t) &&
      THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    long sz = THTensor_(nElement)(t);
    long i;
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i)
    for (i=0; i<sz; i++) {
      rp[i] = tp[i] ^ value;
    }
  } else {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data ^ value;);
  }
#endif
}
void THTensor_(clamp)(THTensor *r_, THTensor *t, real min_value, real max_value)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
/* real t_val; */
ptrdiff_t sz = THTensor_(nElement)(t);
ptrdiff_t i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++)
rp[i] = (tp[i] < min_value) ? min_value : (tp[i] > max_value ? max_value : tp[i]);
} else {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (*t_data < min_value) ? min_value : (*t_data > max_value ? max_value : *t_data););
}
}
/* r_ = t + value * src (elementwise, with scalar multiplier on src).
   Fast paths for fully-contiguous operands:
     - r_ == t: in-place accumulate via BLAS axpy;
     - otherwise: vectorized THVector cadd over contiguous chunks.
   Strided operands fall back to TH_TENSOR_APPLY3. */
void THTensor_(cadd)(THTensor *r_, THTensor *t, real value, THTensor *src)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
    if(r_ == t) {
      /* r_ += value * src, stride 1 on both sides */
      THBlas_(axpy)(THTensor_(nElement)(t), value, THTensor_(data)(src), 1, THTensor_(data)(r_), 1);
    } else {
      TH_TENSOR_APPLY3_CONTIG(real, r_, real, t, real, src, THVector_(cadd)(r__data, t_data, src_data, value, r__len););
    }
  } else {
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data + value * *src_data;);
  }
}
/* r_ = t - value * src, implemented by delegating to cadd with -value. */
void THTensor_(csub)(THTensor *r_, THTensor *t, real value,THTensor *src)
{
  THTensor_(cadd)(r_, t, -value, src);
}
/* r_ = t * src (elementwise product).
   Contiguous operands use the vectorized THVector cmul; strided fall back
   to TH_TENSOR_APPLY3. */
void THTensor_(cmul)(THTensor *r_, THTensor *t, THTensor *src)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
    TH_TENSOR_APPLY3_CONTIG(real, r_, real, t, real, src, THVector_(cmul)(r__data, t_data, src_data, r__len););
  } else {
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * *src_data;);
  }
}
/* r_ = t ^ src (elementwise power: each element of t raised to the
   corresponding element of src). */
void THTensor_(cpow)(THTensor *r_, THTensor *t, THTensor *src)
{
  THTensor_(resizeAs)(r_, t);
  int fast_path = THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) &&
                  THTensor_(isContiguous)(src) &&
                  THTensor_(nElement)(r_) == THTensor_(nElement)(src);
  if (fast_path) {
    real *base = THTensor_(data)(t);
    real *expo = THTensor_(data)(src);
    real *out = THTensor_(data)(r_);
    ptrdiff_t numel = THTensor_(nElement)(t);
    ptrdiff_t k;
#pragma omp parallel for if(numel > TH_OMP_OVERHEAD_THRESHOLD) private(k)
    for (k = 0; k < numel; k++)
      out[k] = pow(base[k], expo[k]);
  } else {
    /* strided fallback */
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = pow(*t_data, *src_data););
  }
}
/* r_ = t / src (elementwise division).
   Contiguous operands use the vectorized THVector cdiv; strided fall back
   to TH_TENSOR_APPLY3.  Division by zero follows C semantics for `real`
   (IEEE inf/nan for floats; UB for integer types). */
void THTensor_(cdiv)(THTensor *r_, THTensor *t, THTensor *src)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
    TH_TENSOR_APPLY3_CONTIG(real, r_, real, t, real, src, THVector_(cdiv)(r__data, t_data, src_data, r__len););
  } else {
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / *src_data;);
  }
}
/* r_ = t << src (elementwise left shift).
   Floating types emulate the shift as t * 2^src; byte shifts directly on
   the (already unsigned) value; other integer types shift on the unsigned
   representation to avoid UB on negative operands.  Not supported for half. */
void THTensor_(clshift)(THTensor *r_, THTensor *t, THTensor *src)
{
#if defined(TH_REAL_IS_HALF)
  /* early return; the code below is never reached for half */
  return THError("clshift is not supported for torch.HalfTensor");
#endif
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) &&
      THTensor_(isContiguous)(t) &&
      THTensor_(isContiguous)(src) &&
      THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
    real *tp = THTensor_(data)(t);
    real *sp = THTensor_(data)(src);
    real *rp = THTensor_(data)(r_);
    ptrdiff_t sz = THTensor_(nElement)(t);
    ptrdiff_t i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++) {
#if defined(TH_REAL_IS_FLOAT)
      rp[i] = tp[i] * powf(2, sp[i]);
#elif defined(TH_REAL_IS_DOUBLE)
      rp[i] = tp[i] * pow(2, sp[i]);
#elif defined(TH_REAL_IS_BYTE)
      rp[i] = ((real) tp[i]) << sp[i];
#else
      /* `unsigned real` relies on `real` expanding to a builtin integer type */
      rp[i] = ((unsigned real) tp[i]) << sp[i];
#endif
    }
  } else {
    /* strided fallback, same per-type dispatch as above */
#if defined(TH_REAL_IS_FLOAT)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * powf(2, *src_data););
#elif defined(TH_REAL_IS_DOUBLE)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * pow(2, *src_data););
#elif defined(TH_REAL_IS_BYTE)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((real)*t_data) << *src_data;);
#else
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((unsigned real)*t_data) << *src_data;);
#endif
  }
}
/* r_ = t >> src (elementwise right shift); mirror of clshift.
   Floating types emulate the shift as t / 2^src; byte shifts directly;
   other integer types shift on the unsigned representation (logical, not
   arithmetic, shift for signed values).  Not supported for half. */
void THTensor_(crshift)(THTensor *r_, THTensor *t, THTensor *src)
{
#if defined(TH_REAL_IS_HALF)
  /* early return; the code below is never reached for half */
  return THError("crshift is not supported for torch.HalfTensor");
#endif
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) &&
      THTensor_(isContiguous)(t) &&
      THTensor_(isContiguous)(src) &&
      THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
    real *tp = THTensor_(data)(t);
    real *sp = THTensor_(data)(src);
    real *rp = THTensor_(data)(r_);
    ptrdiff_t sz = THTensor_(nElement)(t);
    ptrdiff_t i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++) {
#if defined(TH_REAL_IS_FLOAT)
      rp[i] = tp[i] / powf(2, sp[i]);
#elif defined(TH_REAL_IS_DOUBLE)
      rp[i] = tp[i] / pow(2, sp[i]);
#elif defined(TH_REAL_IS_BYTE)
      rp[i] = ((real) tp[i]) >> sp[i];
#else
      /* logical shift via the unsigned representation */
      rp[i] = ((unsigned real) tp[i]) >> sp[i];
#endif
    }
  } else {
    /* strided fallback, same per-type dispatch as above */
#if defined(TH_REAL_IS_FLOAT)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / powf(2, *src_data););
#elif defined(TH_REAL_IS_DOUBLE)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / pow(2, *src_data););
#elif defined(TH_REAL_IS_BYTE)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((real)*t_data) >> *src_data;);
#else
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((unsigned real)*t_data) >> *src_data;);
#endif
  }
}
/* r_ = fmod(t, src) elementwise: C-style remainder whose sign follows the
   dividend (fmod for floats, % for integer types). */
void THTensor_(cfmod)(THTensor *r_, THTensor *t, THTensor *src)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
    real *tp = THTensor_(data)(t);
    real *sp = THTensor_(data)(src);
    real *rp = THTensor_(data)(r_);
    ptrdiff_t sz = THTensor_(nElement)(t);
    ptrdiff_t i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
      rp[i] = fmod(tp[i], sp[i]);
#else
      /* integer %: division by zero is UB, not checked here */
      rp[i] = tp[i] % sp[i];
#endif
    }
  } else {
    /* strided fallback */
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = fmod(*t_data, *src_data););
#else
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = (*t_data % *src_data););
#endif
  }
}
/* r_ = remainder(t, src) elementwise: mathematical modulo whose sign follows
   the divisor (unlike cfmod, which follows the dividend).
   Floats: t - src*floor(t/src), NAN when src == 0.
   Integers: % followed by a sign correction; no NaN exists so src == 0 is
   UB, not checked. */
void THTensor_(cremainder)(THTensor *r_, THTensor *t, THTensor *src)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
    real *tp = THTensor_(data)(t);
    real *sp = THTensor_(data)(src);
    real *rp = THTensor_(data)(r_);
    ptrdiff_t sz = THTensor_(nElement)(t);
    ptrdiff_t i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
      rp[i] = (sp[i] == 0)? NAN : tp[i] - sp[i] * floor(tp[i] / sp[i]);
#else
      // There is no NAN for integers
      rp[i] = tp[i] % sp[i];
      /* if the C remainder's sign disagrees with the divisor, shift it into range */
      if (rp[i] * sp[i] < 0)
        rp[i] += sp[i];
#endif
    }
  } else {
    /* strided fallback */
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = (*src_data == 0)? NAN : *t_data - *src_data * floor(*t_data / *src_data););
#else
    // There is no NAN for integers
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data % *src_data;
                                                   if (*r__data * *src_data < 0) *r__data += *src_data;);
#endif
  }
}
/* r_ = t & src (elementwise bitwise AND).
   Only defined for integer tensor types; errors out for float/double/half. */
void THTensor_(cbitand)(THTensor *r_, THTensor *t, THTensor *src)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
  return THError("cbitand is only supported for integer type tensors");
#else
  THTensor_(resizeAs)(r_, t);
  int fast_path = THTensor_(isContiguous)(r_) &&
                  THTensor_(isContiguous)(t) &&
                  THTensor_(isContiguous)(src) &&
                  THTensor_(nElement)(r_) == THTensor_(nElement)(src);
  if (fast_path) {
    real *lhs = THTensor_(data)(t);
    real *rhs = THTensor_(data)(src);
    real *out = THTensor_(data)(r_);
    ptrdiff_t numel = THTensor_(nElement)(t);
    ptrdiff_t k;
#pragma omp parallel for if(numel > TH_OMP_OVERHEAD_THRESHOLD) private(k)
    for (k = 0; k < numel; k++)
      out[k] = lhs[k] & rhs[k];
  } else {
    /* strided fallback */
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data & *src_data;);
  }
#endif
}
/* r_ = t | src (elementwise bitwise OR).
   Only defined for integer tensor types; errors out for float/double/half. */
void THTensor_(cbitor)(THTensor *r_, THTensor *t, THTensor *src)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
  return THError("cbitor is only supported for integer type tensors");
#else
  THTensor_(resizeAs)(r_, t);
  int fast_path = THTensor_(isContiguous)(r_) &&
                  THTensor_(isContiguous)(t) &&
                  THTensor_(isContiguous)(src) &&
                  THTensor_(nElement)(r_) == THTensor_(nElement)(src);
  if (fast_path) {
    real *lhs = THTensor_(data)(t);
    real *rhs = THTensor_(data)(src);
    real *out = THTensor_(data)(r_);
    ptrdiff_t numel = THTensor_(nElement)(t);
    ptrdiff_t k;
#pragma omp parallel for if(numel > TH_OMP_OVERHEAD_THRESHOLD) private(k)
    for (k = 0; k < numel; k++)
      out[k] = lhs[k] | rhs[k];
  } else {
    /* strided fallback */
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data | *src_data;);
  }
#endif
}
/* r_ = t ^ src (elementwise bitwise XOR).
   Only defined for integer tensor types; errors out for float/double/half. */
void THTensor_(cbitxor)(THTensor *r_, THTensor *t, THTensor *src)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
  return THError("cbitxor is only supported for integer type tensors");
#else
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) &&
      THTensor_(isContiguous)(t) &&
      THTensor_(isContiguous)(src) &&
      THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
    real *tp = THTensor_(data)(t);
    real *sp = THTensor_(data)(src);
    real *rp = THTensor_(data)(r_);
    ptrdiff_t sz = THTensor_(nElement)(t);
    ptrdiff_t i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++) {
      rp[i] = tp[i] ^ sp[i];
    }
  } else {
    /* strided fallback */
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data ^ *src_data;);
  }
#endif
}
/* r_ = value ^ t (elementwise): raises the scalar `value` to the power of
   each element of t. */
void THTensor_(tpow)(THTensor *r_, real value, THTensor *t)
{
  THTensor_(resizeAs)(r_, t);
  int fast_path = THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) &&
                  THTensor_(nElement)(r_) == THTensor_(nElement)(t);
  if (fast_path) {
    real *expo = THTensor_(data)(t);
    real *out = THTensor_(data)(r_);
    ptrdiff_t numel = THTensor_(nElement)(t);
    ptrdiff_t k;
#pragma omp parallel for if(numel > TH_OMP_OVERHEAD_THRESHOLD) private(k)
    for (k = 0; k < numel; k++)
      out[k] = pow(value, expo[k]);
  } else {
    /* strided fallback */
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = pow(value, *t_data););
  }
}
/* r_ = t + value * src1 * src2 (elementwise fused multiply-add).
   When r_ != t, r_ is first resized to and copied from t, then accumulated
   in place. */
void THTensor_(addcmul)(THTensor *r_, THTensor *t, real value, THTensor *src1, THTensor *src2)
{
  if(r_ != t)
  {
    THTensor_(resizeAs)(r_, t);
    THTensor_(copy)(r_, t);
  }
  TH_TENSOR_APPLY3(real, r_, real, src1, real, src2, *r__data += value * *src1_data * *src2_data;);
}
/* r_ = t + value * src1 / src2 (elementwise).
   When r_ != t, r_ is first resized to and copied from t, then accumulated
   in place.  Division by zero follows C semantics for `real`. */
void THTensor_(addcdiv)(THTensor *r_, THTensor *t, real value, THTensor *src1, THTensor *src2)
{
  if(r_ != t)
  {
    THTensor_(resizeAs)(r_, t);
    THTensor_(copy)(r_, t);
  }
  TH_TENSOR_APPLY3(real, r_, real, src1, real, src2, *r__data += value * *src1_data / *src2_data;);
}
/* r_ = beta * t + alpha * mat * vec (matrix-vector multiply-accumulate).
   mat must be 2D, vec and t 1D, with conforming sizes.  Dispatches to BLAS
   gemv choosing 'n'/'t' according to which matrix stride is 1; a matrix
   contiguous in neither dimension is compacted first. */
void THTensor_(addmv)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *mat, THTensor *vec)
{
  if( (mat->nDimension != 2) || (vec->nDimension != 1) )
    THError("matrix and vector expected, got %dD, %dD",
        mat->nDimension, vec->nDimension);

  if( mat->size[1] != vec->size[0] ) {
    THDescBuff bm = THTensor_(sizeDesc)(mat);
    THDescBuff bv = THTensor_(sizeDesc)(vec);
    THError("size mismatch, %s, %s", bm.str, bv.str);
  }

  if(t->nDimension != 1)
    THError("vector expected, got t: %dD", t->nDimension);

  if(t->size[0] != mat->size[0]) {
    THDescBuff bt = THTensor_(sizeDesc)(t);
    THDescBuff bm = THTensor_(sizeDesc)(mat);
    THError("size mismatch, t: %s, mat: %s", bt.str, bm.str);
  }

  if(r_ != t)
  {
    THTensor_(resizeAs)(r_, t);
    THTensor_(copy)(r_, t);
  }

  /* column-major: columns contiguous, lda = stride[1].
     NOTE(review): assumes stride[1] is a valid leading dimension here
     (>= size[0]) -- TODO confirm for degenerate single-column mats */
  if(mat->stride[0] == 1)
  {
    THBlas_(gemv)('n', mat->size[0], mat->size[1],
        alpha, THTensor_(data)(mat), mat->stride[1],
        THTensor_(data)(vec), vec->stride[0],
        beta, THTensor_(data)(r_), r_->stride[0]);
  }
  /* row-major: rows contiguous, compute as transposed gemv */
  else if(mat->stride[1] == 1)
  {
    THBlas_(gemv)('t',  mat->size[1], mat->size[0],
        alpha, THTensor_(data)(mat), mat->stride[0],
        THTensor_(data)(vec), vec->stride[0],
        beta, THTensor_(data)(r_), r_->stride[0]);
  }
  else
  {
    /* neither dimension contiguous: make a contiguous (row-major) copy */
    THTensor *cmat = THTensor_(newContiguous)(mat);

    THBlas_(gemv)('t',  mat->size[1], mat->size[0],
        alpha, THTensor_(data)(cmat), cmat->stride[0],
        THTensor_(data)(vec), vec->stride[0],
        beta, THTensor_(data)(r_), r_->stride[0]);

    THTensor_(free)(cmat);
  }
}
/* r_[i][j] = gain * squared Euclidean distance between row i of m1 and
   row j of m2.  m1 and m2 are flattened to 2D (rows x remaining elements);
   their inner dimensions must match.  Output r_ is N1 x N2.
   NOTE(review): m1/m2 are resized after newContiguous -- this mutates the
   local contiguous copies only, not the caller's tensors. */
void THTensor_(match)(THTensor *r_, THTensor *m1, THTensor *m2, real gain)
{
  long N1 = m1->size[0];
  long N2 = m2->size[0];
  long dim;
  real *m1_p;
  real *m2_p;
  real *r_p;
  long i;

  THTensor_(resize2d)(r_, N1, N2);

  m1 = THTensor_(newContiguous)(m1);
  m2 = THTensor_(newContiguous)(m2);

  THTensor_(resize2d)(m1, N1, THTensor_(nElement)(m1) / N1);
  THTensor_(resize2d)(m2, N2, THTensor_(nElement)(m2) / N2);

  dim = m1->size[1];
  THArgCheck(m1->size[1] == m2->size[1], 3, "m1 and m2 must have the same inner vector dim");

  m1_p = THTensor_(data)(m1);
  m2_p = THTensor_(data)(m2);
  r_p = THTensor_(data)(r_);

  /* rows of the output are independent: parallelize over i */
#pragma omp parallel for private(i)
  for (i=0; i<N1; i++) {
    long j,k;
    for (j=0; j<N2; j++) {
      real sum = 0;
      for (k=0; k<dim; k++) {
        real term = m1_p[ i*dim + k ] - m2_p[ j*dim + k ];
        sum += term*term;
      }
      r_p[ i*N2 + j ] = gain * sum;
    }
  }

  THTensor_(free)(m1);
  THTensor_(free)(m2);
}
/* r_ = beta * t + alpha * m1 * m2 (matrix-matrix multiply-accumulate via
   BLAS gemm).  m1, m2 and t must be 2D with conforming sizes.
   The function normalizes all three operands so gemm can be called once:
   it decides whether to compute C or C^T (transpose_r), then classifies
   each input as 'n'/'t' relative to that choice, compacting any operand
   that is contiguous in neither dimension. */
void THTensor_(addmm)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *m1, THTensor *m2)
{
  char transpose_r, transpose_m1, transpose_m2;
  THTensor *r__, *m1_, *m2_;

  if( (m1->nDimension != 2) || (m2->nDimension != 2))
    THError("matrices expected, got %dD, %dD tensors", m1->nDimension, m2->nDimension);

  if(m1->size[1] != m2->size[0]) {
    THDescBuff bm1 = THTensor_(sizeDesc)(m1);
    THDescBuff bm2 = THTensor_(sizeDesc)(m2);
    THError("size mismatch, m1: %s, m2: %s", bm1.str, bm2.str);
  }

  if( t->nDimension != 2 )
    THError("matrix expected, got %dD tensor for t", t->nDimension);

  if( (t->size[0] != m1->size[0]) || (t->size[1] != m2->size[1]) ) {
    THDescBuff bt  = THTensor_(sizeDesc)(t);
    THDescBuff bm1 = THTensor_(sizeDesc)(m1);
    THDescBuff bm2 = THTensor_(sizeDesc)(m2);
    THError("size mismatch, t: %s, m1: %s, m2: %s", bt.str, bm1.str, bm2.str);
  }

  if(t != r_)
  {
    THTensor_(resizeAs)(r_, t);
    /* with beta == 0 gemm overwrites r_ entirely, so the copy is skipped */
    if (beta != 0.0) {
      THTensor_(copy)(r_, t);
    }
  }

  /* r_ */
  if(r_->stride[0] == 1 &&
     r_->stride[1] != 0)
  {
    /* column-major result: compute C directly */
    transpose_r = 'n';
    r__ = r_;
  }
  else if(r_->stride[1] == 1 &&
          r_->stride[0] != 0)
  {
    /* row-major result: compute C^T = m2^T * m1^T by swapping the operands */
    THTensor *swap = m2;
    m2 = m1;
    m1 = swap;
    transpose_r = 't';
    r__ = r_;
  }
  else
  {
    transpose_r = 'n';

    /* result contiguous in neither dim: build a column-major scratch copy
       of r_ and copy it back at the end */
    THTensor *transp_r_ = THTensor_(newTranspose)(r_, 0, 1);
    r__ = THTensor_(newClone)(transp_r_);
    THTensor_(free)(transp_r_);
    THTensor_(transpose)(r__, NULL, 0, 1);
  }

  /* m1 */
  if(m1->stride[(transpose_r == 'n' ? 0 : 1)] == 1 &&
     m1->stride[(transpose_r == 'n' ? 1 : 0)] != 0)
  {
    transpose_m1 = 'n';
    m1_ = m1;
  }
  else if(m1->stride[(transpose_r == 'n' ? 1 : 0)] == 1 &&
          m1->stride[(transpose_r == 'n' ? 0 : 1)] != 0)
  {
    transpose_m1 = 't';
    m1_ = m1;
  }
  else
  {
    /* compact; the contiguous copy reads as the opposite orientation */
    transpose_m1 = (transpose_r == 'n' ? 't' : 'n');
    m1_ = THTensor_(newContiguous)(m1);
  }

  /* m2 */
  if(m2->stride[(transpose_r == 'n' ? 0 : 1)] == 1 &&
     m2->stride[(transpose_r == 'n' ? 1 : 0)] != 0)
  {
    transpose_m2 = 'n';
    m2_ = m2;
  }
  else if(m2->stride[(transpose_r == 'n' ? 1 : 0)] == 1 &&
          m2->stride[(transpose_r == 'n' ? 0 : 1)] != 0)
  {
    transpose_m2 = 't';
    m2_ = m2;
  }
  else
  {
    /* compact; the contiguous copy reads as the opposite orientation */
    transpose_m2 = (transpose_r == 'n' ? 't' : 'n');
    m2_ = THTensor_(newContiguous)(m2);
  }

  /* serialize gemm calls across threads (applies to the next statement only) */
#pragma omp critical(blasgemm)
  /* do the operation */
  THBlas_(gemm)(transpose_m1,
                transpose_m2,
                r__->size[(transpose_r == 'n' ? 0 : 1)],
                r__->size[(transpose_r == 'n' ? 1 : 0)],
                m1_->size[(transpose_r == 'n' ? 1 : 0)],
                alpha,
                THTensor_(data)(m1_),
                (transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]),
                THTensor_(data)(m2_),
                (transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]),
                beta,
                THTensor_(data)(r__),
                r__->stride[(transpose_r == 'n' ? 1 : 0)]);

  /* free intermediate variables */
  if(m1_ != m1)
    THTensor_(free)(m1_);

  if(m2_ != m2)
    THTensor_(free)(m2_);

  if(r__ != r_)
    THTensor_(freeCopyTo)(r__, r_);
}
/* r_ = beta * t + alpha * vec1 (outer product) vec2, via BLAS ger.
   vec1 and vec2 must be 1D, t 2D with size vec1->size[0] x vec2->size[0].
   Since ger only accumulates (no beta), beta is applied to r_ up front
   (zeroed for beta == 0, scaled for beta != 1).  ger needs a column-major
   matrix, so the row-major case swaps the two vectors; a fully strided r_
   is cloned, updated, and copied back. */
void THTensor_(addr)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *vec1, THTensor *vec2)
{
  if( (vec1->nDimension != 1) || (vec2->nDimension != 1) )
    THError("vector and vector expected, got %dD, %dD tensors",
        vec1->nDimension, vec2->nDimension);

  if(t->nDimension != 2)
    THError("expected matrix, got %dD tensor for t", t->nDimension);

  if( (t->size[0] != vec1->size[0]) || (t->size[1] != vec2->size[0]) ) {
    THDescBuff bt  = THTensor_(sizeDesc)(t);
    THDescBuff bv1 = THTensor_(sizeDesc)(vec1);
    THDescBuff bv2 = THTensor_(sizeDesc)(vec2);
    THError("size mismatch, t: %s, vec1: %s, vec2: %s", bt.str, bv1.str, bv2.str);
  }

  if(r_ != t)
  {
    THTensor_(resizeAs)(r_, t);
    THTensor_(copy)(r_, t);
  }

  /* pre-apply beta; ger itself only accumulates alpha * v1 * v2^T */
  if(beta == 0) {
    THTensor_(zero)(r_);
  }
  else if(beta != 1)
    THTensor_(mul)(r_, r_, beta);

  if(r_->stride[0] == 1)
  {
    /* column-major */
    THBlas_(ger)(vec1->size[0], vec2->size[0],
                 alpha, THTensor_(data)(vec1), vec1->stride[0],
                 THTensor_(data)(vec2), vec2->stride[0],
                 THTensor_(data)(r_), r_->stride[1]);
  }
  else if(r_->stride[1] == 1)
  {
    /* row-major: compute the transposed update by swapping the vectors */
    THBlas_(ger)(vec2->size[0], vec1->size[0],
                 alpha, THTensor_(data)(vec2), vec2->stride[0],
                 THTensor_(data)(vec1), vec1->stride[0],
                 THTensor_(data)(r_), r_->stride[0]);
  }
  else
  {
    /* neither dim contiguous: work on a clone and copy back */
    THTensor *cr = THTensor_(newClone)(r_);

    THBlas_(ger)(vec2->size[0], vec1->size[0],
                 alpha, THTensor_(data)(vec2), vec2->stride[0],
                 THTensor_(data)(vec1), vec1->stride[0],
                 THTensor_(data)(cr), cr->stride[0]);

    THTensor_(freeCopyTo)(cr, r_);
  }
}
/* result = beta * t + alpha * sum over batches of (batch1[b] * batch2[b]).
   batch1 and batch2 are 3D (batch x rows x cols) with matching batch count
   and inner dimensions; the 2D output accumulates all batch products. */
void THTensor_(addbmm)(THTensor *result, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2)
{
  long batch;

  THArgCheck(THTensor_(nDimension)(batch1) == 3, 1, "expected 3D tensor");
  THArgCheck(THTensor_(nDimension)(batch2) == 3, 2, "expected 3D tensor");
  THArgCheck(THTensor_(size)(batch1, 0) == THTensor_(size)(batch2, 0), 2,
             "equal number of batches expected, got %d, %d",
             THTensor_(size)(batch1, 0), THTensor_(size)(batch2, 0));
  THArgCheck(THTensor_(size)(batch1, 2) == THTensor_(size)(batch2, 1), 2,
             "wrong matrix size, batch1: %dx%d, batch2: %dx%d",
             THTensor_(size)(batch1, 1), THTensor_(size)(batch1,2),
             THTensor_(size)(batch2, 1), THTensor_(size)(batch2,2));

  long dim1 = THTensor_(size)(batch1, 1);
  long dim2 = THTensor_(size)(batch2, 2);
  THArgCheck(THTensor_(size)(t, 0) == dim1, 1, "output tensor of incorrect size");
  THArgCheck(THTensor_(size)(t, 1) == dim2, 1, "output tensor of incorrect size");

  if (t != result) {
    THTensor_(resizeAs)(result, t);
    /* with beta == 0 the first addmm overwrites result, so skip the copy */
    if (beta != 0.0) {
      THTensor_(copy)(result, t);
    }
  }

  THTensor *matrix1 = THTensor_(new)();
  THTensor *matrix2 = THTensor_(new)();

  for (batch = 0; batch < THTensor_(size)(batch1, 0); ++batch) {
    THTensor_(select)(matrix1, batch1, 0, batch);
    THTensor_(select)(matrix2, batch2, 0, batch);

    /* beta applies to t only on the first batch; afterwards accumulate */
    THTensor_(addmm)(result, beta, result, alpha, matrix1, matrix2);
    beta = 1; // accumulate output once
  }

  THTensor_(free)(matrix1);
  THTensor_(free)(matrix2);
}
/* result[b] = beta * t[b] + alpha * batch1[b] * batch2[b] for every batch b
   (batched matrix multiply; unlike addbmm the output keeps the batch dim).
   batch1 and batch2 are 3D with matching batch count and inner dimensions;
   t and result are 3D of size batch x dim1 x dim2. */
void THTensor_(baddbmm)(THTensor *result, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2)
{
  long batch;

  THArgCheck(THTensor_(nDimension)(batch1) == 3, 1, "expected 3D tensor, got %dD", THTensor_(nDimension)(batch1));
  THArgCheck(THTensor_(nDimension)(batch2) == 3, 2, "expected 3D tensor, got %dD", THTensor_(nDimension)(batch2));
  THArgCheck(THTensor_(size)(batch1, 0) == THTensor_(size)(batch2, 0), 2,
             "equal number of batches expected, got %d, %d",
             THTensor_(size)(batch1, 0), THTensor_(size)(batch2, 0));
  THArgCheck(THTensor_(size)(batch1, 2) == THTensor_(size)(batch2, 1), 2,
             "wrong matrix size, batch1: %dx%d, batch2: %dx%d",
             THTensor_(size)(batch1, 1), THTensor_(size)(batch1, 2),
             THTensor_(size)(batch2, 1), THTensor_(size)(batch2, 2));

  long bs = THTensor_(size)(batch1, 0);
  long dim1 = THTensor_(size)(batch1, 1);
  long dim2 = THTensor_(size)(batch2, 2);
  THArgCheck(THTensor_(size)(t, 0) == bs, 1,   "output tensor of incorrect size");
  THArgCheck(THTensor_(size)(t, 1) == dim1, 1, "output tensor of incorrect size");
  THArgCheck(THTensor_(size)(t, 2) == dim2, 1, "output tensor of incorrect size");

  if (t != result) {
    THTensor_(resizeAs)(result, t);
    /* with beta == 0 each addmm overwrites its slice, so skip the copy */
    if (beta != 0.0) {
      THTensor_(copy)(result, t);
    }
  }

  THTensor *matrix1 = THTensor_(new)();
  THTensor *matrix2 = THTensor_(new)();
  THTensor *result_matrix = THTensor_(new)();

  for (batch = 0; batch < THTensor_(size)(batch1, 0); ++batch) {
    THTensor_(select)(matrix1, batch1, 0, batch);
    THTensor_(select)(matrix2, batch2, 0, batch);
    THTensor_(select)(result_matrix, result, 0, batch);

    THTensor_(addmm)(result_matrix, beta, result_matrix, alpha, matrix1, matrix2);
  }

  THTensor_(free)(matrix1);
  THTensor_(free)(matrix2);
  THTensor_(free)(result_matrix);
}
/* Returns the total number of elements in t (alias for nElement). */
ptrdiff_t THTensor_(numel)(THTensor *t)
{
  return THTensor_(nElement)(t);
}
/* Computes the maximum values of t along `dimension`, writing the maxima to
   values_ and their (0-based) positions to indices_.  Both outputs are
   resized to t's shape with `dimension` reduced to 1, then squeezed along
   that dimension when keepdim is 0.
   NaN handling: the !(value <= theMax) test is true for NaN, so a NaN wins
   the comparison and th_isnan_break stops the scan at the first NaN. */
void THTensor_(max)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim)
{
  THLongStorage *dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
      dimension + TH_INDEX_BASE);

  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(values_, dim, NULL);
  THLongTensor_resize(indices_, dim, NULL);
  THLongStorage_free(dim);

  // two implementations optimized for data locality
  if (t->stride[dimension] == 1) {
    /* reduced dimension is contiguous: scan each line directly */
    real theMax;
    real value;
    long theIndex;
    long i;
    TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension,
                         theMax = t_data[0];
                         theIndex = 0;

                         for(i = 0; i < t_size; i++)
                         {
                           value = t_data[i*t_stride];
                           /* This is not the same as value>theMax in the case of NaNs */
                           if(!(value <= theMax))
                           {
                             theIndex = i;
                             theMax = value;
                             th_isnan_break(value)
                           }
                         }
                         *indices__data = theIndex;
                         *values__data = theMax;);
  } else {
    /* strided case: seed values_ with slice 0 of t, then sweep the other
       slices through zero-stride "expanded" views of the outputs */
    if (THTensor_(nDimension)(t) > 1) {
      THTensor *t0 = THTensor_(newSelect)(t, dimension, 0);
      THTensor_(copy)(values_, t0);
      THTensor_(free)(t0);
    } else {
      THTensor_(fill)(values_, THTensor_(get1d)(t, 0));
    }
    THLongTensor_zero(indices_);

    if(t->size[dimension] == 1) {
      /* nothing to reduce beyond the seed; early exit (squeeze first) */
      if (!keepdim) {
        THTensor_(squeeze1d)(values_, values_, dimension);
        THLongTensor_squeeze1d(indices_, indices_, dimension);
      }
      return;
    }

    THTensor *tempValues_ = THTensor_(newWithTensor)(values_);
    // tempValues_.expand_as(t)
    tempValues_->size[dimension] = t->size[dimension];
    tempValues_->stride[dimension] = 0;

    THLongTensor *tempIndices_ = THLongTensor_newWithTensor(indices_);
    // tempIndices_.expand_as(t)
    tempIndices_->size[dimension] = t->size[dimension];
    tempIndices_->stride[dimension] = 0;

    TH_TENSOR_DIM_APPLY3 == TH_TENSOR_APPLY3_D ? 0 : 0;
    TH_TENSOR_APPLY3_D(real, t, real, tempValues_, long, tempIndices_, dimension,
                          if(!(*t_data <= *tempValues__data) && !th_isnan(*tempValues__data)) {
                            *tempValues__data = *t_data;
                            *tempIndices__data = *tempIndices__dimOffset;
                          });

    THTensor_(free)(tempValues_);
    THLongTensor_free(tempIndices_);
  }

  if (!keepdim) {
    THTensor_(squeeze1d)(values_, values_, dimension);
    THLongTensor_squeeze1d(indices_, indices_, dimension);
  }
}
/* Computes the minimum values of t along `dimension`, writing the minima to
   values_ and their (0-based) positions to indices_.  Both outputs are
   resized to t's shape with `dimension` reduced to 1, then squeezed along
   that dimension when keepdim is 0.
   NaN handling: the !(value >= theMin) test is true for NaN, so a NaN wins
   the comparison and th_isnan_break stops the scan at the first NaN. */
void THTensor_(min)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim)
{
  THLongStorage *dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
      dimension + TH_INDEX_BASE);

  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(values_, dim, NULL);
  THLongTensor_resize(indices_, dim, NULL);
  THLongStorage_free(dim);

  // two implementations optimized for data locality
  if (t->stride[dimension] == 1) {
    /* reduced dimension is contiguous: scan each line directly */
    real theMin;
    real value;
    long theIndex;
    long i;
    TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension,
                         theMin = t_data[0];
                         theIndex = 0;

                         for(i = 0; i < t_size; i++)
                         {
                           value = t_data[i*t_stride];
                           /* This is not the same as value<theMin in the case of NaNs */
                           if(!(value >= theMin))
                           {
                             theIndex = i;
                             theMin = value;
                             th_isnan_break(value)
                           }
                         }
                         *indices__data = theIndex;
                         *values__data = theMin;);
  } else {
    /* strided case: seed values_ with slice 0 of t, then sweep the other
       slices through zero-stride "expanded" views of the outputs */
    if (THTensor_(nDimension)(t) > 1) {
      THTensor *t0 = THTensor_(newSelect)(t, dimension, 0);
      THTensor_(copy)(values_, t0);
      THTensor_(free)(t0);
    } else {
      THTensor_(fill)(values_, THTensor_(get1d)(t, 0));
    }
    THLongTensor_zero(indices_);

    if(t->size[dimension] == 1) {
      /* nothing to reduce beyond the seed; early exit (squeeze first) */
      if (!keepdim) {
        THTensor_(squeeze1d)(values_, values_, dimension);
        THLongTensor_squeeze1d(indices_, indices_, dimension);
      }
      return;
    }

    THTensor *tempValues_ = THTensor_(newWithTensor)(values_);
    // tempValues_.expand_as(t)
    tempValues_->size[dimension] = t->size[dimension];
    tempValues_->stride[dimension] = 0;

    THLongTensor *tempIndices_ = THLongTensor_newWithTensor(indices_);
    // tempIndices_.expand_as(t)
    tempIndices_->size[dimension] = t->size[dimension];
    tempIndices_->stride[dimension] = 0;

    TH_TENSOR_APPLY3_D(real, t, real, tempValues_, long, tempIndices_, dimension,
                          if(!(*t_data >= *tempValues__data) && !th_isnan(*tempValues__data)) {
                            *tempValues__data = *t_data;
                            *tempIndices__data = *tempIndices__dimOffset;
                          });

    /* BUGFIX: release the temporary expanded views.  THTensor_(max) frees
       both here; this branch previously leaked tempValues_ and tempIndices_. */
    THTensor_(free)(tempValues_);
    THLongTensor_free(tempIndices_);
  }

  if (!keepdim) {
    THTensor_(squeeze1d)(values_, values_, dimension);
    THLongTensor_squeeze1d(indices_, indices_, dimension);
  }
}
/* Sums t along `dimension` into r_ (accumulating in accreal to limit
   overflow/rounding), resized to t's shape with that dimension reduced to 1
   and squeezed when keepdim is 0. */
void THTensor_(sum)(THTensor *r_, THTensor *t, int dimension, int keepdim)
{
  THLongStorage *dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
      dimension + TH_INDEX_BASE);

  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(r_, dim, NULL);
  THLongStorage_free(dim);

  // two implementations optimized for data locality
  if (t->stride[dimension] == 1) {
    /* reduced dimension is contiguous: sum each line directly */
    TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                         accreal sum = 0;
                         long i;
                         for(i = 0; i < t_size; i++)
                           sum += t_data[i*t_stride];
                         *r__data = (real)sum;);
  } else {
    /* strided case: accumulate through a zero-stride "expanded" view of r_ */
    THTensor_(zero)(r_);
    THTensor *temp_ = THTensor_(newWithTensor)(r_);
    // r_.expand_as(t)
    temp_->size[dimension] = t->size[dimension];
    temp_->stride[dimension] = 0;

    TH_TENSOR_APPLY2(real, temp_, real, t, *temp__data = *temp__data + *t_data;);
    THTensor_(free)(temp_);
  }

  if (!keepdim) {
    THTensor_(squeeze1d)(r_, r_, dimension);
  }
}
/* Multiplies t along `dimension` into r_ (accumulating in accreal), resized
   to t's shape with that dimension reduced to 1 and squeezed when keepdim
   is 0.  Mirror of THTensor_(sum) with product semantics (identity 1). */
void THTensor_(prod)(THTensor *r_, THTensor *t, int dimension, int keepdim)
{
  THLongStorage *dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
      dimension + TH_INDEX_BASE);

  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(r_, dim, NULL);
  THLongStorage_free(dim);

  // two implementations optimized for data locality
  if (t->stride[dimension] == 1) {
    /* reduced dimension is contiguous: multiply each line directly */
    TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                         accreal prod = 1;
                         long i;
                         for(i = 0; i < t_size; i++)
                           prod *= t_data[i*t_stride];
                         *r__data = (real)prod;);
  } else {
    /* strided case: accumulate through a zero-stride "expanded" view of r_ */
    THTensor_(fill)(r_, 1);
    THTensor *temp_ = THTensor_(newWithTensor)(r_);
    // r_.expand_as(t)
    temp_->size[dimension] = t->size[dimension];
    temp_->stride[dimension] = 0;

    TH_TENSOR_APPLY2(real, temp_, real, t, *temp__data = *temp__data * *t_data;);
    THTensor_(free)(temp_);
  }

  if (!keepdim) {
    THTensor_(squeeze1d)(r_, r_, dimension);
  }
}
/* r_ = cumulative sum of t along `dimension` (same shape as t); the running
   total is kept in accreal and cast back per element. */
void THTensor_(cumsum)(THTensor *r_, THTensor *t, int dimension)
{
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
      dimension + TH_INDEX_BASE);

  THTensor_(resizeAs)(r_, t);

  TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                       accreal cumsum = 0;
                       long i;
                       for(i = 0; i < t_size; i++)
                       {
                         cumsum += t_data[i*t_stride];
                         r__data[i*r__stride] = (real)cumsum;
                       });
}
/* r_ = cumulative product of t along `dimension` (same shape as t); the
   running product is kept in accreal and cast back per element. */
void THTensor_(cumprod)(THTensor *r_, THTensor *t, int dimension)
{
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
      dimension + TH_INDEX_BASE);

  THTensor_(resizeAs)(r_, t);

  TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                       accreal cumprod = 1;
                       long i;
                       for(i = 0; i < t_size; i++)
                       {
                         cumprod *= t_data[i*t_stride];
                         r__data[i*r__stride] = (real)cumprod;
                       });
}
/* r_ = sign(t) elementwise: 1 for positive, -1 for negative, 0 otherwise.
   Byte tensors are unsigned, so the negative branch is omitted there
   (result is 1 or 0). */
void THTensor_(sign)(THTensor *r_, THTensor *t)
{
  THTensor_(resizeAs)(r_, t);

#if defined (TH_REAL_IS_BYTE)
  TH_TENSOR_APPLY2(real, r_, real, t,
    if (*t_data > 0) *r__data = 1;
    else *r__data = 0;);
#else
  TH_TENSOR_APPLY2(real, r_, real, t,
    if (*t_data > 0) *r__data = 1;
    else if (*t_data < 0) *r__data = -1;
    else *r__data = 0;);
#endif
}
/* Returns the trace of the 2D tensor t: the sum of its main-diagonal
   elements, accumulated in accreal.  For non-square matrices only the
   min(rows, cols) leading diagonal entries are summed. */
accreal THTensor_(trace)(THTensor *t)
{
  real *t_data = THTensor_(data)(t);
  accreal acc = 0;
  long k;
  long stride0, stride1, diag_len;

  THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix");

  stride0 = THTensor_(stride)(t, 0);
  stride1 = THTensor_(stride)(t, 1);
  diag_len = THMin(THTensor_(size)(t, 0), THTensor_(size)(t, 1));

  /* element (k, k) lives at offset k*(stride0 + stride1) */
  for (k = 0; k < diag_len; k++)
    acc += t_data[k * (stride0 + stride1)];

  return acc;
}
/* r_ = a x b (3D vector cross product) along `dimension`, which must have
   size 3.  a and b must have identical shapes.  A negative `dimension`
   means "auto-detect": the first dimension of a with size 3 is used. */
void THTensor_(cross)(THTensor *r_, THTensor *a, THTensor *b, int dimension)
{
  int i;

  if(THTensor_(nDimension)(a) != THTensor_(nDimension)(b))
    THError("inconsistent tensor dimension %dD, %dD",
        THTensor_(nDimension)(a), THTensor_(nDimension)(b));

  for(i = 0; i < THTensor_(nDimension)(a); i++)
  {
    if(THTensor_(size)(a, i) != THTensor_(size)(b, i)) {
      THDescBuff ba = THTensor_(sizeDesc)(a);
      THDescBuff bb = THTensor_(sizeDesc)(b);
      THError("inconsistent tensor sizes %s, %s", ba.str, bb.str);
    }
  }

  if(dimension < 0)
  {
    /* auto-detect: pick the first dimension of size 3 */
    for(i = 0; i < THTensor_(nDimension)(a); i++)
    {
      if(THTensor_(size)(a, i) == 3)
      {
        dimension = i;
        break;
      }
    }
    if(dimension < 0) {
      THDescBuff ba = THTensor_(sizeDesc)(a);
      THError("no dimension of size 3 in a: %s", ba.str);
    }
  }

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(a), 3, "dimension %d out of range",
      dimension + TH_INDEX_BASE);
  THArgCheck(THTensor_(size)(a, dimension) == 3, 3, "dimension %d does not have size 3",
      dimension + TH_INDEX_BASE);

  THTensor_(resizeAs)(r_, a);

  /* standard right-handed cross product per 3-vector along `dimension` */
  TH_TENSOR_DIM_APPLY3(real, a, real, b, real, r_, dimension,
                       r__data[0*r__stride] = a_data[1*a_stride]*b_data[2*b_stride] - a_data[2*a_stride]*b_data[1*b_stride];
                       r__data[1*r__stride] = a_data[2*a_stride]*b_data[0*b_stride] - a_data[0*a_stride]*b_data[2*b_stride];
                       r__data[2*r__stride] = a_data[0*a_stride]*b_data[1*b_stride] - a_data[1*a_stride]*b_data[0*b_stride];);
}
/* r = elementwise maximum of t and src.
   Note the comparison order matters for NaN: when *t_data is NaN the `>`
   is false and src's value is taken. */
void THTensor_(cmax)(THTensor *r, THTensor *t, THTensor *src) {
  THTensor_(resizeAs)(r, t);
  TH_TENSOR_APPLY3(real, r, real, t, real, src,
                   *r_data = *t_data > *src_data ? *t_data : *src_data;);
}
/* r = elementwise minimum of t and src.
   Note the comparison order matters for NaN: when *t_data is NaN the `<`
   is false and src's value is taken. */
void THTensor_(cmin)(THTensor *r, THTensor *t, THTensor *src) {
  THTensor_(resizeAs)(r, t);
  TH_TENSOR_APPLY3(real, r, real, t, real, src,
                   *r_data = *t_data < *src_data ? *t_data : *src_data;);
}
/* r = elementwise maximum of t and the scalar `value` (clamps from below). */
void THTensor_(cmaxValue)(THTensor *r, THTensor *t, real value) {
  THTensor_(resizeAs)(r, t);
  TH_TENSOR_APPLY2(real, r, real, t,
                   *r_data = *t_data > value ? *t_data : value;);
}
/* r = elementwise minimum of t and the scalar `value` (clamps from above). */
void THTensor_(cminValue)(THTensor *r, THTensor *t, real value) {
  THTensor_(resizeAs)(r, t);
  TH_TENSOR_APPLY2(real, r, real, t,
                   *r_data = *t_data < value ? *t_data : value;);
}
/* Resizes r_ to `size` and fills it with zeros. */
void THTensor_(zeros)(THTensor *r_, THLongStorage *size)
{
  THTensor_(resize)(r_, size, NULL);
  THTensor_(zero)(r_);
}
/* Resizes r_ to input's shape and fills it with zeros. */
void THTensor_(zerosLike)(THTensor *r_, THTensor *input)
{
  THTensor_(resizeAs)(r_, input);
  THTensor_(zero)(r_);
}
/* Resizes r_ to input's shape and fills it with ones. */
void THTensor_(onesLike)(THTensor *r_, THTensor *input)
{
  THTensor_(resizeAs)(r_, input);
  THTensor_(fill)(r_, 1);
}
/* Resizes r_ to `size` and fills it with ones. */
void THTensor_(ones)(THTensor *r_, THLongStorage *size)
{
  THTensor_(resize)(r_, size, NULL);
  THTensor_(fill)(r_, 1);
}
/* Diagonal constructor/extractor, selected by t's dimensionality:
   - t is 1D: r_ becomes a square matrix with t on the k-th diagonal
     (k > 0 above the main diagonal, k < 0 below).
   - t is 2D: r_ becomes a 1D tensor holding t's k-th diagonal. */
void THTensor_(diag)(THTensor *r_, THTensor *t, int k)
{
  THArgCheck(THTensor_(nDimension)(t) == 1 || THTensor_(nDimension)(t) == 2, 1, "matrix or a vector expected");

  if(THTensor_(nDimension)(t) == 1)
  {
    real *t_data = THTensor_(data)(t);
    long t_stride_0 = THTensor_(stride)(t, 0);
    long t_size = THTensor_(size)(t, 0);
    /* matrix must be large enough to hold the shifted diagonal */
    long sz = t_size + (k >= 0 ? k : -k);
    real *r__data;
    long r__stride_0;
    long r__stride_1;
    long i;

    THTensor_(resize2d)(r_, sz, sz);
    THTensor_(zero)(r_);
    r__data = THTensor_(data)(r_);
    r__stride_0 = THTensor_(stride)(r_, 0);
    r__stride_1 = THTensor_(stride)(r_, 1);
    /* shift the write origin to the start of the k-th diagonal */
    r__data += (k >= 0 ? k*r__stride_1 : -k*r__stride_0);

    for(i = 0; i < t_size; i++)
      r__data[i*(r__stride_0+r__stride_1)] = t_data[i*t_stride_0];
  }
  else
  {
    real *t_data = THTensor_(data)(t);
    long t_stride_0 = THTensor_(stride)(t, 0);
    long t_stride_1 = THTensor_(stride)(t, 1);
    long sz;
    real *r__data;
    long r__stride_0;
    long i;

    /* length of the k-th diagonal, clipped by the matrix bounds */
    if(k >= 0)
      sz = THMin(THTensor_(size)(t, 0), THTensor_(size)(t, 1)-k);
    else
      sz = THMin(THTensor_(size)(t, 0)+k, THTensor_(size)(t, 1));
    THTensor_(resize1d)(r_, sz);
    r__data = THTensor_(data)(r_);
    r__stride_0 = THTensor_(stride)(r_, 0);

    /* shift the read origin to the start of the k-th diagonal */
    t_data += (k >= 0 ? k*t_stride_1 : -k*t_stride_0);
    for(i = 0; i < sz; i++)
      r__data[i*r__stride_0] = t_data[i*(t_stride_0+t_stride_1)];
  }
}
/* Resizes r_ to an n x m matrix (m defaults to n when m <= 0) of zeros with
   ones on the main diagonal. */
void THTensor_(eye)(THTensor *r_, long n, long m)
{
  real *r__data;
  long i, sz;

  THArgCheck(n > 0, 1, "invalid argument");

  if(m <= 0)
    m = n;

  THTensor_(resize2d)(r_, n, m);
  THTensor_(zero)(r_);

  /* removed a dead `i = 0;` store here: the for-loop initializes i itself */
  r__data = THTensor_(data)(r_);
  sz = THMin(THTensor_(size)(r_, 0), THTensor_(size)(r_, 1));
  for(i = 0; i < sz; i++)
    /* element (i, i) lives at offset i*(stride[0] + stride[1]) */
    r__data[i*(r_->stride[0]+r_->stride[1])] = 1;
}
/* Fill r_ with the closed-interval sequence xmin, xmin+step, ..., up to and
   including xmax (when reachable).  step must be non-zero and its sign must
   agree with the direction from xmin to xmax.
   Fix: the second error message said "upper bound and larger bound" — a typo
   for "lower bound". */
void THTensor_(range)(THTensor *r_, accreal xmin, accreal xmax, accreal step)
{
  ptrdiff_t size;
  real i = 0;
  THArgCheck(step > 0 || step < 0, 3, "step must be a non-null number");
  THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin))
              , 2, "upper bound and lower bound incoherent with step sign");
  /* number of points, truncated towards zero */
  size = (ptrdiff_t) (((xmax - xmin) / step) + 1);
  if (THTensor_(nElement)(r_) != size) {
    THTensor_(resize1d)(r_, size);
  }
  TH_TENSOR_APPLY(real, r_, *r__data = xmin + (i++)*step;);
}
/* Half-open variant of range(): when (xmax - xmin) is an exact multiple of
   step, the endpoint xmax itself is excluded by stepping the bound back once
   before delegating to range(). */
void THTensor_(arange)(THTensor *r_, accreal xmin, accreal xmax, accreal step) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
  /* floating-point types: use fmod for the divisibility test */
  int m = fmod(xmax - xmin,step) == 0;
#else
  int m = (xmax - xmin) % step == 0;
#endif
  if (m)
    xmax -= step;
  THTensor_(range)(r_,xmin,xmax,step);
}
/* Fill r_ with a random permutation of 0..n-1 using a Fisher-Yates shuffle
   driven by _generator. */
void THTensor_(randperm)(THTensor *r_, THGenerator *_generator, long n)
{
  real *data;
  long stride;
  long idx;
  THArgCheck(n > 0, 1, "must be strictly positive");
  THTensor_(resize1d)(r_, n);
  data = THTensor_(data)(r_);
  stride = THTensor_(stride)(r_,0);
  /* start from the identity permutation */
  for(idx = 0; idx < n; idx++)
    data[idx*stride] = (real)(idx);
  /* swap each position with a uniformly chosen later (or same) position */
  for(idx = 0; idx < n-1; idx++)
  {
    long pick = idx + THRandom_random(_generator) % (n-idx);
    real held = data[idx*stride];
    data[idx*stride] = data[pick*stride];
    data[pick*stride] = held;
  }
}
/* Resize r_ to `size` and copy t's elements into it (element count must be
   compatible; the copy enforces that). */
void THTensor_(reshape)(THTensor *r_, THTensor *t, THLongStorage *size)
{
  THTensor_(resize)(r_, size, NULL);
  THTensor_(copy)(r_, t);
}
/* I cut and pasted (slightly adapted) the quicksort code from
Sedgewick's 1978 "Implementing Quicksort Programs" article
http://www.csie.ntu.edu.tw/~b93076/p847-sedgewick.pdf
It is the state of the art existing implementation. The macros
are here to make as close a match as possible to the pseudocode of
Program 2 p.851
Note that other partition schemes exist, and are typically presented
in textbooks, but those are less efficient. See e.g.
http://cs.stackexchange.com/questions/11458/quicksort-partitioning-hoare-vs-lomuto
Julien, November 12th 2013
*/
/* Maximum depth of the explicit quicksort stack. */
#define MAX_LEVELS 300
#define M_SMALL 10 /* Limit for small subfiles */
/* Strided element access into the value and index arrays; `stride` is a
   variable in the enclosing function's scope. */
#define ARR(III) arr[(III)*stride]
#define IDX(III) idx[(III)*stride]
/* Swap helpers; `swap` (long) and `rswap` (real) are scratch locals declared
   by the functions that use these macros. */
#define LONG_SWAP(AAA, BBB) swap = AAA; AAA = BBB; BBB = swap
#define REAL_SWAP(AAA, BBB) rswap = AAA; AAA = BBB; BBB = rswap
/* Swap values only. */
#define ARR_SWAP(III, JJJ) \
  REAL_SWAP(ARR(III), ARR(JJJ));
/* Swap values and their companion indices together. */
#define BOTH_SWAP(III, JJJ) \
  REAL_SWAP(ARR(III), ARR(JJJ)); \
  LONG_SWAP(IDX(III), IDX(JJJ))
/* In-place ascending quicksort of `elements` strided values in `arr`, with
   the companion index array `idx` permuted identically.  Follows Sedgewick's
   scheme (see comment above): explicit stack, median-of-three pivot, the
   smaller subfile processed first, and subfiles of <= M_SMALL elements left
   for a single final insertion-sort pass. */
static void THTensor_(quicksortascend)(real *arr, long *idx, long elements, long stride)
{
  long beg[MAX_LEVELS], end[MAX_LEVELS], i, j, L, R, P, swap, pid, stack = 0, sz_right, sz_left;
  real rswap, piv;
  unsigned char done = 0;

  /* beg[0]=0; end[0]=elements; */
  stack = 0;
  L = 0; R = elements-1;
  done = elements-1 <= M_SMALL;

  while(!done) {
      /* Use median of three for pivot choice */
      P=(L+R)>>1;
      BOTH_SWAP(P, L+1);
      if (ARR(L+1) > ARR(R)) { BOTH_SWAP(L+1, R); }
      if (ARR(L) > ARR(R)) { BOTH_SWAP(L, R); }
      if (ARR(L+1) > ARR(L)) { BOTH_SWAP(L+1, L); }

      /* partition around piv = ARR(L); ARR(L+1) and ARR(R) act as sentinels */
      i = L+1; j = R; piv = ARR(L); pid = IDX(L);
      do {
          do { i = i+1; } while(ARR(i) < piv);
          do { j = j-1; } while(ARR(j) > piv);
          if (j < i)
              break;
          BOTH_SWAP(i, j);
      } while(1);
      BOTH_SWAP(L, j);
      /* Left subfile is (L, j-1) */
      /* Right subfile is (i, R) */
      sz_left = j-L;
      sz_right = R-i+1;
      if (sz_left <= M_SMALL && sz_right <= M_SMALL) {
          /* both subfiles are small */
          /* if stack empty */
          if (stack == 0) {
              done = 1;
          } else {
              stack--;
              L = beg[stack];
              R = end[stack];
          }
      } else if (sz_left <= M_SMALL || sz_right <= M_SMALL) {
          /* exactly one of the subfiles is small */
          /* (L,R) = large subfile */
          if (sz_left > sz_right) {
              /* Implicit: L = L; */
              R = j-1;
          } else {
              L = i;
              /* Implicit: R = R; */
          }
      } else {
          /* none of the subfiles is small */
          /* push large subfile */
          /* (L,R) = small subfile */
          if (sz_left > sz_right) {
              beg[stack] = L;
              end[stack] = j-1;
              stack++;
              L = i;
              /* Implicit: R = R */
          } else {
              beg[stack] = i;
              end[stack] = R;
              stack++;
              /* Implicit: L = L; */
              R = j-1;
          }
      }
  } /* while not done */
  /* Now insertion sort on the concatenation of subfiles */
  for(i=elements-2; i>=0; i--) {
      if (ARR(i) > ARR(i+1)) {
          piv = ARR(i);
          pid = IDX(i);
          j = i+1;
          do {
              ARR(j-1) = ARR(j);
              IDX(j-1) = IDX(j);
              j = j+1;
          } while(j < elements && ARR(j) < piv);
          ARR(j-1) = piv;
          IDX(j-1) = pid;
      }
  }
}
/* Descending-order twin of quicksortascend: identical structure with every
   value comparison reversed.  Sorts `arr` in place (strided) and permutes
   `idx` identically. */
static void THTensor_(quicksortdescend)(real *arr, long *idx, long elements, long stride)
{
  long beg[MAX_LEVELS], end[MAX_LEVELS], i, j, L, R, P, swap, pid, stack = 0, sz_right, sz_left;
  real rswap, piv;
  unsigned char done = 0;

  /* beg[0]=0; end[0]=elements; */
  stack = 0;
  L = 0; R = elements-1;
  done = elements-1 <= M_SMALL;

  while(!done) {
      /* Use median of three for pivot choice */
      P=(L+R)>>1;
      BOTH_SWAP(P, L+1);
      if (ARR(L+1) < ARR(R)) { BOTH_SWAP(L+1, R); }
      if (ARR(L) < ARR(R)) { BOTH_SWAP(L, R); }
      if (ARR(L+1) < ARR(L)) { BOTH_SWAP(L+1, L); }

      /* partition around piv = ARR(L) (reversed comparisons) */
      i = L+1; j = R; piv = ARR(L); pid = IDX(L);
      do {
          do { i = i+1; } while(ARR(i) > piv);
          do { j = j-1; } while(ARR(j) < piv);
          if (j < i)
              break;
          BOTH_SWAP(i, j);
      } while(1);
      BOTH_SWAP(L, j);
      /* Left subfile is (L, j-1) */
      /* Right subfile is (i, R) */
      sz_left = j-L;
      sz_right = R-i+1;
      if (sz_left <= M_SMALL && sz_right <= M_SMALL) {
          /* both subfiles are small */
          /* if stack empty */
          if (stack == 0) {
              done = 1;
          } else {
              stack--;
              L = beg[stack];
              R = end[stack];
          }
      } else if (sz_left <= M_SMALL || sz_right <= M_SMALL) {
          /* exactly one of the subfiles is small */
          /* (L,R) = large subfile */
          if (sz_left > sz_right) {
              /* Implicit: L = L; */
              R = j-1;
          } else {
              L = i;
              /* Implicit: R = R; */
          }
      } else {
          /* none of the subfiles is small */
          /* push large subfile */
          /* (L,R) = small subfile */
          if (sz_left > sz_right) {
              beg[stack] = L;
              end[stack] = j-1;
              stack++;
              L = i;
              /* Implicit: R = R */
          } else {
              beg[stack] = i;
              end[stack] = R;
              stack++;
              /* Implicit: L = L; */
              R = j-1;
          }
      }
  } /* while not done */
  /* Now insertion sort on the concatenation of subfiles */
  for(i=elements-2; i>=0; i--) {
      if (ARR(i) < ARR(i+1)) {
          piv = ARR(i);
          pid = IDX(i);
          j = i+1;
          do {
              ARR(j-1) = ARR(j);
              IDX(j-1) = IDX(j);
              j = j+1;
          } while(j < elements && ARR(j) > piv);
          ARR(j-1) = piv;
          IDX(j-1) = pid;
      }
  }
}
#undef MAX_LEVELS
#undef M_SMALL
/* Sort t along `dimension` into rt_ (values) and ri_ (original positions,
   0-based within each slice).  descendingOrder selects the comparison
   direction.  Each 1-d slice is sorted independently via the strided
   quicksort routines above. */
void THTensor_(sort)(THTensor *rt_, THLongTensor *ri_, THTensor *t, int dimension, int descendingOrder)
{
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "invalid dimension %d",
      dimension + TH_INDEX_BASE);

  /* sort works in place on a copy of t */
  THTensor_(resizeAs)(rt_, t);
  THTensor_(copy)(rt_, t);

  {
    THLongStorage *size = THTensor_(newSizeOf)(t);
    THLongTensor_resize(ri_, size, NULL);
    THLongStorage_free(size);
  }

  if(descendingOrder)
  {
    TH_TENSOR_DIM_APPLY2(real, rt_, long, ri_, dimension,
                         long i;
                         /* seed indices 0..size-1, then co-sort them */
                         for(i = 0; i < ri__size; i++)
                           ri__data[i*ri__stride] = i;
                         THTensor_(quicksortdescend)(rt__data, ri__data, rt__size, rt__stride);)
  }
  else
  {
    TH_TENSOR_DIM_APPLY2(real, rt_, long, ri_, dimension,
                         long i;
                         for(i = 0; i < ri__size; i++)
                           ri__data[i*ri__stride] = i;
                         THTensor_(quicksortascend)(rt__data, ri__data, rt__size, rt__stride);)
  }
}
/* Implementation of the Quickselect algorithm, based on Nicolas Devillard's
public domain implementation at http://ndevilla.free.fr/median/median/
Adapted similarly to the above Quicksort algorithm.
This version does not produce indices along with values. */
/* Quickselect without index tracking: partially reorders `arr` (strided) so
   that ARR(k) holds the k-th smallest element, with everything before it
   <= ARR(k) and everything after >= ARR(k). */
static void THTensor_(quickselectnoidx)(real *arr, long k, long elements, long stride)
{
  long P, L, R, i, j, swap;
  real rswap, piv;
  L = 0;
  R = elements-1;

  do {
    if (R <= L) /* One element only */
      return;

    if (R == L+1) {  /* Two elements only */
      if (ARR(L) > ARR(R)) {
        ARR_SWAP(L, R);
      }
      return;
    }

    /* Use median of three for pivot choice */
    P=(L+R)>>1;
    ARR_SWAP(P, L+1);
    if (ARR(L+1) > ARR(R)) { ARR_SWAP(L+1, R); }
    if (ARR(L) > ARR(R)) { ARR_SWAP(L, R); }
    if (ARR(L+1) > ARR(L)) { ARR_SWAP(L+1, L); }

    /* partition around piv = ARR(L) */
    i = L+1;
    j = R;
    piv = ARR(L);
    do {
      do i++; while(ARR(i) < piv);
      do j--; while(ARR(j) > piv);
      if (j < i)
        break;
      ARR_SWAP(i, j);
    } while(1);
    ARR_SWAP(L, j);

    /* Re-set active partition: keep only the side containing index k */
    if (j <= k) L=i;
    if (j >= k) R=j-1;
  } while(1);
}
/* Implementation of the Quickselect algorithm, based on Nicolas Devillard's
public domain implementation at http://ndevilla.free.fr/median/median/
Adapted similarly to the above Quicksort algorithm. */
/* Quickselect with a companion index array: partially reorders `arr` (and
   `idx` identically) so that ARR(k) holds the k-th smallest element, with
   everything before it <= ARR(k) and everything after >= ARR(k). */
static void THTensor_(quickselect)(real *arr, long *idx, long k, long elements, long stride)
{
  long P, L, R, i, j, swap, pid;
  real rswap, piv;
  L = 0;
  R = elements-1;

  do {
    if (R <= L) /* One element only */
      return;

    if (R == L+1) {  /* Two elements only */
      if (ARR(L) > ARR(R)) {
        BOTH_SWAP(L, R);
      }
      return;
    }

    /* Use median of three for pivot choice */
    P=(L+R)>>1;
    BOTH_SWAP(P, L+1);
    if (ARR(L+1) > ARR(R)) { BOTH_SWAP(L+1, R); }
    if (ARR(L) > ARR(R)) { BOTH_SWAP(L, R); }
    if (ARR(L+1) > ARR(L)) { BOTH_SWAP(L+1, L); }

    /* partition around piv = ARR(L) */
    i = L+1;
    j = R;
    piv = ARR(L);
    pid = IDX(L);
    do {
      do i++; while(ARR(i) < piv);
      do j--; while(ARR(j) > piv);
      if (j < i)
        break;
      BOTH_SWAP(i, j);
    } while(1);
    BOTH_SWAP(L, j);

    /* Re-set active partition: keep only the side containing index k */
    if (j <= k) L=i;
    if (j >= k) R=j-1;
  } while(1);
}
#undef ARR
#undef IDX
#undef LONG_SWAP
#undef REAL_SWAP
#undef BOTH_SWAP
/* Along `dimension`, compute the most frequent value (the mode) and the
   original index of its occurrence.  Each slice is copied into a contiguous
   scratch buffer, sorted together with its indices, and scanned for the
   longest run of equal values.  If keepdim is false, the reduced dimension
   is squeezed from both outputs. */
void THTensor_(mode)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim)
{
  THLongStorage *dim;
  THTensor *temp_;
  THLongTensor *tempi_;
  real *temp__data;
  long *tempi__data;
  long t_size_dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range");

  /* outputs keep t's shape with `dimension` reduced to size 1 */
  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(values_, dim, NULL);
  THLongTensor_resize(indices_, dim, NULL);
  THLongStorage_free(dim);

  t_size_dim = THTensor_(size)(t, dimension);

  /* contiguous scratch buffers reused for every slice */
  temp_ = THTensor_(new)();
  THTensor_(resize1d)(temp_, t_size_dim);
  temp__data = THTensor_(data)(temp_);

  tempi_ = THLongTensor_new();
  THLongTensor_resize1d(tempi_, t_size_dim);
  tempi__data = THLongTensor_data(tempi_);

  TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension,
                       long i;
                       real mode = 0;
                       long modei = 0;
                       long temp_freq = 0;
                       long max_freq = 0;
                       for(i = 0; i < t_size_dim; i++)
                          temp__data[i] = t_data[i*t_stride];
                       for(i = 0; i < t_size_dim; i++)
                          tempi__data[i] = i;
                       /* sorting groups equal values into consecutive runs */
                       THTensor_(quicksortascend)(temp__data, tempi__data, t_size_dim, 1);

                       for(i = 0; i < t_size_dim; i++)
                       {
                          temp_freq++;
                          /* end of a run of equal values (or end of slice) */
                          if ((i == t_size_dim - 1) || (temp__data[i] != temp__data[i+1]))
                          {
                             if (temp_freq > max_freq)
                             {
                                mode = temp__data[i];
                                modei = tempi__data[i];
                                max_freq = temp_freq;
                             }
                             temp_freq = 0;
                          }
                       }
                       *values__data = mode;
                       *indices__data = modei;);

  THTensor_(free)(temp_);
  THLongTensor_free(tempi_);
  if (!keepdim) {
    THTensor_(squeeze1d)(values_, values_, dimension);
    THLongTensor_squeeze1d(indices_, indices_, dimension);
  }
}
/* Along `dimension`, find the k-th smallest value (k is 1-based) and the
   original index of that value within the slice.  Each slice is copied into
   a contiguous scratch buffer and partially ordered with quickselect.  If
   keepdim is false, the reduced dimension is squeezed from both outputs. */
void THTensor_(kthvalue)(THTensor *values_, THLongTensor *indices_, THTensor *t, long k, int dimension, int keepdim)
{
  THLongStorage *dim;
  THTensor *temp_;
  THLongTensor *tempi_;
  real *temp__data;
  long *tempi__data;
  long t_size_dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range");
  THArgCheck(k > 0 && k <= t->size[dimension], 2, "selected index out of range");

  /* outputs keep t's shape with `dimension` reduced to size 1 */
  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(values_, dim, NULL);
  THLongTensor_resize(indices_, dim, NULL);
  THLongStorage_free(dim);

  t_size_dim = THTensor_(size)(t, dimension);

  /* contiguous scratch buffers reused for every slice */
  temp_ = THTensor_(new)();
  THTensor_(resize1d)(temp_, t_size_dim);
  temp__data = THTensor_(data)(temp_);

  tempi_ = THLongTensor_new();
  THLongTensor_resize1d(tempi_, t_size_dim);
  tempi__data = THLongTensor_data(tempi_);

  TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension,
                       long i;
                       for(i = 0; i < t_size_dim; i++)
                          temp__data[i] = t_data[i*t_stride];
                       for(i = 0; i < t_size_dim; i++)
                          tempi__data[i] = i;
                       /* after quickselect, slot k-1 holds the k-th smallest */
                       THTensor_(quickselect)(temp__data, tempi__data, k - 1, t_size_dim, 1);
                       *values__data = temp__data[k-1];
                       *indices__data = tempi__data[k-1];);

  THTensor_(free)(temp_);
  THLongTensor_free(tempi_);
  if (!keepdim) {
    THTensor_(squeeze1d)(values_, values_, dimension);
    THLongTensor_squeeze1d(indices_, indices_, dimension);
  }
}
/* Median along `dimension`, implemented on top of kthvalue. */
void THTensor_(median)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim)
{
  long size_dim, mid;
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range");
  size_dim = THTensor_(size)(t, dimension);
  /* for even sizes this selects the lower of the two middle elements */
  mid = (size_dim-1) >> 1;
  THTensor_(kthvalue)(values_, indices_, t, mid+1, dimension, keepdim);
}
/* Top-k along `dim`: the k largest (dir != 0) or k smallest (dir == 0)
   elements of each slice, with their original indices.  When `sorted` is
   non-zero the k results are ordered (descending for largest, ascending for
   smallest); otherwise their order within the k is unspecified. */
void THTensor_(topk)(THTensor *rt_, THLongTensor *ri_, THTensor *t, long k, int dim, int dir, int sorted)
{
  int numDims = THTensor_(nDimension)(t);
  THArgCheck(dim >= 0 && dim < numDims, 3, "dim not in range");

  long sliceSize = THTensor_(size)(t, dim);
  THArgCheck(k > 0 && k <= sliceSize, 2, "k not in range for dimension");

  /* contiguous scratch copies of one slice and its indices */
  THTensor *tmpResults = THTensor_(new)();
  THTensor_(resize1d)(tmpResults, sliceSize);
  real *tmp__data = THTensor_(data)(tmpResults);

  THLongTensor *tmpIndices = THLongTensor_new();
  THLongTensor_resize1d(tmpIndices, sliceSize);
  long *tmpi__data = THLongTensor_data(tmpIndices);

  /* outputs keep t's shape with `dim` shrunk to k */
  THLongStorage *topKSize = THTensor_(newSizeOf)(t);
  THLongStorage_set(topKSize, dim, k);
  THTensor_(resize)(rt_, topKSize, NULL);
  THLongTensor_resize(ri_, topKSize, NULL);
  THLongStorage_free(topKSize);

  if (dir) {
    /* k largest elements, descending order (optional: see sorted) */
    /* quickselect on K-1 pushes the k largest into positions K..sliceSize-1 */
    long K = sliceSize - k;
    TH_TENSOR_DIM_APPLY3(real, t, real, rt_, long, ri_, dim,
                         long i;
                         for(i = 0; i < sliceSize; i++)
                         {
                           tmp__data[i] = t_data[i*t_stride];
                           tmpi__data[i] = i;
                         }
                         if (K > 0)
                           THTensor_(quickselect)(tmp__data, tmpi__data, K - 1, sliceSize, 1);
                         if (sorted)
                           THTensor_(quicksortdescend)(tmp__data + K, tmpi__data + K, k, 1);
                         for(i = 0; i < k; i++)
                         {
                           rt__data[i*rt__stride] = tmp__data[i + K];
                           ri__data[i*ri__stride] = tmpi__data[i + K];
                         })
  }
  else {
    /* k smallest elements, ascending order (optional: see sorted) */
    TH_TENSOR_DIM_APPLY3(real, t, real, rt_, long, ri_, dim,
                         long i;
                         for(i = 0; i < sliceSize; i++)
                         {
                           tmp__data[i] = t_data[i*t_stride];
                           tmpi__data[i] = i;
                         }
                         /* quickselect leaves the k smallest in slots 0..k-1
                            with slot k-1 already the largest of them, so only
                            the first k-1 slots still need sorting */
                         THTensor_(quickselect)(tmp__data, tmpi__data, k - 1, sliceSize, 1);
                         if (sorted)
                           THTensor_(quicksortascend)(tmp__data, tmpi__data, k - 1, 1);
                         for(i = 0; i < k; i++)
                         {
                           rt__data[i*rt__stride] = tmp__data[i];
                           ri__data[i*ri__stride] = tmpi__data[i];
                         })
  }

  THTensor_(free)(tmpResults);
  THLongTensor_free(tmpIndices);
}
/* Lower-triangular part of matrix t: elements on or below the k-th diagonal
   are copied into r_, everything above it is set to zero.  r_ may alias t. */
void THTensor_(tril)(THTensor *r_, THTensor *t, long k)
{
  long nrow, ncol;
  long tstr0, tstr1;
  long rstr0, rstr1;
  real *src, *dst;
  long row, col;

  THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix");

  THTensor_(resizeAs)(r_, t);
  nrow = THTensor_(size)(t, 0);
  ncol = THTensor_(size)(t, 1);
  tstr0 = THTensor_(stride)(t, 0);
  tstr1 = THTensor_(stride)(t, 1);
  rstr0 = THTensor_(stride)(r_, 0);
  rstr1 = THTensor_(stride)(r_, 1);
  dst = THTensor_(data)(r_);
  src = THTensor_(data)(t);

  for(row = 0; row < nrow; row++)
  {
    /* columns strictly beyond row+k become zero, the rest are copied */
    long bound = row + k + 1;
    for(col = 0; col < ncol; col++)
      dst[row*rstr0 + col*rstr1] = (col < bound) ? src[row*tstr0 + col*tstr1] : 0;
  }
}
/* Upper-triangular part of matrix t: elements on or above the k-th diagonal
   are copied into r_, everything below it is set to zero.  r_ may alias t. */
void THTensor_(triu)(THTensor *r_, THTensor *t, long k)
{
  long nrow, ncol;
  long tstr0, tstr1;
  long rstr0, rstr1;
  real *src, *dst;
  long row, col;

  THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix");

  THTensor_(resizeAs)(r_, t);
  nrow = THTensor_(size)(t, 0);
  ncol = THTensor_(size)(t, 1);
  tstr0 = THTensor_(stride)(t, 0);
  tstr1 = THTensor_(stride)(t, 1);
  rstr0 = THTensor_(stride)(r_, 0);
  rstr1 = THTensor_(stride)(r_, 1);
  dst = THTensor_(data)(r_);
  src = THTensor_(data)(t);

  for(row = 0; row < nrow; row++)
  {
    /* columns before row+k become zero, the rest are copied */
    long bound = row + k;
    for(col = 0; col < ncol; col++)
      dst[row*rstr0 + col*rstr1] = (col < bound) ? 0 : src[row*tstr0 + col*tstr1];
  }
}
/* Concatenate two tensors along `dimension`; thin wrapper over catArray. */
void THTensor_(cat)(THTensor *r_, THTensor *ta, THTensor *tb, int dimension)
{
  THTensor* inputs[2] = { ta, tb };
  THTensor_(catArray)(r_, inputs, 2, dimension);
}
/* Concatenate numInputs tensors along `dimension` into `result`.  Empty
   tensors are permitted and contribute nothing; non-cat dimensions must
   agree across all non-empty inputs.  A fast memcpy path is used when
   catting along dimension 0 with all tensors (and the result) contiguous. */
void THTensor_(catArray)(THTensor *result, THTensor **inputs, int numInputs, int dimension)
{
  THLongStorage *size;
  int i, j;
  long offset;
  int maxDim = dimension + 1;
  int allEmpty = 1;
  int allContiguous = 1;

  // cat_dimension is the actual dimension we cat along
  int cat_dimension = dimension;

  for (i = 0; i < numInputs; i++)
  {
    maxDim = THMax(maxDim, inputs[i]->nDimension);
  }

  // When the user input dimension is -1 (i.e. -2 in C)
  // Then we pick the maximum last dimension across all tensors.
  if ( dimension + TH_INDEX_BASE == -1 )
  {
    cat_dimension = maxDim?(maxDim-1):0;
  }

  THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs);
  THArgCheck(cat_dimension >= 0, 4, "invalid dimension %d", dimension + TH_INDEX_BASE);

  /* Negotiate the output size dimension by dimension */
  size = THLongStorage_newWithSize(maxDim);

  for(i = 0; i < maxDim; i++)
  {
    // dimSize is either the size of the dim if it exists, either 1 if #dim > 0, otherwise 0
    long dimSize = i < inputs[0]->nDimension ? inputs[0]->size[i] : THMin(inputs[0]->nDimension, 1);
    if (i == cat_dimension)
    {
      for (j = 1; j < numInputs; j++)
      {
        // accumulate the size over the dimension we want to cat on.
        // Empty tensors are allowed
        dimSize += i < inputs[j]->nDimension ? inputs[j]->size[i] : THMin(inputs[j]->nDimension, 1);
      }
    }
    else
    {
      for (j = 1; j < numInputs; j++)
      {
        long sz = (i < inputs[j]->nDimension ? inputs[j]->size[i] : THMin(inputs[j]->nDimension, 1));
        // If it's a dimension we're not catting on
        // Then fail if sizes are different AND > 0
        if (dimSize != sz && dimSize && sz)
        {
          THLongStorage_free(size);
          THError("inconsistent tensor sizes");
        }
        else if(!dimSize)
        {
          dimSize = sz;
        }
      }
    }
    allEmpty = allEmpty && !dimSize;
    size->data[i] = dimSize;
  }

  // Initiate catting and resizing
  // If at least one of the input is not empty
  if (!allEmpty)
  {
    THTensor_(resize)(result, size, NULL);

    // Check contiguity of all inputs and result
    for (i = 0; i < numInputs; i++) {
      if(inputs[i]->nDimension) {
        allContiguous = allContiguous && THTensor_(isContiguous)(inputs[i]);
      }
    }
    allContiguous = allContiguous && THTensor_(isContiguous)(result);

    // First path is for contiguous inputs along dim 1
    // Second path for non-contiguous
    if (cat_dimension == 0 && allContiguous)
    {
      /* fast path: straight memcpy of each input's flat data */
      real* result_data = result->storage->data + result->storageOffset;
      offset = 0;
      for (j = 0; j < numInputs; j++)
      {
        if (inputs[j]->nDimension)
        {
          THTensor* input0 = inputs[j];
          real* input0_data = input0->storage->data + input0->storageOffset;
          long input0_size = THTensor_(nElement)(input0);
          memcpy(result_data + offset, input0_data, input0_size*sizeof(real));
          offset += input0_size;
        }
      }
    }
    else
    {
      /* general path: copy each input into a narrowed view of the result */
      offset = 0;
      for (j = 0; j < numInputs; j++)
      {
        if (inputs[j]->nDimension)
        {
          long dimSize = cat_dimension < inputs[j]->nDimension ? inputs[j]->size[cat_dimension] : 1;
          THTensor *nt = THTensor_(newWithTensor)(result);
          THTensor_(narrow)(nt, NULL, cat_dimension, offset, dimSize);
          THTensor_(copy)(nt, inputs[j]);
          THTensor_(free)(nt);
          offset += dimSize;
        }
      }
    }
  }
  THLongStorage_free(size);
}
/* Return 1 if ta and tb have the same shape and all elements compare equal,
   0 otherwise.  Uses a direct pointer scan when both are contiguous. */
int THTensor_(equal)(THTensor *ta, THTensor* tb)
{
  int equal = 1;
  if(!THTensor_(isSameSizeAs)(ta, tb))
    return 0;

  if (THTensor_(isContiguous)(ta) && THTensor_(isContiguous)(tb)) {
    real *tap = THTensor_(data)(ta);
    real *tbp = THTensor_(data)(tb);
    ptrdiff_t sz = THTensor_(nElement)(ta);
    ptrdiff_t i;
    for (i=0; i<sz; ++i){
      if(tap[i] != tbp[i]) return 0;
    }
  } else {
    // Short-circuit the apply function on inequality
    TH_TENSOR_APPLY2(real, ta, real, tb,
                     if (equal && *ta_data != *tb_data) {
                       equal = 0;
                       TH_TENSOR_APPLY_hasFinished = 1; break;
                     })
  }
  return equal;
}
/* Generate the four element-wise comparison entry points for operator OP:
   - NAMEValue:   byte mask of (t OP value)
   - NAMEValueT:  same mask stored in a real-typed tensor
   - NAMETensor:  byte mask of (ta OP tb), element-wise
   - NAMETensorT: same mask stored in a real-typed tensor */
#define TENSOR_IMPLEMENT_LOGICAL(NAME,OP) \
  void THTensor_(NAME##Value)(THByteTensor *r_, THTensor* t, real value) \
  { \
    THByteTensor_resizeNd(r_, t->nDimension, t->size, NULL); \
    TH_TENSOR_APPLY2(unsigned char, r_, real, t, \
                     *r__data = (*t_data OP value) ? 1 : 0;); \
  } \
  void THTensor_(NAME##ValueT)(THTensor* r_, THTensor* t, real value) \
  { \
    THTensor_(resizeNd)(r_, t->nDimension, t->size, NULL); \
    TH_TENSOR_APPLY2(real, r_, real, t, \
                     *r__data = (*t_data OP value) ? 1 : 0;); \
  } \
  void THTensor_(NAME##Tensor)(THByteTensor *r_, THTensor *ta, THTensor *tb) \
  { \
    THByteTensor_resizeNd(r_, ta->nDimension, ta->size, NULL); \
    TH_TENSOR_APPLY3(unsigned char, r_, real, ta, real, tb, \
                     *r__data = (*ta_data OP *tb_data) ? 1 : 0;); \
  } \
  void THTensor_(NAME##TensorT)(THTensor *r_, THTensor *ta, THTensor *tb) \
  { \
    THTensor_(resizeNd)(r_, ta->nDimension, ta->size, NULL); \
    TH_TENSOR_APPLY3(real, r_, real, ta, real, tb, \
                     *r__data = (*ta_data OP *tb_data) ? 1 : 0;); \
  } \
/* instantiate the comparison family for each operator */
TENSOR_IMPLEMENT_LOGICAL(lt,<)
TENSOR_IMPLEMENT_LOGICAL(gt,>)
TENSOR_IMPLEMENT_LOGICAL(le,<=)
TENSOR_IMPLEMENT_LOGICAL(ge,>=)
TENSOR_IMPLEMENT_LOGICAL(eq,==)
TENSOR_IMPLEMENT_LOGICAL(ne,!=)
/* Generate an element-wise unary entry point: r_ = CFUNC applied to every
   element of t (CFUNC may be a function name or a prefix operator). */
#define LAB_IMPLEMENT_BASIC_FUNCTION(NAME, CFUNC) \
  void THTensor_(NAME)(THTensor *r_, THTensor *t) \
  { \
    THTensor_(resizeAs)(r_, t); \
    TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data);); \
  } \
/* integral instantiations (floating-point ones are further below) */
#if defined(TH_REAL_IS_LONG)
LAB_IMPLEMENT_BASIC_FUNCTION(abs,labs)
LAB_IMPLEMENT_BASIC_FUNCTION(neg,-)
#endif /* long only part */

#if defined(TH_REAL_IS_SHORT) || defined(TH_REAL_IS_INT)
LAB_IMPLEMENT_BASIC_FUNCTION(abs,abs)
LAB_IMPLEMENT_BASIC_FUNCTION(neg,-)
#endif /* int only part */
#if defined(TH_REAL_IS_BYTE)

/* Generate a whole-tensor boolean reduction: fold every element into an int
   accumulator with OP, starting from INIT_VALUE. */
#define TENSOR_IMPLEMENT_LOGICAL_SUM(NAME, OP, INIT_VALUE) \
  int THTensor_(NAME)(THTensor *tensor) \
  { \
    THArgCheck(tensor->nDimension > 0, 1, "empty Tensor"); \
    int sum = INIT_VALUE;               \
    TH_TENSOR_APPLY(real, tensor, sum = sum OP *tensor_data;); \
    return sum; \
  }

/* all-nonzero / any-nonzero tests for byte tensors */
TENSOR_IMPLEMENT_LOGICAL_SUM(logicalall, &&, 1)
TENSOR_IMPLEMENT_LOGICAL_SUM(logicalany, ||, 0)

#endif /* Byte only part */
/* floating point only now */
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)

/* TH_MATH_NAME maps a libm name to its float variant (sinf, ...) when real
   is float, and leaves it untouched for double. */
#if defined (TH_REAL_IS_FLOAT)
#define TH_MATH_NAME(fn) fn##f
#else
#define TH_MATH_NAME(fn) fn
#endif

/* element-wise math functions, generated from the macro above */
LAB_IMPLEMENT_BASIC_FUNCTION(log,TH_MATH_NAME(log))
LAB_IMPLEMENT_BASIC_FUNCTION(lgamma,TH_MATH_NAME(lgamma))
LAB_IMPLEMENT_BASIC_FUNCTION(log1p,TH_MATH_NAME(log1p))
LAB_IMPLEMENT_BASIC_FUNCTION(sigmoid,TH_MATH_NAME(TH_sigmoid))
LAB_IMPLEMENT_BASIC_FUNCTION(exp,TH_MATH_NAME(exp))
LAB_IMPLEMENT_BASIC_FUNCTION(cos,TH_MATH_NAME(cos))
LAB_IMPLEMENT_BASIC_FUNCTION(acos,TH_MATH_NAME(acos))
LAB_IMPLEMENT_BASIC_FUNCTION(cosh,TH_MATH_NAME(cosh))
LAB_IMPLEMENT_BASIC_FUNCTION(sin,TH_MATH_NAME(sin))
LAB_IMPLEMENT_BASIC_FUNCTION(asin,TH_MATH_NAME(asin))
LAB_IMPLEMENT_BASIC_FUNCTION(sinh,TH_MATH_NAME(sinh))
LAB_IMPLEMENT_BASIC_FUNCTION(tan,TH_MATH_NAME(tan))
LAB_IMPLEMENT_BASIC_FUNCTION(atan,TH_MATH_NAME(atan))
LAB_IMPLEMENT_BASIC_FUNCTION(tanh,TH_MATH_NAME(tanh))
LAB_IMPLEMENT_BASIC_FUNCTION(sqrt,TH_MATH_NAME(sqrt))
LAB_IMPLEMENT_BASIC_FUNCTION(rsqrt,TH_MATH_NAME(TH_rsqrt))
LAB_IMPLEMENT_BASIC_FUNCTION(ceil,TH_MATH_NAME(ceil))
LAB_IMPLEMENT_BASIC_FUNCTION(floor,TH_MATH_NAME(floor))
LAB_IMPLEMENT_BASIC_FUNCTION(round,TH_MATH_NAME(round))
LAB_IMPLEMENT_BASIC_FUNCTION(abs,TH_MATH_NAME(fabs))
LAB_IMPLEMENT_BASIC_FUNCTION(trunc,TH_MATH_NAME(trunc))
LAB_IMPLEMENT_BASIC_FUNCTION(frac,TH_MATH_NAME(TH_frac))
LAB_IMPLEMENT_BASIC_FUNCTION(neg,-)
LAB_IMPLEMENT_BASIC_FUNCTION(cinv, TH_MATH_NAME(1.0) / )
/* Element-wise power: r_ = t ^ value.  Common exponents (1, 2, 3, 0.5,
   -0.5, -1, -2) are special-cased to avoid a libm pow call per element. */
void THTensor_(pow)(THTensor *r_, THTensor *t, real value)
{
  THTensor_(resizeAs)(r_, t);
  if(value == 1){
    THTensor_(copy)(r_, t);
  }
  else if(value == 2){
    THTensor_(cmul)(r_, t, t);
  }
  else if(value == 3){
    TH_TENSOR_APPLY2(real, t, real, r_, *r__data = *t_data * *t_data * *t_data;);
  }
  else if(value == 0.5){
    THTensor_(sqrt)(r_, t);
  }
  else if(value == -0.5){
    THTensor_(rsqrt)(r_, t);
  }
  else if(value == -1){
    THTensor_(cinv)(r_, t);
  }
  else if(value == -2){
    TH_TENSOR_APPLY2(real, t, real, r_, *r__data = TH_MATH_NAME(1.0) / (*t_data * *t_data););
  }
  else{
    /* general case */
    TH_TENSOR_APPLY2(real, t, real, r_, *r__data = TH_MATH_NAME(pow)(*t_data, value););
  }
}
/* Element-wise two-argument arctangent: r_ = atan2(tx, ty). */
void THTensor_(atan2)(THTensor *r_, THTensor *tx, THTensor *ty)
{
  THTensor_(resizeAs)(r_, tx);
  TH_TENSOR_APPLY3(real, r_, real, tx, real, ty, *r__data = TH_MATH_NAME(atan2)(*tx_data,*ty_data););
}
/* Element-wise linear interpolation: r_ = a + weight * (b - a). */
void THTensor_(lerp)(THTensor *r_, THTensor *a, THTensor *b, real weight)
{
  THArgCheck(THTensor_(nElement)(a) == THTensor_(nElement)(b), 2, "sizes do not match");
  THTensor_(resizeAs)(r_, a);
  TH_TENSOR_APPLY3(real, r_, real, a, real, b, *r__data = TH_MATH_NAME(TH_lerp)(*a_data, *b_data, weight););
}
/* Mean along `dimension`: sum reduction followed by division by the
   dimension's size. */
void THTensor_(mean)(THTensor *r_, THTensor *t, int dimension, int keepdim)
{
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "invalid dimension %d",
      dimension + TH_INDEX_BASE);

  THTensor_(sum)(r_, t, dimension, keepdim);
  THTensor_(div)(r_, r_, t->size[dimension]);
}
/* Standard deviation along `dimension`, computed from running sums of x and
   x^2.  biased selects the 1/n estimator; otherwise Bessel's 1/(n-1)
   correction is applied.  The variance is clamped at 0 to guard against
   negative values from floating-point cancellation. */
void THTensor_(std)(THTensor *r_, THTensor *t, int dimension, int biased, int keepdim)
{
  THLongStorage *dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d",
      dimension + TH_INDEX_BASE);

  /* output keeps t's shape with `dimension` reduced to size 1 */
  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(r_, dim, NULL);
  THLongStorage_free(dim);

  TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                       accreal sum = 0;
                       accreal sum2 = 0;
                       long i;
                       for(i = 0; i < t_size; i++)
                       {
                         real z = t_data[i*t_stride];
                         sum += z;
                         sum2 += z*z;
                       }

                       if(biased)
                       {
                         sum /= t_size;
                         sum2 /= t_size;
                         sum2 -= sum*sum;
                         sum2 = (sum2 < 0 ? 0 : sum2);
                         *r__data = (real)TH_MATH_NAME(sqrt)(sum2);
                       }
                       else
                       {
                         sum /= t_size;
                         sum2 /= t_size-1;
                         sum2 -= ((real)t_size)/((real)(t_size-1))*sum*sum;
                         sum2 = (sum2 < 0 ? 0 : sum2);
                         *r__data = (real)TH_MATH_NAME(sqrt)(sum2);
                       });

  if (!keepdim) {
    THTensor_(squeeze1d)(r_, r_, dimension);
  }
}
/* Variance along `dimension`; identical to std() above except that the
   final square root is omitted. */
void THTensor_(var)(THTensor *r_, THTensor *t, int dimension, int biased, int keepdim)
{
  THLongStorage *dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d",
      dimension + TH_INDEX_BASE);

  /* output keeps t's shape with `dimension` reduced to size 1 */
  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(r_, dim, NULL);
  THLongStorage_free(dim);

  TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                       accreal sum = 0;
                       accreal sum2 = 0;
                       long i;
                       for(i = 0; i < t_size; i++)
                       {
                         real z = t_data[i*t_stride];
                         sum += z;
                         sum2 += z*z;
                       }

                       if(biased)
                       {
                         sum /= t_size;
                         sum2 /= t_size;
                         sum2 -= sum*sum;
                         sum2 = (sum2 < 0 ? 0 : sum2);
                         *r__data = sum2;
                       }
                       else
                       {
                         sum /= t_size;
                         sum2 /= t_size-1;
                         sum2 -= ((real)t_size)/((real)(t_size-1))*sum*sum;
                         sum2 = (sum2 < 0 ? 0 : sum2);
                         *r__data = (real)sum2;
                       });

  if (!keepdim) {
    THTensor_(squeeze1d)(r_, r_, dimension);
  }
}
/* p-norm along `dimension`.  value == 0 counts non-zero entries (the "0-norm"
   convention); otherwise sum(|x|^p)^(1/p) is computed. */
void THTensor_(norm)(THTensor *r_, THTensor *t, real value, int dimension, int keepdim)
{
  THLongStorage *dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d",
      dimension + TH_INDEX_BASE);

  /* output keeps t's shape with `dimension` reduced to size 1 */
  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(r_, dim, NULL);
  THLongStorage_free(dim);

  if(value == 0) {
    TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                         accreal sum = 0;
                         long i;
                         for(i = 0; i < t_size; i++)
                           sum += t_data[i*t_stride] != 0.0;
                         *r__data = sum;)
  } else {
    TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                         accreal sum = 0;
                         long i;
                         for(i = 0; i < t_size; i++) {
                           sum += TH_MATH_NAME(pow)(
                             TH_MATH_NAME(fabs)(t_data[i*t_stride]), value);
                         }
                         *r__data = TH_MATH_NAME(pow)(sum, 1.0/value);)
  }

  if (!keepdim) {
    THTensor_(squeeze1d)(r_, r_, dimension);
  }
}
/* p-norm over the whole tensor.  value == 0 counts non-zero entries; 1 and 2
   are special-cased (sum of absolutes, Euclidean); otherwise the general
   sum(|x|^p)^(1/p) formula is used. */
accreal THTensor_(normall)(THTensor *tensor, real value)
{
  accreal sum = 0;
  if(value == 0) {
    TH_TENSOR_APPLY(real, tensor, sum += *tensor_data != 0.0;);
    return sum;
  } else if(value == 1) {
    TH_TENSOR_APPLY(real, tensor, sum += TH_MATH_NAME(fabs)(*tensor_data););
    return sum;
  } else if(value == 2) {
    TH_TENSOR_APPLY(real, tensor, accreal z = *tensor_data; sum += z*z;);
    return sqrt(sum);
  } else {
    TH_TENSOR_APPLY(real, tensor, sum += TH_MATH_NAME(pow)(TH_MATH_NAME(fabs)(*tensor_data), value););
    return TH_MATH_NAME(pow)(sum, 1.0/value);
  }
}
/* Renormalize each sub-tensor along `dimension` so that its p-norm does not
   exceed maxnorm: slices over the limit are scaled by maxnorm/(norm + 1e-7),
   the rest are copied unchanged. */
void THTensor_(renorm)(THTensor *res, THTensor *src, real value, int dimension, real maxnorm)
{
  int i;
  THTensor *rowR, *rowS;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(src), 3, "invalid dimension %d",
      dimension + TH_INDEX_BASE);
  THArgCheck(value > 0, 2, "non-positive-norm not supported");
  THArgCheck(THTensor_(nDimension)(src) > 1, 1, "need at least 2 dimensions, got %d dimensions",
      THTensor_(nDimension)(src));

  rowR = THTensor_(new)();
  rowS = THTensor_(new)();

  THTensor_(resizeAs)(res, src);

  for (i=0; i<src->size[dimension]; i++)
  {
    real norm = 0;
    real new_norm;

    THTensor_(select)(rowS, src, dimension, i);
    THTensor_(select)(rowR, res, dimension, i);
    if (value == 1) {
      /* NOTE(review): uses plain fabs/pow (double) here rather than
         TH_MATH_NAME as elsewhere — presumably intentional extra precision;
         changing it would perturb float results, so it is left as-is. */
      TH_TENSOR_APPLY(real, rowS, norm += fabs(*rowS_data););
    } else if (value == 2) {
      TH_TENSOR_APPLY(real, rowS, accreal z = *rowS_data; norm += z*z;);
    } else {
      TH_TENSOR_APPLY(real, rowS, norm += TH_MATH_NAME(pow)(TH_MATH_NAME(fabs)(*rowS_data), value););
    }

    norm = pow(norm, 1/value);

    if (norm > maxnorm)
    {
      /* epsilon keeps the scale finite when norm is tiny */
      new_norm = maxnorm / (norm + 1e-7);

      TH_TENSOR_APPLY2(
        real, rowR, real, rowS,
        *rowR_data = (*rowS_data) * new_norm;
      )
    }
    else
      THTensor_(copy)(rowR, rowS);
  }

  THTensor_(free)(rowR);
  THTensor_(free)(rowS);
}
/* p-distance between two tensors: sum(|a - b|^p)^(1/p).
   Fix: the accumulator was declared `real` although the function returns
   `accreal` and the sibling normall() accumulates in accreal — for the float
   instantiation this silently lost precision on every addition.  Accumulate
   in accreal instead. */
accreal THTensor_(dist)(THTensor *tensor, THTensor *src, real value)
{
  accreal sum = 0;
  TH_TENSOR_APPLY2(real, tensor, real, src,
                   sum += TH_MATH_NAME(pow)(
                     TH_MATH_NAME(fabs)(*tensor_data - *src_data), value););
  return TH_MATH_NAME(pow)(sum, 1.0/value);
}
/* Mean of all elements. */
accreal THTensor_(meanall)(THTensor *tensor)
{
  THArgCheck(tensor->nDimension > 0, 1, "empty Tensor");
  return THTensor_(sumall)(tensor)/THTensor_(nElement)(tensor);
}
/* Variance of all elements; biased selects the 1/n estimator, otherwise the
   unbiased 1/(n-1) correction is used. */
accreal THTensor_(varall)(THTensor *tensor, int biased)
{
  accreal mean = THTensor_(meanall)(tensor);
  accreal sum = 0;
  TH_TENSOR_APPLY(real, tensor, sum += (*tensor_data - mean)*(*tensor_data - mean););
  sum /= THTensor_(nElement)(tensor) - (biased ? 0 : 1);
  return sum;
}
/* Standard deviation of all elements: square root of varall(). */
accreal THTensor_(stdall)(THTensor *tensor, int biased)
{
  return sqrt(THTensor_(varall)(tensor, biased));
}
/* Fill r_ with n points evenly spaced over [a, b] inclusive.  n == 1 is only
   valid when a == b. */
void THTensor_(linspace)(THTensor *r_, real a, real b, long n)
{
  real i = 0;

  THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");

  if (THTensor_(nElement)(r_) != n) {
    THTensor_(resize1d)(r_, n);
  }

  if(n == 1) {
    THTensor_(set1d)(r_, 0, a);
  } else {
     TH_TENSOR_APPLY(real, r_,
             *r__data = a + i*(b-a)/((real)(n-1));
             i++;
           );
  }
}
/* Fill r_ with n points logarithmically spaced between 10^a and 10^b
   inclusive.  n == 1 is only valid when a == b. */
void THTensor_(logspace)(THTensor *r_, real a, real b, long n)
{
  real i = 0;

  THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");

  if (THTensor_(nElement)(r_) != n) {
    THTensor_(resize1d)(r_, n);
  }

  if(n == 1) {
    THTensor_(set1d)(r_, 0, TH_MATH_NAME(pow)(10.0, a));
  } else {
    TH_TENSOR_APPLY(real, r_,
        *r__data = TH_MATH_NAME(pow)(10.0, a + i*(b-a)/((real)(n-1)));
        i++;
        );
  }
}
/* Resize r_ to `size` and fill it with samples from Uniform(0, 1). */
void THTensor_(rand)(THTensor *r_, THGenerator *_generator, THLongStorage *size)
{
  THTensor_(resize)(r_, size, NULL);
  THTensor_(uniform)(r_, _generator, 0, 1);
}
/* Resize r_ to `size` and fill it with samples from Normal(0, 1). */
void THTensor_(randn)(THTensor *r_, THGenerator *_generator, THLongStorage *size)
{
  THTensor_(resize)(r_, size, NULL);
  THTensor_(normal)(r_, _generator, 0, 1);
}
/* Histogram of all elements into nbins equal-width bins over
   [minvalue, maxvalue].  If minvalue == maxvalue, the range is taken from
   the data's min/max (widened by ±1 when the data is constant).  Values
   equal to maxval land in the last bin via the THMin clamp; values outside
   the range are ignored. */
void THTensor_(histc)(THTensor *hist, THTensor *tensor, long nbins, real minvalue, real maxvalue)
{
  real minval;
  real maxval;
  real *h_data;

  THTensor_(resize1d)(hist, nbins);
  THTensor_(zero)(hist);
  minval = minvalue;
  maxval = maxvalue;
  if (minval == maxval)
  {
    minval = THTensor_(minall)(tensor);
    maxval = THTensor_(maxall)(tensor);
  }
  if (minval == maxval)
  {
    minval = minval - 1;
    maxval = maxval + 1;
  }

  h_data = THTensor_(data)(hist);

  TH_TENSOR_APPLY(real, tensor,
    if (*tensor_data >= minval && *tensor_data <= maxval) {
      const int bin = (int)((*tensor_data-minval) / (maxval-minval) * nbins);
      h_data[THMin(bin, nbins-1)] += 1;
    }
  );
}
/* Batched histogram: one histogram per row of a 2-d tensor, written into an
   nrows-by-nbins output.  The two argument checks together force the input
   to be exactly 2-d (nDimension < 3 and > 1).  A single [minval, maxval]
   range — given, or derived from the global min/max — is shared by all
   rows. */
void THTensor_(bhistc)(THTensor *hist, THTensor *tensor, long nbins, real minvalue, real maxvalue)
{
  THArgCheck(THTensor_(nDimension)(tensor) < 3, 2, "invalid dimension %d, the input must be a 2d tensor", THTensor_(nDimension)(tensor));

  int dimension = 1;
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(tensor), 2, "invalid dimension %d",
      dimension + TH_INDEX_BASE);

  real minval;
  real maxval;
  real *h_data;

  THTensor_(resize2d)(hist, tensor->size[0], nbins);
  THTensor_(zero)(hist);

  minval = minvalue;
  maxval = maxvalue;
  if (minval == maxval)
  {
    minval = THTensor_(minall)(tensor);
    maxval = THTensor_(maxall)(tensor);
  }
  if (minval == maxval)
  {
    minval = minval - 1;
    maxval = maxval + 1;
  }

  TH_TENSOR_DIM_APPLY2(real, tensor, real, hist, dimension, long i;
    for(i = 0; i < tensor_size; i++)
    {
      /* in-range values only; the THMin clamp puts maxval in the last bin */
      if(tensor_data[i*tensor_stride] >= minval && tensor_data[i*tensor_stride] <= maxval) {
        const int bin = (int)((tensor_data[i*tensor_stride]-minval) / (maxval-minval) * nbins);
        hist_data[THMin(bin, nbins-1)] += 1;
      }
    }
  );
}
#undef TH_MATH_NAME
#endif /* floating point only part */
#undef IS_NONZERO
#endif
|
GB_binop__pow_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pow_int32)
// A.*B function (eWiseMult): GB (_AemultB_01__pow_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__pow_int32)
// A.*B function (eWiseMult): GB (_AemultB_03__pow_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_int32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pow_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__pow_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_int32)
// C=scalar+B GB (_bind1st__pow_int32)
// C=scalar+B' GB (_bind1st_tran__pow_int32)
// C=A+scalar GB (_bind2nd__pow_int32)
// C=A'+scalar GB (_bind2nd_tran__pow_int32)
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = GB_pow_int32 (aij, bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_pow_int32 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_POW || GxB_NO_INT32 || GxB_NO_POW_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled stub: no C += A+B dense-accum kernel is generated for POW
// (the op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV),
// so "(none)" is a placeholder and this code is never compiled.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; cij = GB_pow_int32 (aij, bij).
// The loop lives in the included template, driven by the GB_* macros above.
GrB_Info GB (_Cdense_ewise3_noaccum__pow_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; the accumulator is POW on int32.
// B_ek_slicing partitions B's entries across B_ntasks tasks / B_nthreads
// threads for the included template.
GrB_Info GB (_Cdense_accumB__pow_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar (passed type-erased via
// p_bwork); the accumulator is POW on int32.
GrB_Info GB (_Cdense_accumb__pow_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): this second return is unreachable (the block above
    // always returns); presumably the generator keeps it to silence
    // missing-return warnings.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled stub: no C = A*D column-scale kernel is generated for POW;
// callers fall back to the generic path.  Never compiled.
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled stub: no C = D*B row-scale kernel is generated for POW;
// callers fall back to the generic path.  Never compiled.
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with cij = GB_pow_int32 (aij, bij).
// The C_to_* maps translate C's vectors to those of M, A, and B; TaskList
// partitions the work.  The algorithm is in the included template.
GrB_Info GB (_AaddB__pow_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces are allocated (and freed by GB_FREE_WORK) inside the template
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B with
// cij = GB_pow_int32 (aij, bij); the algorithm is in the included file.
GrB_Info GB (_AemultB_01__pow_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  POW is not commutative and has no flipped variant
// (GB_BINOP_FLIP is 1), so a flipped multiply is handled here by
// compiling the template twice with GB_FLIPPED 0/1.
GrB_Info GB (_AemultB_02__pow_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B where M is sparse/hyper and both A
// and B are bitmap/full; work is sliced over M's entries.
GrB_Info GB (_AemultB_03__pow_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult with a bitmap result: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where
// C is bitmap; cij = GB_pow_int32 (aij, bij).
GrB_Info GB (_AemultB_bitmap__pow_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the binary op with the scalar bound to the first
// argument: Cx [p] = GB_pow_int32 (x, Bx [p]).  Cx and Bx may be aliased.
// Bb is B->b when B is bitmap (NULL otherwise); absent entries are skipped.
GrB_Info GB (_bind1st__pow_int32)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_pow_int32 (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the binary op with the scalar bound to the second
// argument: Cx [p] = GB_pow_int32 (Ax [p], y).  Cx and Ax may be aliased.
// Ab is A->b when A is bitmap (NULL otherwise); absent entries are skipped.
GrB_Info GB (_bind2nd__pow_int32)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_pow_int32 (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_pow_int32 (x, aij) ; \
}
// C = op (x, A'): transpose A and apply the op with the scalar as the
// first argument, via the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind1st_tran__pow_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for the rest of this compilation unit
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_pow_int32 (aij, y) ; \
}
// C = op (A', y): transpose A and apply the op with the scalar as the
// second argument, via the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind2nd_tran__pow_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
o3logon_fmt_plug.c | /*
* This software was written by JimF jfoug AT cox dot net
* in 2016. No copyright is claimed, and the software is hereby
* placed in the public domain. In case this attempt to disclaim
* copyright and place the software in the public domain is deemed
* null and void, then the software is Copyright (c) 2016 JimF
* and it is hereby released to the general public under the following
* terms:
*
* This software may be modified, redistributed, and used for any
* purpose, in source and binary forms, with or without modification.
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_o3logon;
#elif FMT_REGISTERS_H
john_register_one(&fmt_o3logon);
#else
#include <string.h>
#include <openssl/des.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "sha.h"
#include "unicode.h"
#include "base64_convert.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 512 // tuned on core i7
//#define OMP_SCALE 8192 // tuned on K8-Dual HT
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "o3logon"
#define FORMAT_NAME "Oracle O3LOGON protocol"
#define FORMAT_TAG "$o3logon$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "SHA1 DES 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 32
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define MAX_USERNAME_LEN 30
#define SALT_SIZE (sizeof(ora9_salt))
#define SALT_ALIGN (sizeof(unsigned int))
#define CIPHERTEXT_LENGTH 16
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define MAX_HASH_LEN (FORMAT_TAG_LEN+MAX_USERNAME_LEN+1+32+1+80)
//#define DEBUG_ORACLE
//
// The keys are $ora9i$ user $ auth_sess_key $ auth_pass_key These can be found in sniffed network traffic.
static struct fmt_tests tests[] = {
{"$o3logon$PASSWORD9$8CF28B36E4F3D2095729CF59510003BF$3078D7DE44385654CC952A9C56E2659B", "password9"},
{"$o3logon$scott$819D062FE5D93F79FF19BDAFE2F9872A$C6D1ED7E6F4D3A6D94F1E49460122D39A3832CC792AD7137", "scottscottscott1"},
{"$o3logon$SCOTT$8E9E3E07864D99BB602C443F45E4AFC1$3591851B327BB85A114BD73D51B80AF58E942002B9612F82", "scottscottscott1234"},
{"$o3logon$scott$4488AFD7905E9966912CA680A3C0A23E$628FBAC5CF0E5548743E16123BF027B9314D7EE8B4E30DB213F683F8D7E786EA", "scottscottscott12345"},
{NULL}
};
typedef struct ora9_salt_t {
int userlen, auth_pass_len;
UTF16 user[MAX_USERNAME_LEN+1];
unsigned char auth_sesskey[16];
unsigned char auth_pass[40];
} ora9_salt;
static ora9_salt *cur_salt;
static UTF16 (*cur_key)[PLAINTEXT_LENGTH + 1];
static char (*plain_key)[PLAINTEXT_LENGTH + 1];
static int *cur_key_len;
static int *cracked, any_cracked;
static DES_key_schedule desschedule1; // key 0x0123456789abcdef
/* One-time format setup: build the constant DES key schedule (key
 * 0x0123456789abcdef) used to derive the O3LOGON hash, scale the
 * keys-per-crypt limits for OpenMP, and allocate per-candidate buffers
 * (freed in done()). */
static void init(struct fmt_main *self)
{
	DES_set_key((DES_cblock *)"\x01\x23\x45\x67\x89\xab\xcd\xef", &desschedule1);
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	cur_key = mem_calloc(self->params.max_keys_per_crypt,
	                     sizeof(*cur_key));
	plain_key = mem_calloc(self->params.max_keys_per_crypt,
	                     sizeof(*plain_key));
	cur_key_len = mem_calloc(self->params.max_keys_per_crypt,
	                     sizeof(*cur_key_len));
	cracked = mem_calloc(self->params.max_keys_per_crypt,
	                     sizeof(*cracked));
}
/* Release the buffers allocated in init(), in reverse allocation order. */
static void done(void)
{
	MEM_FREE(cracked);
	MEM_FREE(cur_key_len);
	MEM_FREE(plain_key);
	MEM_FREE(cur_key);
}
/* Validate one hash line of the form
 *   $o3logon$<user>$<32 hex auth_sesskey>$<hex auth_password>
 * Returns 1 if well-formed: the username must convert to at most
 * MAX_USERNAME_LEN UTF-16 code units, the session key must be exactly 32
 * hex digits, and the password field must be non-empty hex with a length
 * that is a multiple of 16 digits (multiple of 8 bytes, a 3DES block). */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *cp;
	char tmp[32*5+1];
	UTF16 cur_key_mixedcase[MAX_USERNAME_LEN+2];
	int len, extra;
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;
	ciphertext += FORMAT_TAG_LEN;
	cp = strchr(ciphertext, '$');
	if (!cp)
		return 0;
	// make sure username fits in MAX_USERNAME_LEN UTF16
	if (cp-ciphertext > sizeof(tmp)-1)
		return 0;
	memcpy(tmp, ciphertext, cp-ciphertext);
	tmp[cp-ciphertext] = 0;
	len = enc_to_utf16((UTF16 *)cur_key_mixedcase, MAX_USERNAME_LEN+1, (unsigned char*)tmp, strlen(tmp));
	if (len < 0 || (len == 0 && cp-ciphertext)) {
		/* conversion failed: input is not valid in the configured encoding */
		static int error_shown = 0;
#ifdef HAVE_FUZZ
		if (options.flags & (FLG_FUZZ_CHK | FLG_FUZZ_DUMP_CHK))
			return 0;
#endif
		if (!error_shown)
			fprintf(stderr, "%s: Input file is not UTF-8. Please use --input-enc to specify a codepage.\n", self->params.label);
		error_shown = 1;
		return 0;
	}
	if (len > MAX_USERNAME_LEN)
		return 0;
	/* 32 hex digits of auth_sesskey, no stray characters */
	ciphertext = cp+1;
	cp = strchr(ciphertext, '$');
	if (!cp || cp-ciphertext != 32 || hexlenu(ciphertext, 0) != 32)
		return 0;
	/* trailing auth_password: pure hex, nonzero, multiple of 16 digits */
	ciphertext = cp+1;
	cp = strchr(ciphertext, '$');
	len = strlen(ciphertext);
	if (!len || cp || len%16 || hexlenu(ciphertext, &extra) != len || extra)
		return 0;
	return 1;
}
/* Canonicalize a hash for comparison: copy it into a static buffer and
 * upper-case everything after the tag (username and hex fields), matching
 * the FMT_SPLIT_UNIFIES_CASE flag set below. */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[MAX_HASH_LEN*5+1];
	strnzcpy(out, ciphertext, MAX_HASH_LEN+1);
	enc_strupper(&out[FORMAT_TAG_LEN]);
	return out;
}
/* Install the salt produced by get_salt() for subsequent crypt_all() calls. */
static void set_salt(void *salt) {
	cur_salt = (ora9_salt *)salt;
}
/* Store candidate password `key` at slot `index`: keep the plaintext for
 * get_key()/comparison, and also keep an upper-cased UTF-16BE copy (what
 * Oracle hashes) plus its byte length. */
static void oracle_set_key(char *key, int index) {
	UTF16 cur_key_mixedcase[PLAINTEXT_LENGTH+1];
	UTF16 *c;
	int key_length;
	strnzcpy(plain_key[index], key, sizeof(*plain_key));
	// Can't use enc_to_utf16_be() because we need to do utf16_uc later
	key_length = enc_to_utf16(cur_key_mixedcase, PLAINTEXT_LENGTH, (unsigned char*)key, strlen(key));
	if (key_length < 0)
		key_length = strlen16(cur_key_mixedcase);
	// We convert and uppercase in one shot
	key_length = utf16_uc(cur_key[index], PLAINTEXT_LENGTH, cur_key_mixedcase, key_length);
	// we have no way to 'undo' here, since the expansion is due to single-2-multi expansion in the upcase,
	// and we can not 'fix' our password. We simply have to 'not' properly decrypt this one, but protect ourselves.
	if (key_length < 0)
		key_length *= -1;
	cur_key_len[index] = key_length * sizeof(UTF16);
	// Now byte-swap to UTF16-BE
	// NOTE(review): this swap assumes the host (and enc_to_utf16 output) is
	// little-endian UTF-16 — confirm on big-endian builds.
	c = cur_key[index];
	while((*c = *c << 8 | *c >> 8))
		c++;
#ifdef DEBUG_ORACLE
	dump_stuff_msg("cur_key    ", (unsigned char*)cur_key[index], cur_key_len[index]);
#endif
}
/* Return the original (un-normalized) plaintext for candidate `index`. */
static char *get_key(int index) {
	return plain_key[index];
}
/* Derive 40 bytes of key material into out_key using two chained SHA-1
 * passes over (input || Entropy); the second pass also mixes in a 0x02
 * byte and bytes 1..19 of the first digest.  Callers use the first 24
 * bytes as a 3DES key.
 * NOTE(review): the desired_keylen parameter is ignored — the output is
 * always 40 bytes; confirm callers never request more. */
static int ORACLE_TNS_Create_Key_SHA1 (unsigned char *input, int input_len, const unsigned char *Entropy, int EntropyLen, int desired_keylen, unsigned char *out_key)
{
	SHA_CTX ctx;
	SHA1_Init (&ctx);
	SHA1_Update (&ctx, input, input_len);
	SHA1_Update (&ctx, Entropy, EntropyLen);
	SHA1_Final (out_key, &ctx);
	SHA1_Init (&ctx);
	SHA1_Update (&ctx, input, input_len);
	SHA1_Update (&ctx, "\x2", 1);
	SHA1_Update (&ctx, &out_key[1], 19);
	SHA1_Update (&ctx, Entropy, EntropyLen);
	SHA1_Final (out_key+20, &ctx);
	return 0;
}
/* 3DES-EDE-CBC decrypt input_len bytes into `decrypted` using the 24-byte
 * key (split into three single-DES schedules) and the fixed TNS IV. */
static int ORACLE_TNS_Decrypt_3DES_CBC (unsigned char* input, int input_len, const unsigned char key[24], unsigned char *decrypted)
{
	DES_key_schedule ks1,ks2,ks3;
	/* fixed IV used by the TNS O3LOGON exchange */
	unsigned char iv[] = {0x80,0x20,0x40,0x04,0x08,0x02,0x10,0x01};
	DES_set_key((DES_cblock*) &key[0], &ks1);
	DES_set_key((DES_cblock*) &key[8], &ks2);
	DES_set_key((DES_cblock*) &key[16], &ks3);
	DES_ede3_cbc_encrypt(input,decrypted,input_len,&ks1,&ks2,&ks3,(DES_cblock*) iv,DES_DECRYPT);
	return 0;
}
/* Fixed entropy constants from the O3LOGON protocol, mixed into the SHA-1
 * key derivation: fixed31 for the session-key stage, fixed23 for the
 * password stage. */
static unsigned char fixed31 [] = {0xA2,0xFB,0xE6,0xAD,0x4C,0x7D,0x1E,0x3D,
	0x6E,0xB0,0xB7,0x6C,0x97,0xEF,0xFF,0x84,
	0x44,0x71,0x02,0x84,0xAC,0xF1,0x3B,0x29,
	0x5C,0x0F,0x0C,0xB1,0x87,0x75,0xEF};
static unsigned char fixed23 [] = {0xF2,0xFF,0x97,0x87,0x15,0x37,0x07,0x76,
	0x07,0x27,0xE2,0x7F,0xA3,0xB1,0xD6,0x73,
	0x3F,0x2F,0xD1,0x52,0xAB,0xAC,0xC0};
/* Recover the 9i cleartext password: derive a 3DES key from the Oracle
 * hash + fixed31, decrypt the session key, derive a second 3DES key from
 * it + fixed23, decrypt the password blob, then de-obfuscate by moving
 * the last 4 bytes to the front (replacing the 4-byte obfuscation
 * header).  Returns the (padded) password length. */
static int ORACLE_TNS_Decrypt_Password_9i (unsigned char OracleHash[8], unsigned char *auth_sesskey, int auto_sesskeylen, unsigned char *auth_password, int auth_passwordlen, unsigned char *decrypted)
{
	unsigned char triple_des_key[64];
	unsigned char sesskey[16];
	unsigned char obfuscated[256];
	int PassLen = auth_passwordlen;
	ORACLE_TNS_Create_Key_SHA1 (OracleHash, 8, fixed31, sizeof(fixed31), 24, triple_des_key);
	ORACLE_TNS_Decrypt_3DES_CBC (auth_sesskey, 16, triple_des_key, sesskey);
	ORACLE_TNS_Create_Key_SHA1 (sesskey, 16, fixed23, sizeof(fixed23), 24, triple_des_key);
	ORACLE_TNS_Decrypt_3DES_CBC (auth_password, PassLen, triple_des_key, obfuscated);
	//ORACLE_TNS_DeObfuscate (triple_des_key, obfuscated, &PassLen);
	memcpy(decrypted, &obfuscated[PassLen-4], 4);
	memcpy(&decrypted[4], &obfuscated[4], PassLen-4);
	return PassLen;
}
/* For each candidate: compute the Oracle password hash as a DES-CBC MAC
 * over UTF16-BE(username || password) — one pass with the fixed key
 * schedule, a second keyed by the first pass's final block — then use
 * that 8-byte hash to decrypt the sniffed auth_password and mark the
 * candidate cracked if the decrypted text starts with the plaintext.
 *
 * FIX: the DEBUG_ORACLE dump formerly sat before the loop and referenced
 * `buf` and `key_length`, neither of which is in scope there, breaking the
 * build whenever DEBUG_ORACLE was defined.  It now runs inside the loop on
 * the buffer it documents.  Runtime behavior without DEBUG_ORACLE is
 * unchanged. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int idx = 0;

	if (any_cracked) {
		/* reset per-batch results */
		memset(cracked, 0, sizeof(*cracked) * count);
		any_cracked = 0;
	}
#ifdef _OPENMP
#pragma omp parallel for
	for (idx = 0; idx < count; idx++)
#endif
	{
		unsigned char buf[256], buf1[256];
		unsigned int l;
		uint32_t iv[2];
		DES_key_schedule desschedule2;

		/* buf = UTF16-BE username followed by UTF16-BE upper-cased password */
		l = cur_salt->userlen + cur_key_len[idx];
		memcpy(buf, cur_salt->user, cur_salt->userlen);
		memcpy(buf + cur_salt->userlen, cur_key[idx], cur_key_len[idx]);
#ifdef DEBUG_ORACLE
		dump_stuff_msg("cur_salt    ", buf, l);
#endif
		/* first CBC pass with the constant key; its final block (left in
		 * iv) keys the second pass, whose final block is the hash */
		iv[0] = iv[1] = 0;
		DES_ncbc_encrypt((unsigned char *)buf, buf1, l, &desschedule1, (DES_cblock *) iv, DES_ENCRYPT);
		DES_set_key((DES_cblock *)iv, &desschedule2);
		iv[0] = iv[1] = 0;
		DES_ncbc_encrypt((unsigned char *)buf, buf1, l, &desschedule2, (DES_cblock *) iv, DES_ENCRYPT);

#ifdef DEBUG_ORACLE
		dump_stuff_msg("  iv (the hash key) ", (unsigned char*)&iv[0], 8);
#endif

		ORACLE_TNS_Decrypt_Password_9i ((unsigned char*)iv, cur_salt->auth_sesskey, 16, cur_salt->auth_pass, cur_salt->auth_pass_len, buf);
		/* prefix compare: decrypted buffer keeps its block padding, so only
		 * the first strlen(plain_key) bytes are meaningful */
		if (!strncmp((char*)buf, plain_key[idx], strlen(plain_key[idx])))
		{
			cracked[idx] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}
	}
	return count;
}
/* Parse a (valid()-checked) hash line into a static ora9_salt: username
 * as UTF16-BE plus byte length, 16-byte auth_sesskey, and the variable
 * length auth_pass blob.  Returns a pointer to static storage, as the
 * fmt API expects; not reentrant. */
static void *get_salt(char *ciphertext)
{
	static ora9_salt salt;
	UTF8 tmp[MAX_USERNAME_LEN*5+1];
	char *cp;

	memset(&salt, 0, sizeof(salt));
	ciphertext += FORMAT_TAG_LEN;
	cp = strchr(ciphertext, '$');
	strncpy((char*)tmp, ciphertext, cp-ciphertext);
	tmp[cp-ciphertext] = 0;
	salt.userlen = enc_to_utf16_be(salt.user, MAX_USERNAME_LEN, tmp, cp-ciphertext);
	if (salt.userlen < 0)
		salt.userlen = strlen16(salt.user);
	/* store the byte length (UTF-16 units * 2) */
	salt.userlen *= 2;
	base64_convert(cp+1,e_b64_hex,32,salt.auth_sesskey,e_b64_raw,16,0,0);
	cp = strchr(cp+1, '$') + 1;
	salt.auth_pass_len = strlen(cp)/2;
	base64_convert(cp,e_b64_hex,salt.auth_pass_len*2,salt.auth_pass,e_b64_raw,salt.auth_pass_len,0,0);
	return &salt;
}
// Public domain hash function by DJ Bernstein (salt is a username)
static int salt_hash(void *salt)
{
UTF16 *s = ((UTF16*)salt) + 1;
unsigned int hash = 5381;
while (*s)
hash = ((hash << 5) + hash) ^ *s++;
return hash & (SALT_HASH_SIZE - 1);
}
/* Any candidate in the batch cracked?  (binary is unused: BINARY_SIZE 0) */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}
/* Was candidate `count` (used as an index here) cracked by crypt_all()? */
static int cmp_one(void *binary, int count)
{
	return cracked[count];
}
/* crypt_all() already compared full plaintexts, so nothing more to check. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Format descriptor wiring the functions above into John's fmt API. */
struct fmt_main fmt_o3logon = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		/* case matters (FMT_CASE) but split() unifies hash-string case */
		FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_SPLIT_UNIFIES_CASE | FMT_CASE | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		/* no binary: comparison happens via decrypted plaintext */
		fmt_default_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		salt_hash,
		NULL,
		set_salt,
		oracle_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
|
GB_unop__atanh_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__atanh_fc32_fc32)
// op(A') function: GB (_unop_tran__atanh_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = catanhf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = catanhf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = catanhf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ATANH || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = catanhf (Ax [p]) for all anz entries: complex float atanh,
// no typecast.  When A is bitmap, Ab marks present entries (C->b already
// holds a copy of A->b, so absent slots are simply skipped).
GrB_Info GB (_unop_apply__atanh_fc32_fc32)
(
    GxB_FC32_t *Cx,         // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is sparse, hyper, or full: every slot holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = catanhf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = catanhf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = catanhf (A'): transpose and apply, via the shared transpose
// template driven by the GB_* macros defined above.
GrB_Info GB (_unop_tran__atanh_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unaryop__ainv_int64_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int64_int64
// op(A') function: GB_tran__ainv_int64_int64
// C type: int64_t
// A type: int64_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
int64_t z = (int64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the AINV (additive inverse) operator elementwise:
// Cx [p] = -((int64_t) Ax [p]) for p = 0..anz-1, with the iterations
// divided statically across nthreads OpenMP threads.
GrB_Info GB_unop__ainv_int64_int64
(
    int64_t *restrict Cx,
    const int64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // aij = Ax [p], cast (identity for int64 -> int64), then negate
        int64_t aij = Ax [p] ;
        int64_t z = (int64_t) aij ;
        Cx [p] = -z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = -(A'): transpose, typecast (identity here), and negate, via the
// shared transpose template; Rowcounts/Iter/A_slice partition A into
// naslice pieces.
GrB_Info GB_tran__ainv_int64_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
draw.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R R A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Define declarations.
*/
/* Number of segments used when flattening a Bezier span -- presumably;
   TODO confirm against TraceBezier. */
#define BezierQuantum 200
/* Slack added when growing primitive arrays -- presumably; TODO confirm
   against the MVG parser. */
#define PrimitiveExtentPad 2048
/* Hard cap on Bezier coordinates, guarding against pathological paths
   exhausting memory -- TODO confirm against TraceBezier. */
#define MaxBezierCoordinates 67108864
/*
  Report a malformed point in a drawing primitive and bail out of the
  enclosing loop/switch.  NOTE: relies on a local variable `status' and a
  breakable context existing at every expansion site.
*/
#define ThrowPointExpectedException(token,exception) \
{ \
  (void) ThrowMagickException(exception,GetMagickModule(),DrawError, \
    "NonconformingDrawingPrimitiveDefinition","`%s'",token); \
  status=MagickFalse; \
  break; \
}
/*
Typedef declarations.
*/
typedef struct _EdgeInfo
{
  /* bounding box enclosing every point of this edge */
  SegmentInfo
    bounds;
  /* scanline bookkeeping while rasterizing; seeded to -1.0 (0.0 for the
     initial sentinel slot) in ConvertPathToPolygon */
  double
    scanline;
  /* polyline vertices; ConvertPathToPolygon stores them with ascending y
     (points are reversed when the path ran upward) */
  PointInfo
    *points;
  /* number of entries in points */
  size_t
    number_points;
  /* 1 when the original path ran downward (y increasing), else 0 */
  ssize_t
    direction;
  /* MagickTrue for synthetic segments that close an open subpath */
  MagickBooleanType
    ghostline;
  /* cursor into points for the scanline sweep -- presumably an
     optimization; TODO confirm against the polygon renderer */
  size_t
    highwater;
} EdgeInfo;
/* Ellipse-like element: center (cx,cy), axes (major,minor) and rotation
   angle -- presumably for circle/ellipse primitives; TODO confirm. */
typedef struct _ElementInfo
{
  double
    cx,
    cy,
    major,
    minor,
    angle;
} ElementInfo;
/* Parser state threaded through the Trace* helpers. */
typedef struct _MVGInfo
{
  /* growable primitive array (pointer-to-pointer so helpers can resize) */
  PrimitiveInfo
    **primitive_info;
  /* allocated length of *primitive_info */
  size_t
    *extent;
  /* current write position within *primitive_info */
  ssize_t
    offset;
  /* current point -- presumably the pen position; TODO confirm */
  PointInfo
    point;
  /* destination for parse errors */
  ExceptionInfo
    *exception;
} MVGInfo;
/* A path converted to its sorted, render-ready edge list. */
typedef struct _PolygonInfo
{
  EdgeInfo
    *edges;
  size_t
    number_edges;
} PolygonInfo;
typedef enum
{
  MoveToCode,     /* start of a closed subpath */
  OpenCode,       /* start of a subpath later found to be open */
  GhostlineCode,  /* moveto beginning a synthetic closing segment */
  LineToCode,     /* ordinary segment */
  EndCode         /* terminator of the path array */
} PathInfoCode;
/* One vector-path vertex: a coordinate plus its path opcode. */
typedef struct _PathInfo
{
  PointInfo
    point;
  PathInfoCode
    code;
} PathInfo;
/*
Forward declarations.
*/
static Image
*DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
ExceptionInfo *);
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
ExceptionInfo *),
RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *),
TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(MVGInfo *,const size_t),
TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
static PrimitiveInfo
*TraceStrokePolygon(const Image *,const DrawInfo *,const PrimitiveInfo *);
static ssize_t
TracePath(MVGInfo *,const char *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
  DrawInfo
    *draw_info;

  /*
    Allocate a DrawInfo structure and initialize it to default values.
    AcquireCriticalMemory() is used, so no NULL check is required here
    (matching its other call sites in this file).
  */
  draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(DrawInfo));
  GetDrawInfo((ImageInfo *) NULL,draw_info);
  return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created initialized to default
% values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
  const DrawInfo *draw_info)
{
  DrawInfo
    *clone_info;
  ExceptionInfo
    *exception;
  /*
    Start from a default-initialized DrawInfo; a NULL draw_info returns the
    defaults unchanged.
  */
  clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetDrawInfo(image_info,clone_info);
  if (draw_info == (DrawInfo *) NULL)
    return(clone_info);
  exception=AcquireExceptionInfo();
  /*
    Deep-copy dynamically allocated string members; plain members are
    copied by assignment below.
  */
  if (draw_info->id != (char *) NULL)
    (void) CloneString(&clone_info->id,draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    (void) CloneString(&clone_info->primitive,draw_info->primitive);
  if (draw_info->geometry != (char *) NULL)
    (void) CloneString(&clone_info->geometry,draw_info->geometry);
  clone_info->compliance=draw_info->compliance;
  clone_info->viewbox=draw_info->viewbox;
  clone_info->affine=draw_info->affine;
  clone_info->gravity=draw_info->gravity;
  clone_info->fill=draw_info->fill;
  clone_info->stroke=draw_info->stroke;
  clone_info->stroke_width=draw_info->stroke_width;
  /*
    Pattern and mask images are cloned, not shared, so each DrawInfo owns
    its own copies.
  */
  if (draw_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
      exception);
  if (draw_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke_antialias=draw_info->stroke_antialias;
  clone_info->text_antialias=draw_info->text_antialias;
  clone_info->fill_rule=draw_info->fill_rule;
  clone_info->linecap=draw_info->linecap;
  clone_info->linejoin=draw_info->linejoin;
  clone_info->miterlimit=draw_info->miterlimit;
  clone_info->dash_offset=draw_info->dash_offset;
  clone_info->decorate=draw_info->decorate;
  clone_info->compose=draw_info->compose;
  if (draw_info->text != (char *) NULL)
    (void) CloneString(&clone_info->text,draw_info->text);
  if (draw_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    (void) CloneString(&clone_info->metrics,draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    (void) CloneString(&clone_info->family,draw_info->family);
  clone_info->style=draw_info->style;
  clone_info->stretch=draw_info->stretch;
  clone_info->weight=draw_info->weight;
  if (draw_info->encoding != (char *) NULL)
    (void) CloneString(&clone_info->encoding,draw_info->encoding);
  clone_info->pointsize=draw_info->pointsize;
  clone_info->kerning=draw_info->kerning;
  clone_info->interline_spacing=draw_info->interline_spacing;
  clone_info->interword_spacing=draw_info->interword_spacing;
  clone_info->direction=draw_info->direction;
  if (draw_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,draw_info->density);
  clone_info->align=draw_info->align;
  clone_info->undercolor=draw_info->undercolor;
  clone_info->border_color=draw_info->border_color;
  if (draw_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    {
      register ssize_t
        x;
      /*
        The dash pattern is terminated by a near-zero entry; count the x
        leading entries, allocate 2*x+2 zeroed slots (room for the pattern,
        its terminator, and padding -- presumably for later doubling; TODO
        confirm), and copy the x+1 values including the terminator.
      */
      for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2),
        sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
        sizeof(*clone_info->dash_pattern));
      (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
        (x+1)*sizeof(*clone_info->dash_pattern));
    }
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t
        number_stops;
      /*
        The struct assignment above copied the stops pointer; replace it
        with a private copy so both DrawInfos do not free the same array.
      */
      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      /* NOTE(review): this exception message names the dash pattern, but
         the failed allocation is for gradient stops. */
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
        (size_t) number_stops*sizeof(*clone_info->gradient.stops));
    }
  clone_info->bounds=draw_info->bounds;
  clone_info->fill_alpha=draw_info->fill_alpha;
  clone_info->stroke_alpha=draw_info->stroke_alpha;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->clip_path=draw_info->clip_path;
  clone_info->clip_units=draw_info->clip_units;
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
      MagickTrue,exception);
  if (draw_info->composite_mask != (Image *) NULL)
    clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
      MagickTrue,exception);
  clone_info->render=draw_info->render;
  /* debug reflects the current logging state, not the source's flag */
  clone_info->debug=IsEventLogging();
  exception=DestroyExceptionInfo(exception);
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
%
% A description of each parameter follows:
%
% o Method ConvertPathToPolygon returns the path in a more efficient sorted
% rendering form of type PolygonInfo.
%
% o draw_info: Specifies a pointer to an DrawInfo structure.
%
% o path_info: Specifies a pointer to an PathInfo structure.
%
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
  register const PointInfo
    *p,
    *q;

  double
    delta;

  /*
    Edge ordering for a right-handed coordinate system: compare the first
    point's y then x, then the cross-product of the initial segment
    directions, then the second point's y and x.  Equal edges compare 0.
  */
  p=((const EdgeInfo *) p_edge)->points;
  q=((const EdgeInfo *) q_edge)->points;
  delta=p[0].y-q[0].y;
  if (delta < 0.0)
    return(-1);
  if (delta > 0.0)
    return(1);
  delta=p[0].x-q[0].x;
  if (delta < 0.0)
    return(-1);
  if (delta > 0.0)
    return(1);
  delta=(p[1].x-p[0].x)*(q[1].y-q[0].y)-(p[1].y-p[0].y)*(q[1].x-q[0].x);
  if (delta < 0.0)
    return(-1);
  if (delta > 0.0)
    return(1);
  delta=p[1].y-q[1].y;
  if (delta < 0.0)
    return(-1);
  if (delta > 0.0)
    return(1);
  delta=p[1].x-q[1].x;
  if (delta < 0.0)
    return(-1);
  if (delta > 0.0)
    return(1);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
register EdgeInfo
*p;
register ssize_t
i,
j;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
p=polygon_info->edges;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
(double) i);
(void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
p->direction != MagickFalse ? "down" : "up");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
p->ghostline != MagickFalse ? "transparent" : "opaque");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
p->bounds.x2,p->bounds.y2);
for (j=0; j < (ssize_t) p->number_points; j++)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
p->points[j].x,p->points[j].y);
p++;
}
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  register ssize_t
    head,
    tail;

  /*
    Reverse the point list in place by swapping elements from both ends
    until the indices meet; a list of 0 or 1 points is left untouched.
  */
  head=0;
  tail=(ssize_t) number_points-1;
  while (head < tail)
  {
    PointInfo
      swap;

    swap=points[head];
    points[head]=points[tail];
    points[tail]=swap;
    head++;
    tail--;
  }
}
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
{
  long
    direction,
    next_direction;
  PointInfo
    point,
    *points;
  PolygonInfo
    *polygon_info;
  SegmentInfo
    bounds;
  register ssize_t
    i,
    n;
  MagickBooleanType
    ghostline;
  size_t
    edge,
    number_edges,
    number_points;
  /*
    Convert a path to the more efficient sorted rendering form.  The path
    is split into monotonic edges (runs of points whose y never decreases,
    reversing descending runs), each with its bounding box; the edges are
    then sorted for the scanline renderer.  Returns NULL on allocation
    failure.
  */
  polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo *) NULL)
    return((PolygonInfo *) NULL);
  number_edges=16;
  polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
    sizeof(*polygon_info->edges));
  /* NOTE(review): polygon_info leaks on this failure path -- TODO confirm
     and free before returning. */
  if (polygon_info->edges == (EdgeInfo *) NULL)
    return((PolygonInfo *) NULL);
  (void) memset(polygon_info->edges,0,number_edges*
    sizeof(*polygon_info->edges));
  direction=0;
  edge=0;
  ghostline=MagickFalse;
  n=0;
  number_points=0;
  points=(PointInfo *) NULL;
  (void) memset(&point,0,sizeof(point));
  (void) memset(&bounds,0,sizeof(bounds));
  /*
    Seed the first edge slot with empty values.
  */
  polygon_info->edges[edge].number_points=(size_t) n;
  polygon_info->edges[edge].scanline=0.0;
  polygon_info->edges[edge].highwater=0;
  polygon_info->edges[edge].ghostline=ghostline;
  polygon_info->edges[edge].direction=(ssize_t) direction;
  polygon_info->edges[edge].points=points;
  polygon_info->edges[edge].bounds=bounds;
  polygon_info->number_edges=0;
  for (i=0; path_info[i].code != EndCode; i++)
  {
    if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
        (path_info[i].code == GhostlineCode))
      {
        /*
          Move to.  Flush the edge accumulated so far (if it has at least
          two points) and start a new point list at this coordinate.
        */
        if ((points != (PointInfo *) NULL) && (n >= 2))
          {
            if (edge == number_edges)
              {
                number_edges<<=1;
                polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                  polygon_info->edges,(size_t) number_edges,
                  sizeof(*polygon_info->edges));
                /* NOTE(review): polygon_info, points and the points arrays
                   of earlier edges leak on this failure path. */
                if (polygon_info->edges == (EdgeInfo *) NULL)
                  return((PolygonInfo *) NULL);
              }
            polygon_info->edges[edge].number_points=(size_t) n;
            polygon_info->edges[edge].scanline=(-1.0);
            polygon_info->edges[edge].highwater=0;
            polygon_info->edges[edge].ghostline=ghostline;
            polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
            /* store points with ascending y: reverse descending runs */
            if (direction < 0)
              ReversePoints(points,(size_t) n);
            polygon_info->edges[edge].points=points;
            polygon_info->edges[edge].bounds=bounds;
            polygon_info->edges[edge].bounds.y1=points[0].y;
            polygon_info->edges[edge].bounds.y2=points[n-1].y;
            points=(PointInfo *) NULL;
            ghostline=MagickFalse;
            edge++;
          }
        if (points == (PointInfo *) NULL)
          {
            number_points=16;
            points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
              sizeof(*points));
            /* NOTE(review): polygon_info and its edges leak on this
               failure path. */
            if (points == (PointInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
        point=path_info[i].point;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        direction=0;
        n=1;
        continue;
      }
    /*
      Line to.  Determine whether the path is heading down (+1) or up (-1);
      ties in y are broken by x so the direction is never 0 here.
    */
    next_direction=((path_info[i].point.y > point.y) ||
      ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
      (path_info[i].point.x > point.x))) ? 1 : -1;
    if ((points != (PointInfo *) NULL) && (direction != 0) &&
        (direction != next_direction))
      {
        /*
          New edge.  The path reversed vertical direction, so close the
          current monotonic edge and start a new one at its last point.
        */
        point=points[n-1];
        if (edge == number_edges)
          {
            number_edges<<=1;
            polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
              polygon_info->edges,(size_t) number_edges,
              sizeof(*polygon_info->edges));
            /* NOTE(review): polygon_info, points and earlier edges' point
               arrays leak on this failure path. */
            if (polygon_info->edges == (EdgeInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        polygon_info->edges[edge].number_points=(size_t) n;
        polygon_info->edges[edge].scanline=(-1.0);
        polygon_info->edges[edge].highwater=0;
        polygon_info->edges[edge].ghostline=ghostline;
        polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
        if (direction < 0)
          ReversePoints(points,(size_t) n);
        polygon_info->edges[edge].points=points;
        polygon_info->edges[edge].bounds=bounds;
        polygon_info->edges[edge].bounds.y1=points[0].y;
        polygon_info->edges[edge].bounds.y2=points[n-1].y;
        number_points=16;
        points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
          sizeof(*points));
        /* NOTE(review): polygon_info and its edges leak on this failure
           path. */
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
        n=1;
        ghostline=MagickFalse;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        edge++;
      }
    direction=next_direction;
    if (points == (PointInfo *) NULL)
      continue;
    if (n == (ssize_t) number_points)
      {
        number_points<<=1;
        points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
          sizeof(*points));
        /* NOTE(review): polygon_info and its edges leak on this failure
           path. */
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
      }
    point=path_info[i].point;
    points[n]=point;
    /* y-bounds are set when the edge is flushed; track x-bounds here */
    if (point.x < bounds.x1)
      bounds.x1=point.x;
    if (point.x > bounds.x2)
      bounds.x2=point.x;
    n++;
  }
  if (points != (PointInfo *) NULL)
    {
      /*
        Flush the trailing edge; a single leftover point is discarded.
      */
      if (n < 2)
        points=(PointInfo *) RelinquishMagickMemory(points);
      else
        {
          if (edge == number_edges)
            {
              number_edges<<=1;
              polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                polygon_info->edges,(size_t) number_edges,
                sizeof(*polygon_info->edges));
              /* NOTE(review): polygon_info, points and earlier edges' point
                 arrays leak on this failure path. */
              if (polygon_info->edges == (EdgeInfo *) NULL)
                return((PolygonInfo *) NULL);
            }
          polygon_info->edges[edge].number_points=(size_t) n;
          polygon_info->edges[edge].scanline=(-1.0);
          polygon_info->edges[edge].highwater=0;
          polygon_info->edges[edge].ghostline=ghostline;
          polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
          if (direction < 0)
            ReversePoints(points,(size_t) n);
          polygon_info->edges[edge].points=points;
          polygon_info->edges[edge].bounds=bounds;
          polygon_info->edges[edge].bounds.y1=points[0].y;
          polygon_info->edges[edge].bounds.y2=points[n-1].y;
          ghostline=MagickFalse;
          edge++;
        }
    }
  /*
    Sort the edges into the renderer's scan order (see DrawCompareEdges).
  */
  polygon_info->number_edges=edge;
  qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
    sizeof(*polygon_info->edges),DrawCompareEdges);
  if (IsEventLogging() != MagickFalse)
    LogPolygonInfo(polygon_info);
  return(polygon_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o Method ConvertPrimitiveToPath returns a vector path structure of type
% PathInfo.
%
% o draw_info: a structure of type DrawInfo.
%
% o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
%
*/
static void LogPathInfo(const PathInfo *path_info)
{
  register const PathInfo
    *p;

  /*
    Write each vertex of the vector path to the drawing event log along
    with a human-readable name for its path code.
  */
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
  {
    const char
      *description;

    switch (p->code)
    {
      case GhostlineCode: description="moveto ghostline"; break;
      case OpenCode: description="moveto open"; break;
      case MoveToCode: description="moveto"; break;
      case LineToCode: description="lineto"; break;
      default: description="?"; break;
    }
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " %g,%g %s",p->point.x,p->point.y,description);
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}
static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
{
  MagickBooleanType
    closed_subpath;
  PathInfo
    *path_info;
  PathInfoCode
    code;
  PointInfo
    p,
    q;
  register ssize_t
    i,
    n;
  ssize_t
    coordinates,
    start;
  /*
    Converts a PrimitiveInfo structure into a vector path structure:
    duplicate consecutive points are dropped, and every open subpath is
    closed with a synthetic ghostline segment back to its first point.
    Returns NULL for primitive types that have no path form or on
    allocation failure.
  */
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    case ColorPrimitive:
    case ImagePrimitive:
    case PointPrimitive:
    case TextPrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  /*
    Worst case: one entry per point plus two extra entries per subpath plus
    the terminator, bounded by 3*i+1.
  */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    return((PathInfo *) NULL);
  coordinates=0;
  closed_subpath=MagickFalse;
  n=0;
  /* p is the current subpath's first point; q the last emitted point */
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /*
          New subpath.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
        closed_subpath=primitive_info[i].closed_subpath;
      }
    coordinates--;
    if ((code == MoveToCode) || (coordinates <= 0) ||
        (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
      {
        /*
          Eliminate duplicate points.
        */
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;  /* next point in current subpath */
    if (closed_subpath != MagickFalse)
      {
        closed_subpath=MagickFalse;
        continue;
      }
    /*
      Mark the p point as open if the subpath is not closed: retag its
      moveto as OpenCode and append a ghostline back to the start point.
    */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  /* trim the over-allocation down to the entries actually used */
  path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
    sizeof(*path_info));
  return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyDrawInfo() deallocates memory associated with an DrawInfo structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  /*
    Release every dynamically allocated member, invalidate the signature,
    and free the structure itself.  Always returns NULL so callers can
    reset their pointer in one step.
  */
  assert(draw_info != (DrawInfo *) NULL);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info->signature == MagickCoreSignature);
  if (draw_info->id != (char *) NULL)
    draw_info->id=DestroyString(draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
  if (draw_info->composite_mask != (Image *) NULL)
    draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
  /* poison the signature so stale pointers fail the assert above */
  draw_info->signature=(~MagickCoreSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y E d g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyEdge() destroys the specified polygon edge.
%
% The format of the DestroyEdge method is:
%
% size_t DestroyEdge(PolygonInfo *polygon_info,const size_t edge)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
% o edge: the polygon edge number to destroy.
%
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,
  const size_t edge)
{
  register EdgeInfo
    *edges;

  /*
    Release the edge's point list and close the gap in the edge array by
    shifting the following edges down one slot.  Returns the new edge
    count.
  */
  assert(edge < polygon_info->number_edges);
  edges=polygon_info->edges;
  edges[edge].points=(PointInfo *) RelinquishMagickMemory(edges[edge].points);
  polygon_info->number_edges--;
  if (edge < polygon_info->number_edges)
    (void) memmove(edges+edge,edges+edge+1,(size_t)
      (polygon_info->number_edges-edge)*sizeof(*edges));
  return(polygon_info->number_edges);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P o l y g o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
% The format of the DestroyPolygonInfo method is:
%
% PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  /*
    Release every edge's point list, then the edge array, then the
    structure itself.  Always returns NULL.
  */
  if (polygon_info->edges != (EdgeInfo *) NULL)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
        polygon_info->edges[i].points=(PointInfo *)
          RelinquishMagickMemory(polygon_info->edges[i].points);
      polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(
        polygon_info->edges);
    }
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
% o exception: return any errors or warnings in this structure.
%
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;
  register double
    x;
  SegmentInfo
    inverse_edge;
  /*
    Clip the scanline span [edge->x1,edge->x2] at destination row y so that
    every x in the returned span maps inside the source image under the
    given (inverse) affine transform.  An empty span is signalled by
    x2 < x1.
  */
  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  /* source-x at destination x=0: source_x = sx*x + z */
  z=affine->ry*y+affine->tx;
  if (affine->sx >= MagickEpsilon)
    {
      /* increasing map: clip where source_x crosses 0 and columns */
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        /* decreasing map: the two intercepts swap roles */
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      /* sx ~ 0: source_x is constant; reject the row if out of range */
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  /* source-y at destination x=0: source_y = rx*x + z */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= MagickEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      /* rx ~ 0: source_y is constant; reject the row if out of range */
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  double
    reciprocal;

  /*
    Invert the affine transform: the linear 2x2 part is the adjugate scaled
    by the reciprocal of the determinant (PerceptibleReciprocal() guards
    against a vanishing determinant), and the translation is the negated
    original translation mapped through the inverted linear part.
  */
  reciprocal=PerceptibleReciprocal(affine->sx*affine->sy-
    affine->rx*affine->ry);
  inverse_affine.sx=reciprocal*affine->sy;
  inverse_affine.sy=reciprocal*affine->sx;
  inverse_affine.rx=reciprocal*(-affine->rx);
  inverse_affine.ry=reciprocal*(-affine->ry);
  inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
    inverse_affine.ry;
  inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
    inverse_affine.sy;
  return(inverse_affine);
}
MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
  AffineMatrix
    inverse_affine;

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    extent[4],
    min,
    max;

  register ssize_t
    i;

  SegmentInfo
    edge;

  ssize_t
    start,
    stop,
    y;

  /*
    Determine bounding box: map the four corners of the source through the
    affine transform and take the min/max of the mapped points.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    PointInfo
      point;

    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image: for each destination scanline inside the
    clipped bounding box, walk the span that maps back into the source and
    composite interpolated source pixels over the destination.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetPixelInfo(image,&zero);
  start=(ssize_t) ceil(edge.y1-0.5);
  stop=(ssize_t) floor(edge.y2+0.5);
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    PixelInfo
      composite,
      pixel;

    PointInfo
      point;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    SegmentInfo
      inverse_edge;

    /*
      Clip this scanline against the inverse-mapped source extent; skip it
      when no destination pixel on the row maps into the source.
    */
    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
      0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
      1,exception);
    if (q == (Quantum *) NULL)
      continue;
    pixel=zero;
    composite=zero;
    for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t)
         floor(inverse_edge.x2+0.5); x++)
    {
      /*
        Map the destination pixel back into source coordinates and
        composite the interpolated source color over the destination.
      */
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
        point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      GetPixelInfoPixel(image,q,&composite);
      CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
        &composite);
      SetPixelViaPixelInfo(image,&composite,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% MagickBooleanType DrawBoundingRectangles(Image *image,
% const DrawInfo *draw_info,PolygonInfo *polygon_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawBoundingRectangles(Image *image,
  const DrawInfo *draw_info,const PolygonInfo *polygon_info,
  ExceptionInfo *exception)
{
  /*
    Debug aid: stroke the bounding rectangle of every polygon edge
    (red for one edge direction, green for the other) plus the overall
    bounding rectangle (blue) onto the image.  Returns MagickTrue on
    success, MagickFalse if a color lookup or primitive draw fails.
  */
  double
    mid;
  DrawInfo
    *clone_info;
  MagickStatusType
    status;
  PointInfo
    end,
    resolution,
    start;
  PrimitiveInfo
    primitive_info[6];
  register ssize_t
    i;
  SegmentInfo
    bounds;
  ssize_t
    coordinates;
  (void) memset(primitive_info,0,sizeof(primitive_info));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  /*
    Draw with an opaque black fill; bail out early if the color cannot
    be resolved.
  */
  status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  /*
    Default to 96 DPI; honor the draw-info density when one is set
    (sigma defaults to rho when only one value was given).
  */
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;
      MagickStatusType
        flags;
      flags=ParseGeometry(clone_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;
    }
  /*
    mid is half the effective (affine-scaled, density-scaled) stroke
    width; rectangles are inflated by this amount so the stroke does
    not clip the geometry it brackets.
  */
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    clone_info->stroke_width/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /*
        Union of all edge bounding boxes, inflated by mid and clamped
        to the image dimensions.
      */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      /*
        One rectangle per edge: red stroke for direction != 0, green
        otherwise.  Any failure breaks out with i still in range.
      */
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].direction != 0)
          status=QueryColorCompliance("#f00",AllCompliance,&clone_info->stroke,
            exception);
        else
          status=QueryColorCompliance("#0f0",AllCompliance,&clone_info->stroke,
            exception);
        if (status == MagickFalse)
          break;
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        status&=TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        status=DrawPrimitive(image,clone_info,primitive_info,exception);
        if (status == MagickFalse)
          break;
      }
      /* i < number_edges means the loop above failed part-way. */
      if (i < (ssize_t) polygon_info->number_edges)
        {
          clone_info=DestroyDrawInfo(clone_info);
          return(status == 0 ? MagickFalse : MagickTrue);
        }
    }
  /*
    Finally stroke the overall bounding rectangle in blue.
  */
  status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  status&=TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  status=DrawPrimitive(image,clone_info,primitive_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *id,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
  const char
    *path;

  Image
    *mask;

  MagickBooleanType
    status;

  /*
    Look up the clip path registered on the image under `id`, render it
    into a clipping mask, and attach that mask to the image as its
    write mask.  Returns MagickFalse when the artifact is missing or
    the mask cannot be rendered.
  */
  path=GetImageArtifact(image,id);
  if (path == (const char *) NULL)
    return(MagickFalse);
  mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,path,
    exception);
  if (mask == (Image *) NULL)
    return(MagickFalse);
  status=SetImageMask(image,WritePixelMask,mask,exception);
  mask=DestroyImage(mask);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p p i n g M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClippingMask() draws the clip path and returns it as an image clipping
% mask.
%
% The format of the DrawClippingMask method is:
%
% Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *clip_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o clip_path: the clip path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *clip_path,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;
  Image
    *clip_mask,
    *separate_mask;
  MagickStatusType
    status;
  /*
    Render the MVG clip path `clip_path` (named `id`, used for logging
    only) into a fresh grayscale mask image sized like `image`: white
    fill over a transparent background, alpha channel separated out and
    negated.  Returns the mask image, or NULL on failure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  clip_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(clip_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(clip_mask));
  status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  clip_mask->background_color.alpha_trait=BlendPixelTrait;
  status=SetImageBackgroundColor(clip_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      id);
  /*
    Draw the path with a white fill and no stroke so only coverage
    lands in the alpha channel.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,clip_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  if (clone_info->clip_mask != (char *) NULL)
    clone_info->clip_mask=DestroyString(clone_info->clip_mask);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  clone_info->clip_path=MagickTrue;
  status=RenderMVGContent(clip_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  separate_mask=SeparateImage(clip_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      clip_mask=DestroyImage(clip_mask);
      clip_mask=separate_mask;
      status=NegateImage(clip_mask,MagickFalse,exception);
      if (status == MagickFalse)
        clip_mask=DestroyImage(clip_mask);
    }
  /*
    Bug fix: when NegateImage() failed above, clip_mask was already
    destroyed (DestroyImage returns NULL), so a second unconditional
    DestroyImage() here would hand NULL to DestroyImage and trip its
    assertion.  Only destroy a mask we still hold.
  */
  if ((status == MagickFalse) && (clip_mask != (Image *) NULL))
    clip_mask=DestroyImage(clip_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  return(clip_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C o m p o s i t e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawCompositeMask() draws the mask path and returns it as an image mask.
%
% The format of the DrawCompositeMask method is:
%
% Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *mask_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the mask path id.
%
% o mask_path: the mask path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *mask_path,ExceptionInfo *exception)
{
  Image
    *composite_mask,
    *separate_mask;
  DrawInfo
    *clone_info;
  MagickStatusType
    status;
  /*
    Render the MVG mask path `mask_path` (named `id`, used for logging
    only) into a fresh grayscale composite mask sized like `image`:
    white fill over a transparent background, alpha channel separated
    out and negated.  Returns the mask image, or NULL on failure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  composite_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(composite_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(composite_mask));
  status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL,
    exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &composite_mask->background_color,exception);
  composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  composite_mask->background_color.alpha_trait=BlendPixelTrait;
  (void) SetImageBackgroundColor(composite_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
      id);
  /*
    Draw the path with a white fill and no stroke so only coverage
    lands in the alpha channel.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,mask_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  status=RenderMVGContent(composite_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  separate_mask=SeparateImage(composite_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      composite_mask=DestroyImage(composite_mask);
      composite_mask=separate_mask;
      status=NegateImage(composite_mask,MagickFalse,exception);
      if (status == MagickFalse)
        composite_mask=DestroyImage(composite_mask);
    }
  /*
    Bug fix: when NegateImage() failed above, composite_mask was
    already destroyed (DestroyImage returns NULL), so a second
    unconditional DestroyImage() here would hand NULL to DestroyImage
    and trip its assertion.  Only destroy a mask we still hold.
  */
  if ((status == MagickFalse) && (composite_mask != (Image *) NULL))
    composite_mask=DestroyImage(composite_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
  return(composite_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
  /*
    Walk the polygon's vertex list, alternately emitting "on" dash
    segments (stroked via DrawStrokePolygon) and skipping "off" gaps,
    per draw_info->dash_pattern (scaled by the affine) and honoring
    draw_info->dash_offset.  n indexes the dash pattern; odd n means a
    gap, even n a dash.  Returns MagickTrue when every stroke succeeds.
  */
  double
    length,
    maximum_length,
    offset,
    scale,
    total_length;
  DrawInfo
    *clone_info;
  MagickStatusType
    status;
  PrimitiveInfo
    *dash_polygon;
  register double
    dx,
    dy;
  register ssize_t
    i;
  size_t
    number_vertices;
  ssize_t
    j,
    n;
  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
  /*
    Count vertices up to the UndefinedPrimitive sentinel; the scratch
    dash polygon gets 2n+32 slots (each input vertex may yield a start
    plus an end point, with headroom for the closing segment).
  */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+32UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    return(MagickFalse);
  (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
    sizeof(*dash_polygon));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  scale=ExpandAffine(&draw_info->affine);
  length=scale*draw_info->dash_pattern[0];
  offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  /*
    Consume the dash offset: advance n (and shrink the first emitted
    length) until the offset is exhausted.  A non-positive pattern
    entry terminates the scan.
  */
  j=1;
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*draw_info->dash_pattern[n];
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  /*
    Main loop: for each polygon segment, chop it into dash/gap pieces.
  */
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot(dx,dy);
    /* Guard against absurdly long segments. */
    if (maximum_length > (MaxBezierCoordinates >> 2))
      break;
    if (fabs(length) < MagickEpsilon)
      {
        /* Current pattern entry exhausted: advance, wrapping to 0. */
        if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
          n++;
        if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
          n=0;
        length=scale*draw_info->dash_pattern[n];
      }
    for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /*
            Gap: restart the scratch polygon at the point where the
            next dash begins.
          */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          j=1;
        }
      else
        {
          /*
            Dash: append its end point, terminate the scratch polygon,
            and stroke it.
          */
          if ((j+1) > (ssize_t) number_vertices)
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
          if (status == MagickFalse)
            break;
        }
      if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
        n++;
      if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
        n=0;
      length=scale*draw_info->dash_pattern[n];
    }
    /* Carry the unconsumed remainder of this segment into the next. */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  /*
    Flush a trailing partial dash, nudged by MagickEpsilon so a
    zero-length closing segment still renders.
  */
  if ((status != MagickFalse) && (total_length < maximum_length) &&
      ((n & 0x01) == 0) && (j > 1))
    {
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
const ssize_t x,const ssize_t y)
{
switch (gradient->type)
{
case UndefinedGradient:
case LinearGradient:
{
double
gamma,
length,
offset,
scale;
PointInfo
p,
q;
const SegmentInfo
*gradient_vector;
gradient_vector=(&gradient->gradient_vector);
p.x=gradient_vector->x2-gradient_vector->x1;
p.y=gradient_vector->y2-gradient_vector->y1;
q.x=(double) x-gradient_vector->x1;
q.y=(double) y-gradient_vector->y1;
length=sqrt(q.x*q.x+q.y*q.y);
gamma=sqrt(p.x*p.x+p.y*p.y)*length;
gamma=PerceptibleReciprocal(gamma);
scale=p.x*q.x+p.y*q.y;
offset=gamma*scale*length;
return(offset);
}
case RadialGradient:
{
PointInfo
v;
if (gradient->spread == RepeatSpread)
{
v.x=(double) x-gradient->center.x;
v.y=(double) y-gradient->center.y;
return(sqrt(v.x*v.x+v.y*v.y));
}
v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
return(sqrt(v.x*v.x+v.y*v.y));
}
}
return(0.0);
}
static int StopInfoCompare(const void *x,const void *y)
{
  /*
    qsort() comparator ordering gradient stops by ascending offset.
    Note the greater-than test deliberately precedes the epsilon test,
    so a positive difference within MagickEpsilon still compares as
    greater — preserving the original ordering semantics.
  */
  const StopInfo
    *alpha,
    *beta;

  alpha=(const StopInfo *) x;
  beta=(const StopInfo *) y;
  if (alpha->offset > beta->offset)
    return(1);
  if (fabs(alpha->offset-beta->offset) <= MagickEpsilon)
    return(0);
  return(-1);
}
MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info,ExceptionInfo *exception)
{
  CacheView
    *image_view;
  const GradientInfo
    *gradient;
  const SegmentInfo
    *gradient_vector;
  double
    length;
  MagickBooleanType
    status;
  PixelInfo
    zero;
  PointInfo
    point;
  RectangleInfo
    bounding_box;
  ssize_t
    y;
  /*
    Draw linear or radial gradient on image.  Each pixel's offset along
    the gradient is mapped to the sorted stop list per the gradient's
    spread method (pad/reflect/repeat), the two flanking stop colors
    are blended, and the result is composited over the existing pixel.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  /* Stops must be sorted by offset for the flanking-stop search. */
  qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
    StopInfoCompare);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);
  /*
    NOTE(review): bounding_box.width/height are used below as the
    EXCLUSIVE end x/y of the loop (not extents) — presumably the
    producer stores x2/y2 there; confirm against the caller that fills
    gradient->bounding_box.
  */
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    double
      alpha,
      offset;
    PixelInfo
      composite,
      pixel;
    register Quantum
      *magick_restrict q;
    register ssize_t
      i,
      x;
    ssize_t
      j;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    composite=zero;
    /* Seed the offset at x=0; refreshed per pixel inside the switch. */
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /*
            Pad: offsets outside [0,1] clamp to the first/last stop.
            The offset is recomputed unless (x,y) is the gradient
            origin (where the seeded value already applies).
          */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          /* i = first stop whose offset exceeds this pixel's offset. */
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                /* Linear blend between flanking stops i-1 and i. */
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /*
            Reflect: fold the offset into [0,1], reversing direction on
            every other period.
          */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          double
            repeat;
          MagickBooleanType
            antialias;
          /*
            Repeat: wrap the offset modulo the gradient length (linear)
            or radius (radial); antialias the seam between periods by
            blending first and last stops across a one-pixel band.
          */
          antialias=MagickFalse;
          repeat=0.0;
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /* Seam pixel: blend last stop into first stop. */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive
% may be represented as a string or filename. Precede the filename with an
% "at" sign (@) and the contents of the file are drawn on the image. You
% can affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
  const size_t pad)
{
  /*
    Ensure the shared primitive-info array can hold `pad` more entries
    (plus PrimitiveExtentPad slack) beyond the current offset, growing
    it if necessary.  Returns MagickTrue when capacity is available.
    On allocation failure the array is replaced by a minimal one-page
    allocation so callers can unwind safely, and MagickFalse is
    returned.
  */
  double
    extent;
  size_t
    quantum;
  /*
    Check if there is enough storage for drawing primitives.  The
    required extent is computed in double so an attacker-supplied pad
    cannot overflow size_t arithmetic before the SSIZE_MAX/memory-limit
    checks below.
  */
  extent=(double) mvg_info->offset+pad+PrimitiveExtentPad;
  quantum=sizeof(**mvg_info->primitive_info);
  if (((extent*quantum) < (double) SSIZE_MAX) &&
      ((extent*quantum) < (double) GetMaxMemoryRequest()))
    {
      if (extent <= (double) *mvg_info->extent)
        return(MagickTrue);
      *mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
        *mvg_info->primitive_info,(size_t) extent,quantum);
      if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
        {
          register ssize_t
            i;
          *mvg_info->extent=(size_t) extent;
          /* Mark the newly grown tail as unused. */
          for (i=mvg_info->offset+1; i < (ssize_t) extent; i++)
            (*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
          return(MagickTrue);
        }
    }
  /*
    Reallocation failed, allocate a primitive to facilitate unwinding.
  */
  (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
  if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
    *mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
      *mvg_info->primitive_info);
  *mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory(
    PrimitiveExtentPad*quantum);
  (void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum);
  *mvg_info->extent=1;
  return(MagickFalse);
}
static inline double GetDrawValue(const char *magick_restrict string,
  char **magick_restrict sentinal)
{
  double
    value;

  /*
    Parse a locale-independent double from `string`; *sentinal receives
    a pointer to the first unparsed character.  NaN or values outside
    roughly +/-(SSIZE_MAX-512) collapse to 0.0 so malformed MVG numbers
    cannot inject non-finite or overflowing coordinates downstream.
  */
  value=InterpretLocaleValue(string,sentinal);
  if ((IsNaN(value) != 0) || (value < -(SSIZE_MAX-512.0)) ||
      (value > (SSIZE_MAX-512.0)))
    return(0.0);
  return(value);
}
static int MVGMacroCompare(const void *target,const void *source)
{
  /*
    Splay-tree key comparator: order two MVG macro names as C strings.
  */
  return(strcmp((const char *) target,(const char *) source));
}
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
  /*
    Pre-scan an MVG stream for named `push ... "name" ... pop` spans
    and return a splay tree mapping each name to the MVG text between
    the matching push/pop pair.  Returns NULL when `primitive` is NULL.
  */
  char
    *macro,
    *token;
  const char
    *q;
  size_t
    extent;
  SplayTreeInfo
    *macros;
  /*
    Scan graphic primitives for definitions and classes.
  */
  if (primitive == (const char *) NULL)
    return((SplayTreeInfo *) NULL);
  macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
    RelinquishMagickMemory);
  macro=AcquireString(primitive);
  token=AcquireString(primitive);
  /* Token buffer is as long as the whole stream plus slack. */
  extent=strlen(token)+MagickPathExtent;
  for (q=primitive; *q != '\0'; )
  {
    if (GetNextToken(q,&q,extent,token) < 1)
      break;
    if (*token == '\0')
      break;
    if (LocaleCompare("push",token) == 0)
      {
        register const char
          *end,
          *start;
        (void) GetNextToken(q,&q,extent,token);
        if (*q == '"')
          {
            char
              name[MagickPathExtent];
            const char
              *p;
            ssize_t
              n;
            /*
              Named macro (e.g. push graphic-context "wheel").
            */
            (void) GetNextToken(q,&q,extent,token);
            start=q;
            end=q;
            (void) CopyMagickString(name,token,MagickPathExtent);
            /*
              n tracks push/pop nesting depth; the macro body ends at
              the pop that balances this push.
            */
            n=1;
            for (p=q; *p != '\0'; )
            {
              if (GetNextToken(p,&p,extent,token) < 1)
                break;
              if (*token == '\0')
                break;
              if (LocaleCompare(token,"pop") == 0)
                {
                  /* Body ends just before the "pop" keyword itself. */
                  end=p-strlen(token)-1;
                  n--;
                }
              if (LocaleCompare(token,"push") == 0)
                n++;
              if ((n == 0) && (end > start))
                {
                  /*
                    Extract macro.
                  */
                  (void) GetNextToken(p,&p,extent,token);
                  (void) CopyMagickString(macro,start,(size_t) (end-start));
                  (void) AddValueToSplayTree(macros,ConstantString(name),
                    ConstantString(macro));
                  break;
                }
            }
          }
      }
  }
  token=DestroyString(token);
  macro=DestroyString(macro);
  return(macros);
}
static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *terminator;

  double
    value;

  /*
    A token is a point when it parses as a number: either the parsed
    value is meaningfully non-zero, or the parse consumed at least one
    character (a genuine zero still counts).
  */
  value=GetDrawValue(point,&terminator);
  if ((fabs(value) < MagickEpsilon) && (terminator == point))
    return(MagickFalse);
  return(MagickTrue);
}
static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info,
  const PointInfo point)
{
  /*
    Record a single-coordinate primitive at the given location; a lone
    point is never a closed subpath.
  */
  primitive_info->point=point;
  primitive_info->closed_subpath=MagickFalse;
  primitive_info->coordinates=1;
  return(MagickTrue);
}
static MagickBooleanType RenderMVGContent(Image *image,
const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
keyword[MagickPathExtent],
geometry[MagickPathExtent],
*next_token,
pattern[MagickPathExtent],
*primitive,
*token;
const char
*q;
double
angle,
coordinates,
cursor,
factor,
primitive_extent;
DrawInfo
*clone_info,
**graphic_context;
MagickBooleanType
proceed;
MagickStatusType
status;
MVGInfo
mvg_info;
PointInfo
point;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register const char
*p;
register ssize_t
i,
x;
SegmentInfo
bounds;
size_t
extent,
number_points,
number_stops;
SplayTreeInfo
*macros;
ssize_t
defsDepth,
j,
k,
n,
symbolDepth;
StopInfo
*stops;
TypeMetric
metrics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if (depth > MagickMaxRecursionDepth)
ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply",
image->filename);
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (image->alpha_trait == UndefinedPixelTrait)
{
status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
if (status == MagickFalse)
return(MagickFalse);
}
if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) &&
(*(draw_info->primitive+1) != '-') && (depth == 0))
primitive=FileToString(draw_info->primitive+1,~0UL,exception);
else
primitive=AcquireString(draw_info->primitive);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"mvg:vector-graphics",primitive);
n=0;
number_stops=0;
stops=(StopInfo *) NULL;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=PrimitiveExtentPad;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(primitive_info,0,(size_t) number_points*
sizeof(*primitive_info));
(void) memset(&mvg_info,0,sizeof(mvg_info));
mvg_info.primitive_info=(&primitive_info);
mvg_info.extent=(&number_points);
mvg_info.exception=exception;
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
defsDepth=0;
symbolDepth=0;
cursor=0.0;
macros=GetMVGMacros(primitive);
status=MagickTrue;
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
if (GetNextToken(q,&q,MagickPathExtent,keyword) < 1)
break;
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
*token='\0';
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.rx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ry=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.tx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("alpha",keyword) == 0)
{
primitive_type=AlphaPrimitive;
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->border_color,exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("class",keyword) == 0)
{
const char
*mvg_class;
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
/* a class may not reference itself (guards infinite expansion) */
if (LocaleCompare(token,graphic_context[n]->id) == 0)
break;
mvg_class=(const char *) GetValueFromSplayTree(macros,token);
if ((mvg_class != (const char *) NULL) && (p > primitive))
{
char
*elements;
ssize_t
offset;
/*
  Inject class elements in stream.
*/
/*
  Rebuild the primitive string as: text-before-cursor + class body +
  newline + remaining text, then re-point the cursor q into the new
  buffer at the same offset.  The old buffer is freed, so p/q into the
  old string must not be used after this point.
*/
offset=(ssize_t) (p-primitive);
elements=AcquireString(primitive);
elements[offset]='\0';
(void) ConcatenateString(&elements,mvg_class);
(void) ConcatenateString(&elements,"\n");
(void) ConcatenateString(&elements,q);
primitive=DestroyString(primitive);
primitive=elements;
q=primitive+offset;
}
break;
}
if (LocaleCompare("clip-path",keyword) == 0)
{
const char
*clip_path;
/*
  Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
(void) CloneString(&graphic_context[n]->clip_mask,token);
clip_path=(const char *) GetValueFromSplayTree(macros,token);
if (clip_path != (const char *) NULL)
{
/* replace any previously rendered clipping mask for this context */
if (graphic_context[n]->clipping_mask != (Image *) NULL)
graphic_context[n]->clipping_mask=
DestroyImage(graphic_context[n]->clipping_mask);
graphic_context[n]->clipping_mask=DrawClippingMask(image,
graphic_context[n],token,clip_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
{
/*
  MVG compliance: the clip path is attached to the image itself
  (artifact + write mask) rather than to the graphics context.
*/
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,
graphic_context[n]->clip_mask,clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
}
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
(void) GetNextToken(q,&q,extent,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
/*
  Object bounding box units: map the unit square onto the object's
  bounds via the local affine.
  NOTE(review): "¤t" below looks like HTML-entity corruption of
  "&current" ("&curren" -> U+00A4).  Verify against upstream source
  before building; as written this line will not compile.
*/
GetAffineMatrix(&current);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
if (LocaleCompare("compliance",keyword) == 0)
{
/*
  MVG compliance associates a clipping mask with an image; SVG
  compliance associates a clipping mask with a graphics context.
*/
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->compliance=(ComplianceType) ParseCommandOption(
MagickComplianceOptions,MagickFalse,token);
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
(void) GetNextToken(q,&q,extent,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("density",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->density,token);
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
(void) GetNextToken(q,&q,extent,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
/* while rendering a clip path, fill settings are forced elsewhere */
if (graphic_context[n]->clip_path != MagickFalse)
break;
/*
  A fill value naming a previously pushed pattern becomes a tiled
  fill; anything else is parsed as a color.
*/
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->fill,exception);
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
/* a trailing '%' scales the value into [0,1]; clamp either way */
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
/*
  SVG compliance composes opacities multiplicatively across nested
  contexts; MVG sets the alpha absolutely.
*/
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->fill_alpha*=opacity;
else
graphic_context[n]->fill_alpha=QuantumRange*opacity;
if (graphic_context[n]->fill.alpha != TransparentAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
else
graphic_context[n]->fill.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->font,token);
/* "font none" clears the font selection */
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *) RelinquishMagickMemory(
graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->pointsize=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
(void) GetNextToken(q,&q,extent,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
(void) GetNextToken(q,&q,extent,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
ssize_t
weight;
(void) GetNextToken(q,&q,extent,token);
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token);
/* not a symbolic weight: accept a raw numeric weight (e.g. 400) */
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(token);
graphic_context[n]->weight=(size_t) weight;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
/* recognized for compatibility; the value is consumed and ignored */
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
(void) GetNextToken(q,&q,extent,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
/* "image <compose> x,y w,h 'filename'": compose operator first */
primitive_type=ImagePrimitive;
(void) GetNextToken(q,&q,extent,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interline_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->kerning=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("letter-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (IsPoint(token) == MagickFalse)
break;
/*
  Convert the spacing factor into absolute kerning by measuring the
  width of a single space in the current font (via a throwaway
  DrawInfo clone, destroyed below).
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
clone_info->text=AcquireString(" ");
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
graphic_context[n]->kerning=metrics.width*
GetDrawValue(token,&next_token);
clone_info=DestroyDrawInfo(clone_info);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("line",keyword) == 0)
{
primitive_type=LinePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'm':
case 'M':
{
if (LocaleCompare("mask",keyword) == 0)
{
const char
*mask_path;
/*
  Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
mask_path=(const char *) GetValueFromSplayTree(macros,token);
if (mask_path != (const char *) NULL)
{
/* replace any previously rendered composite mask */
if (graphic_context[n]->composite_mask != (Image *) NULL)
graphic_context[n]->composite_mask=
DestroyImage(graphic_context[n]->composite_mask);
graphic_context[n]->composite_mask=DrawCompositeMask(image,
graphic_context[n],token,mask_path,exception);
/* MVG compliance attaches the mask to the image itself */
if (graphic_context[n]->compliance != SVGCompliance)
status=SetImageMask(image,CompositePixelMask,
graphic_context[n]->composite_mask,exception);
}
break;
}
status=MagickFalse;
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
/* recognized for compatibility; the value is consumed and ignored */
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
/* like fill-opacity/stroke-opacity, but applies to both at once */
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
{
graphic_context[n]->fill_alpha*=opacity;
graphic_context[n]->stroke_alpha*=opacity;
}
else
{
graphic_context[n]->fill_alpha=QuantumRange*opacity;
graphic_context[n]->stroke_alpha=QuantumRange*opacity;
}
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
/* "pop <scope>": close a previously pushed scope */
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
break;
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
{
/* rendering is suppressed while inside any defs/symbol scope */
defsDepth--;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
DrawError,"UnbalancedGraphicContextPushPop","`%s'",token);
status=MagickFalse;
n=0;
break;
}
/*
  MVG compliance: if the popped context's clip mask differs from
  the parent's, the image-level write mask must be cleared.
*/
if ((graphic_context[n]->clip_mask != (char *) NULL) &&
(graphic_context[n]->compliance != SVGCompliance))
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
status=SetImageMask(image,WritePixelMask,(Image *) NULL,
exception);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("mask",token) == 0)
break;
if (LocaleCompare("pattern",token) == 0)
break;
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth--;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
{
/*
  Class context.
*/
/*
  Skip ahead to the matching "pop class"; the class body itself was
  captured into the macros splay-tree in an earlier pass and is
  expanded on demand by the "class" keyword.
*/
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"class") != 0)
continue;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("clip-path",token) == 0)
{
/* skip to the matching "pop clip-path" (body handled via macros) */
(void) GetNextToken(q,&q,extent,token);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"clip-path") != 0)
continue;
break;
}
/* q-4 must stay >= p: the span must at least hold "pop " */
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("defs",token) == 0)
{
defsDepth++;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent],
type[MagickPathExtent];
SegmentInfo
segment;
/* "push gradient <name> <linear|radial> x1,y1 x2,y2 ..." */
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(type,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
segment.x1=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y1=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.x2=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y2=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
/* radial gradients carry an extra (radius) argument */
if (LocaleCompare(type,"radial") == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
}
/* capture everything up to the matching "pop gradient" */
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
/* copy the gradient body, excluding the trailing "pop " keyword */
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
/* transform the gradient segment by the current CTM */
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
/* publish the gradient as image artifacts: body, type, geometry */
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-type",name);
(void) SetImageArtifact(image,key,type);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
/* grow the context stack and clone the current context onto it */
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
/* optional quoted id names the new context */
if (*q == '"')
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->id,token);
}
break;
}
if (LocaleCompare("mask",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent];
RectangleInfo
bounds;
/* "push pattern <name> x,y w,h": capture body as an artifact */
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
bounds.x=(ssize_t) ceil(GetDrawValue(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.y=(ssize_t) ceil(GetDrawValue(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.width=(size_t) floor(GetDrawValue(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.height=(size_t) floor(GetDrawValue(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
/* scan ahead to the matching "pop pattern" */
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
/* copy the pattern body, excluding the trailing "pop " keyword */
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double)
bounds.height,(double) bounds.x,(double) bounds.y);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth++;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
/* standard 2-D rotation matrix; angle reduced mod 360 degrees */
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
PixelInfo
stop_color;
/*
  Grow the gradient stop array: 2 entries are pre-allocated for the
  first stop, then the array is resized from the third stop on.
*/
number_stops++;
if (number_stops == 1)
stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops));
else
if (number_stops > 2)
stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops,
sizeof(*stops));
if (stops == (StopInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,&stop_color,
exception);
stops[number_stops-1].color=stop_color;
(void) GetNextToken(q,&q,extent,token);
/* offset may be a percentage */
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
stops[number_stops-1].offset=factor*GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
/* pattern name => tiled stroke; otherwise parse as a color */
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->stroke,exception);
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=
graphic_context[n]->stroke_alpha;
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*r;
/*
  First pass (cursor r): count the dash values without moving q,
  so the array can be sized before the second pass parses them.
*/
r=q;
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
}
/* 2*x+2 slots: room to mirror an odd-length pattern + terminator */
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
status=MagickFalse;
break;
}
(void) memset(graphic_context[n]->dash_pattern,0,(size_t)
(2*x+2)*sizeof(*graphic_context[n]->dash_pattern));
/* second pass (cursor q): parse the counted values for real */
for (j=0; j < x; j++)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_pattern[j]=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->dash_pattern[j] < 0.0)
status=MagickFalse;
}
/* SVG rule: an odd-length dash list is repeated to even length */
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
/* non-numeric value (e.g. "none"): dash pattern stays cleared */
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_offset=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
(void) GetNextToken(q,&q,extent,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
(void) GetNextToken(q,&q,extent,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
/* mirrors fill-opacity: '%' factor, clamp, SVG multiplies */
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->stroke_alpha*=opacity;
else
graphic_context[n]->stroke_alpha=QuantumRange*opacity;
if (graphic_context[n]->stroke.alpha != TransparentAlpha)
graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha;
else
graphic_context[n]->stroke.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
graphic_context[n]->stroke_width=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
/* new text run: reset the incremental text cursor */
cursor=0.0;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
/* SVG synonym for text-align */
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->text_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->undercolor,exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.tx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
/* a translation restarts text-cursor tracking */
cursor=0.0;
break;
}
status=MagickFalse;
break;
}
case 'u':
case 'U':
{
if (LocaleCompare("use",keyword) == 0)
{
const char
*use;
/*
  Get a macro from the MVG document, and "use" it here.
*/
(void) GetNextToken(q,&q,extent,token);
use=(const char *) GetValueFromSplayTree(macros,token);
if (use != (const char *) NULL)
{
/*
  Render the macro body recursively with a cloned context;
  depth+1 lets the renderer bound the recursion.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
(void) CloneString(&clone_info->primitive,use);
status=RenderMVGContent(image,clone_info,depth+1,exception);
clone_info=DestroyDrawInfo(clone_info);
}
break;
}
status=MagickFalse;
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
/* "viewbox x,y width,height": x/y round-half-down, w/h round */
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.x=(ssize_t) ceil(GetDrawValue(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.y=(ssize_t) ceil(GetDrawValue(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.width=(size_t) floor(GetDrawValue(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.height=(size_t) floor(GetDrawValue(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'w':
case 'W':
{
/* synonym for interword-spacing */
if (LocaleCompare("word-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
if ((fabs(affine.sx-1.0) >= MagickEpsilon) ||
(fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) ||
(fabs(affine.sy-1.0) >= MagickEpsilon) ||
(fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon))
{
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if (*q == '\0')
{
if (number_stops > 1)
{
GradientType
type;
type=LinearGradient;
if (draw_info->gradient.type == RadialGradient)
type=RadialGradient;
(void) GradientImage(image,type,PadSpread,stops,number_stops,
exception);
}
if (number_stops > 0)
stops=(StopInfo *) RelinquishMagickMemory(stops);
}
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int)
(q-p-1),p);
continue;
}
/*
Parse the primitive attributes.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
i=0;
mvg_info.offset=i;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
primitive_info[0].coordinates=0;
primitive_info[0].method=FloodfillMethod;
primitive_info[0].closed_subpath=MagickFalse;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
(void) GetNextToken(q,&q,extent,token);
point.x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
point.y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
primitive_info[i].closed_subpath=MagickFalse;
i++;
mvg_info.offset=i;
if (i < (ssize_t) number_points)
continue;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
if (status == MagickFalse)
break;
if ((primitive_info[j].primitive == TextPrimitive) ||
(primitive_info[j].primitive == ImagePrimitive))
if (primitive_info[j].text != (char *) NULL)
primitive_info[j].text=DestroyString(primitive_info[j].text);
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].closed_subpath=MagickFalse;
/*
Circumscribe primitive within a circle.
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
coordinates=(double) primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
coordinates*=5.0;
break;
}
case RoundRectanglePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot((double) alpha,(double) beta);
coordinates*=5.0;
coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0*
BezierQuantum+360.0;
break;
}
case BezierPrimitive:
{
coordinates=(double) (BezierQuantum*primitive_info[j].coordinates);
if (primitive_info[j].coordinates > (107*BezierQuantum))
{
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
break;
}
case PathPrimitive:
{
char
*s,
*t;
(void) GetNextToken(q,&q,extent,token);
coordinates=1.0;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=GetDrawValue(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
coordinates++;
}
for (s=token; *s != '\0'; s++)
if (strspn(s,"AaCcQqSsTt") != 0)
coordinates+=(20.0*BezierQuantum)+360.0;
break;
}
case CirclePrimitive:
case ArcPrimitive:
case EllipsePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot(alpha,beta);
coordinates=2.0*(ceil(MagickPI*radius))+6.0*BezierQuantum+360.0;
if (coordinates > (107*BezierQuantum))
{
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
break;
}
default:
break;
}
if (status == MagickFalse)
break;
if (((size_t) (i+coordinates)) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=coordinates+1;
if (number_points < (size_t) coordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
mvg_info.offset=i;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad);
if (status == MagickFalse)
break;
mvg_info.offset=j;
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
status&=TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
double
dx,
dy,
maximum_length;
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
maximum_length=hypot(dx,dy);
if (maximum_length > (MaxBezierCoordinates/100.0))
ThrowPointExpectedException(keyword,exception);
status&=TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+2].point.x < 0.0) ||
(primitive_info[j+2].point.y < 0.0))
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0)
{
status=MagickFalse;
break;
}
status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
status&=TraceArc(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x < 0.0) ||
(primitive_info[j+1].point.y < 0.0))
{
status=MagickFalse;
break;
}
status&=TraceEllipse(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceCircle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
{
if (primitive_info[j].coordinates < 1)
{
status=MagickFalse;
break;
}
break;
}
case PolygonPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
primitive_info[j].closed_subpath=MagickTrue;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
status&=TraceBezier(&mvg_info,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
coordinates=(double) TracePath(&mvg_info,token,exception);
if (coordinates < 0.0)
{
status=MagickFalse;
break;
}
i=(ssize_t) (j+coordinates);
break;
}
case AlphaPrimitive:
case ColorPrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
{
status=MagickFalse;
break;
}
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
/*
Compute text cursor offset.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) &&
(fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon))
{
mvg_info.point=primitive_info->point;
primitive_info->point.x+=cursor;
}
else
{
mvg_info.point=primitive_info->point;
cursor=0.0;
}
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
clone_info->render=MagickFalse;
clone_info->text=AcquireString(token);
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
clone_info=DestroyDrawInfo(clone_info);
cursor+=metrics.width;
if (graphic_context[n]->compliance != SVGCompliance)
cursor=0.0;
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
break;
}
}
mvg_info.offset=i;
if (status == 0)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),
p);
/*
Sanity check.
*/
status&=CheckPrimitiveExtent(&mvg_info,
ExpandAffine(&graphic_context[n]->affine));
if (status == 0)
break;
status&=CheckPrimitiveExtent(&mvg_info,graphic_context[n]->stroke_width);
if (status == 0)
break;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) &&
(graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
{
const char
*clip_path;
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,graphic_context[n]->clip_mask,
clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
status&=DrawPrimitive(image,graphic_context[n],primitive_info,
exception);
}
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
macros=DestroySplayTree(macros);
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
{
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
}
primitive=DestroyString(primitive);
if (stops != (StopInfo *) NULL)
stops=(StopInfo *) RelinquishMagickMemory(stops);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
  ExceptionInfo *exception)
{
  /*
    Public entry point: render the MVG primitive string carried by draw_info
    onto image, delegating to RenderMVGContent at recursion depth 0 (depth is
    used internally to bound nested "use"/clip-path macro expansion).
  */
  return(RenderMVGContent(image,draw_info,0,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() draws a pattern.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
%    o pattern: the pattern image, rendered from the named path.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern,
  ExceptionInfo *exception)
{
  char
    property[MagickPathExtent];

  const char
    *geometry,
    *path,
    *type;

  DrawInfo
    *clone_info;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  /*
    Render the named pattern path into *pattern.  The path, its geometry, and
    an optional gradient type are stored as image artifacts keyed by the
    pattern name ("<name>", "<name>-geometry", "<name>-type").  Returns
    MagickFalse if the pattern is not registered or rendering fails.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  (void) FormatLocaleString(property,MagickPathExtent,"%s",name);
  path=GetImageArtifact(image,property);
  if (path == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name);
  geometry=GetImageArtifact(image,property);
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  image_info=AcquireImageInfo();
  image_info->size=AcquireString(geometry);
  *pattern=AcquireImage(image_info,exception);
  image_info=DestroyImageInfo(image_info);
  if (*pattern == (Image *) NULL)
    return(MagickFalse);  /* allocation failed; exception already recorded */
  /*
    Start from a fully transparent canvas so only the rendered path shows.
  */
  (void) QueryColorCompliance("#00000000",AllCompliance,
    &(*pattern)->background_color,exception);
  (void) SetImageBackgroundColor(*pattern,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,geometry);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  /*
    Drop any inherited fill/stroke patterns to avoid recursive pattern fills.
  */
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=DestroyImage(clone_info->stroke_pattern);
  (void) FormatLocaleString(property,MagickPathExtent,"%s-type",name);
  type=GetImageArtifact(image,property);
  if (type != (const char *) NULL)
    clone_info->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,type);
  (void) CloneString(&clone_info->primitive,path);
  status=RenderMVGContent(*pattern,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
  register ssize_t
    n;

  ssize_t
    count;

  /*
    Release the per-thread polygon structures and then the array itself.  The
    array always holds one (possibly NULL) slot per thread permitted by the
    thread resource limit.  Always returns NULL for caller reassignment.
  */
  assert(polygon_info != (PolygonInfo **) NULL);
  count=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (n=0; n < count; n++)
    if (polygon_info[n] != (PolygonInfo *) NULL)
      polygon_info[n]=DestroyPolygonInfo(polygon_info[n]);
  return((PolygonInfo **) RelinquishMagickMemory(polygon_info));
}
static PolygonInfo **AcquirePolygonThreadSet(
const PrimitiveInfo *primitive_info)
{
PathInfo
*magick_restrict path_info;
PolygonInfo
**polygon_info;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
sizeof(*polygon_info));
if (polygon_info == (PolygonInfo **) NULL)
return((PolygonInfo **) NULL);
(void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
path_info=ConvertPrimitiveToPath(primitive_info);
if (path_info == (PathInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
polygon_info[i]=ConvertPathToPolygon(path_info);
if (polygon_info[i] == (PolygonInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
}
path_info=(PathInfo *) RelinquishMagickMemory(path_info);
return(polygon_info);
}
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid,
  const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
  const ssize_t y,double *stroke_alpha)
{
  /*
    Compute the fill opacity (returned) and stroke opacity (*stroke_alpha)
    contributed by polygon_info's edges at pixel (x,y).  mid is half the
    transformed stroke width; fill selects whether interior coverage is
    computed at all; fill_rule chooses even-odd vs. non-zero winding.
    NOTE(review): the early `break`s below assume the edge list is sorted by
    ascending bounds.y1 — confirm against ConvertPathToPolygon.  This routine
    mutates per-edge scanline/highwater caches, so each thread needs its own
    PolygonInfo (see AcquirePolygonThreadSet).
  */
  double
    alpha,
    beta,
    distance,       /* squared distance from (x,y) to the nearest segment */
    subpath_alpha;  /* best interior/antialias coverage found so far */

  PointInfo
    delta;

  register const PointInfo
    *q;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  ssize_t
    j,
    winding_number;

  /*
    Compute fill & stroke opacity for this (x,y) point.
  */
  *stroke_alpha=0.0;
  subpath_alpha=0.0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    /* Edge starts below this scanline: no later edge can match either. */
    if ((double) y <= (p->bounds.y1-mid-0.5))
      break;
    /* Edge ends above this scanline: it can never match again; drop it. */
    if ((double) y > (p->bounds.y2+mid+0.5))
      {
        (void) DestroyEdge(polygon_info,(size_t) j);
        continue;
      }
    /* Horizontally out of range (including stroke halo): skip. */
    if (((double) x <= (p->bounds.x1-mid-0.5)) ||
        ((double) x > (p->bounds.x2+mid+0.5)))
      continue;
    /* Resume from the cached highwater index for this edge (min 1, since the
       loop body reads points[i-1]). */
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
    {
      if ((double) y <= (p->points[i-1].y-mid-0.5))
        break;
      if ((double) y > (p->points[i].y+mid+0.5))
        continue;
      /* First segment touching this scanline: update the resume cache. */
      if (p->scanline != (double) y)
        {
          p->scanline=(double) y;
          p->highwater=(size_t) i;
        }
      /*
        Compute distance between a point and an edge.
      */
      q=p->points+i-1;
      delta.x=(q+1)->x-q->x;
      delta.y=(q+1)->y-q->y;
      /* beta is the (unnormalized) projection of (x,y) onto the segment. */
      beta=delta.x*(x-q->x)+delta.y*(y-q->y);
      if (beta <= 0.0)
        {
          /* Projection falls before the segment start: distance to q. */
          delta.x=(double) x-q->x;
          delta.y=(double) y-q->y;
          distance=delta.x*delta.x+delta.y*delta.y;
        }
      else
        {
          alpha=delta.x*delta.x+delta.y*delta.y;
          if (beta >= alpha)
            {
              /* Projection falls past the segment end: distance to q+1. */
              delta.x=(double) x-(q+1)->x;
              delta.y=(double) y-(q+1)->y;
              distance=delta.x*delta.x+delta.y*delta.y;
            }
          else
            {
              /* Perpendicular (squared) distance to the segment interior. */
              alpha=PerceptibleReciprocal(alpha);
              beta=delta.x*(y-q->y)-delta.y*(x-q->x)+MagickEpsilon;
              distance=alpha*beta*beta;
            }
        }
      /*
        Compute stroke & subpath opacity.
      */
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          /* Inside the outer stroke band? */
          alpha=mid+0.5;
          if ((*stroke_alpha < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_alpha=1.0;  /* fully within the stroke core */
              else
                {
                  /* Antialiased stroke edge: opacity falls off with the
                     (linear) distance beyond the stroke core. */
                  beta=1.0;
                  if (fabs(distance-1.0) >= MagickEpsilon)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_alpha=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_alpha=1.0;
          continue;
        }
      if (distance > 1.0)
        continue;
      /* beta may still be 0.0 if the stroke branch above didn't set it. */
      if (fabs(beta) < MagickEpsilon)
        {
          beta=1.0;
          if (fabs(distance-1.0) >= MagickEpsilon)
            beta=sqrt(distance);
        }
      alpha=beta-1.0;
      if (subpath_alpha < (alpha*alpha))
        subpath_alpha=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_alpha >= 1.0)
    return(1.0);
  /*
    Determine winding number.
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if ((double) y <= p->bounds.y1)
      break;
    if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
      continue;
    /* Entirely to the left of (x,y): the whole edge crosses the ray. */
    if ((double) x > p->bounds.x2)
      {
        winding_number+=p->direction ? 1 : -1;
        continue;
      }
    /* Locate the segment straddling scanline y, then test which side of it
       (x,y) lies on via the cross product. */
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) (p->number_points-1); i++)
      if ((double) y <= p->points[i].y)
        break;
    q=p->points+i-1;
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction ? 1 : -1;
  }
  /* Even-odd rule: interior iff an odd number of crossings; non-zero rule:
     interior iff the signed crossing count is non-zero. */
  if (fill_rule != NonZeroRule)
    {
      if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
        return(1.0);
    }
  else
    if (MagickAbsoluteValue(winding_number) != 0)
      return(1.0);
  /* Outside the interior: fall back to edge antialias coverage. */
  return(subpath_alpha);
}
static MagickBooleanType DrawPolygonPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  /*
    Rasterize a polygon/polyline primitive onto image.  The primitive's
    vertex list is converted to per-thread edge tables, a clipped bounding
    box is computed, and each scanline row is filled/stroked in parallel via
    GetFillAlpha.  Returns MagickTrue on success.
  */
  CacheView
    *image_view;

  MagickBooleanType
    fill,
    status;

  double
    mid;  /* half the transformed stroke width */

  PolygonInfo
    **magick_restrict polygon_info;  /* one edge table per thread */

  register EdgeInfo
    *p;

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    start_y,
    stop_y,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  assert(primitive_info != (PrimitiveInfo *) NULL);
  /* A 0- or 1-coordinate primitive draws nothing here. */
  if (primitive_info->coordinates <= 1)
    return(MagickTrue);
  /*
    Compute bounding box.
  */
  polygon_info=AcquirePolygonThreadSet(primitive_info);
  if (polygon_info == (PolygonInfo **) NULL)
    return(MagickFalse);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
  /* Fill only for flood-fill style methods; otherwise stroke-only. */
  fill=(primitive_info->method == FillToBorderMethod) ||
    (primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
  mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
  /* Union of all edge bounding boxes. */
  bounds=polygon_info[0]->edges[0].bounds;
  for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
  {
    p=polygon_info[0]->edges+i;
    if (p->bounds.x1 < bounds.x1)
      bounds.x1=p->bounds.x1;
    if (p->bounds.y1 < bounds.y1)
      bounds.y1=p->bounds.y1;
    if (p->bounds.x2 > bounds.x2)
      bounds.x2=p->bounds.x2;
    if (p->bounds.y2 > bounds.y2)
      bounds.y2=p->bounds.y2;
  }
  /* Expand by the stroke half-width plus a 1-pixel antialias margin. */
  bounds.x1-=(mid+1.0);
  bounds.y1-=(mid+1.0);
  bounds.x2+=(mid+1.0);
  bounds.y2+=(mid+1.0);
  if ((bounds.x1 >= (double) image->columns) ||
      (bounds.y1 >= (double) image->rows) ||
      (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
    {
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      return(MagickTrue);  /* virtual polygon */
    }
  /* Clamp the box to [0, dimension-1] in each axis. */
  bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x1;
  bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y1;
  bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x2;
  bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y2;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  /* NOTE(review): the coordinates == 1 test is unreachable given the early
     return above; this branch effectively fires for degenerate, edge-free
     paths only. */
  if ((primitive_info->coordinates == 1) ||
      (polygon_info[0]->number_edges == 0))
    {
      /*
        Draw point.
      */
      start_y=(ssize_t) ceil(bounds.y1-0.5);
      stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
      for (y=start_y; y <= stop_y; y++)
      {
        MagickBooleanType
          sync;

        PixelInfo
          pixel;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        ssize_t
          start_x,
          stop_x;

        if (status == MagickFalse)
          continue;
        start_x=(ssize_t) ceil(bounds.x1-0.5);
        stop_x=(ssize_t) floor(bounds.x2+0.5);
        x=start_x;
        q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        GetPixelInfo(image,&pixel);
        for ( ; x <= stop_x; x++)
        {
          /* Only the single pixel nearest the primitive's point is set. */
          if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
              (y == (ssize_t) ceil(primitive_info->point.y-0.5)))
            {
              /* Fill color is sampled at box-relative coordinates. */
              GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
            }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " end draw-polygon");
      return(status);
    }
  /*
    Draw polygon or line.
  */
  start_y=(ssize_t) ceil(bounds.y1-0.5);
  stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
  for (y=start_y; y <= stop_y; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's private edge table */

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      start_x,
      stop_x;

    if (status == MagickFalse)
      continue;
    start_x=(ssize_t) ceil(bounds.x1-0.5);
    stop_x=(ssize_t) floor(bounds.x2+0.5);
    q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
      1),1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=start_x; x <= stop_x; x++)
    {
      double
        fill_alpha,
        stroke_alpha;

      PixelInfo
        fill_color,
        stroke_color;

      /*
        Fill and/or stroke.
      */
      fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
        x,y,&stroke_alpha);
      if (draw_info->stroke_antialias == MagickFalse)
        {
          /* Hard threshold the coverage when antialiasing is disabled. */
          fill_alpha=fill_alpha > 0.5 ? 1.0 : 0.0;
          stroke_alpha=stroke_alpha > 0.5 ? 1.0 : 0.0;
        }
      /* Composite fill first, then stroke on top. */
      GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
      CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
    };

  PointInfo
    p,      /* first point of the current subpath */
    point,  /* current vertex */
    q;      /* previous vertex (for duplicate detection) */

  register ssize_t
    i,
    x;

  ssize_t
    coordinates,
    y;

  /*
    Emit DrawEvent log records describing primitive_info.  Simple primitives
    (alpha, color, image, point, text) produce a single summary line;
    path-like primitives are logged vertex-by-vertex with duplicate-point and
    open/closed subpath annotations.
  */
  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ColorPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ImagePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ImagePrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    case PointPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case TextPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "TextPrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    default:
      break;
  }
  coordinates=0;
  p=primitive_info[0].point;
  q.x=(-1.0);
  q.y=(-1.0);
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    /* Fix: the original assigned point twice in a row; one assignment
       suffices. */
    point=primitive_info[i].point;
    if (coordinates <= 0)
      {
        /*
          Start of a new subpath: remember its first point so we can tell
          whether the subpath closes back on itself.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " begin open (%.20g)",(double) coordinates);
        p=point;
      }
    if ((fabs(q.x-point.x) >= MagickEpsilon) ||
        (fabs(q.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
    q=point;
    coordinates--;
    if (coordinates > 0)
      continue;
    /* Subpath exhausted: closed iff it ends where it began. */
    if ((fabs(p.x-point.x) >= MagickEpsilon) ||
        (fabs(p.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
        (double) coordinates);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
        (double) coordinates);
  }
}
MagickExport MagickBooleanType DrawPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  /*
    Render a single graphic primitive (alpha, color, image, point, text, or
    a traced path) onto the image, honoring the affine transform, fill,
    stroke, and compliance settings in draw_info.  Returns MagickTrue only
    if every rendering step succeeded.
  */
  CacheView
    *image_view;

  MagickStatusType
    status;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " begin draw-primitive");
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
        draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
        draw_info->affine.tx,draw_info->affine.ty);
    }
  status=MagickTrue;
  /*
    A non-gray fill or stroke cannot be rendered into a grayscale image;
    promote the image to sRGB first.
  */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
       (IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
    status&=SetImageColorspace(image,sRGBColorspace,exception);
  if (draw_info->compliance == SVGCompliance)
    {
      /*
        SVG rendering honors the clipping and composite masks for the
        duration of this primitive; they are cleared before returning.
      */
      status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask,
        exception);
      status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask,
        exception);
    }
  /*
    Round the primitive origin to the nearest pixel center.
  */
  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  image_view=AcquireAuthenticCacheView(image,exception);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      /*
        Paint only the alpha channel, selecting pixels by method.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        status&=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelInfo
            pixel;

          register Quantum
            *q;

          /*
            Set the alpha of the single pixel at (x,y).
          */
          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
          status&=SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          PixelInfo
            pixel,
            target;

          /*
            Set the alpha of every pixel that fuzzily matches the color at
            (x,y), scanning the whole image row by row.
          */
          status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          ChannelType
            channel_mask;

          PixelInfo
            target;

          /*
            Flood-fill the alpha channel starting at (x,y).  For
            FillToBorderMethod the border color is the stopping target;
            restrict the fill to the alpha channel via the channel mask.
          */
          status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          channel_mask=SetImageChannelMask(image,AlphaChannel);
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          (void) SetImageChannelMask(image,channel_mask);
          break;
        }
        case ResetMethod:
        {
          PixelInfo
            pixel;

          /*
            Set the alpha of every pixel in the image.
          */
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ColorPrimitive:
    {
      /*
        Paint full pixel colors, selecting pixels by method (parallel
        structure to AlphaPrimitive above, but replacing whole pixels).
      */
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelInfo
            pixel;

          register Quantum
            *q;

          /*
            Replace the single pixel at (x,y) with the fill color.
          */
          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetPixelInfo(image,&pixel);
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelViaPixelInfo(image,&pixel,q);
          status&=SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          PixelInfo
            pixel,
            target;

          /*
            Replace every pixel that fuzzily matches the color at (x,y).
          */
          status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          PixelInfo
            target;

          /*
            Flood-fill color starting at (x,y); for FillToBorderMethod the
            border color is the stopping target.
          */
          status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          break;
        }
        case ResetMethod:
        {
          PixelInfo
            pixel;

          /*
            Replace every pixel in the image with the fill color.
          */
          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ImagePrimitive:
    {
      AffineMatrix
        affine;

      char
        composite_geometry[MagickPathExtent];

      Image
        *composite_image,
        *composite_images;

      ImageInfo
        *clone_info;

      RectangleInfo
        geometry;

      ssize_t
        x1,
        y1;

      /*
        Read an image (inline "data:" URI or filename held in the text
        member), optionally resize it to the requested extent, then
        composite it onto the canvas at (x,y).
      */
      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=AcquireImageInfo();
      composite_images=(Image *) NULL;
      if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
        composite_images=ReadInlineImage(clone_info,primitive_info->text,
          exception);
      else
        if (*primitive_info->text != '\0')
          {
            (void) CopyMagickString(clone_info->filename,primitive_info->text,
              MagickPathExtent);
            status&=SetImageInfo(clone_info,0,exception);
            /*
              http/mpri sources keep the full text as the filename since
              SetImageInfo() may have mangled it.
            */
            if ((LocaleNCompare(clone_info->magick,"http",4) == 0) ||
                (LocaleCompare(clone_info->magick,"mpri") == 0))
              (void) CopyMagickString(clone_info->filename,primitive_info->text,
                MagickPathExtent);
            composite_images=ReadImage(clone_info,exception);
          }
      clone_info=DestroyImageInfo(clone_info);
      if (composite_images == (Image *) NULL)
        {
          status=MagickFalse;
          break;
        }
      /*
        Only the first image of a sequence is composited.
      */
      composite_image=RemoveFirstImageFromList(&composite_images);
      composite_images=DestroyImageList(composite_images);
      (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
        NULL,(void *) NULL);
      x1=(ssize_t) ceil(primitive_info[1].point.x-0.5);
      y1=(ssize_t) ceil(primitive_info[1].point.y-0.5);
      if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
          ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
        {
          /*
            Resize image.
          */
          (void) FormatLocaleString(composite_geometry,MagickPathExtent,
            "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y);
          composite_image->filter=image->filter;
          status&=TransformImage(&composite_image,(char *) NULL,
            composite_geometry,exception);
        }
      if (composite_image->alpha_trait == UndefinedPixelTrait)
        status&=SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
          exception);
      if (draw_info->alpha != OpaqueAlpha)
        status&=SetImageAlpha(composite_image,draw_info->alpha,exception);
      SetGeometry(image,&geometry);
      image->gravity=draw_info->gravity;
      geometry.x=x;
      geometry.y=y;
      (void) FormatLocaleString(composite_geometry,MagickPathExtent,
        "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
        composite_image->rows,(double) geometry.x,(double) geometry.y);
      (void) ParseGravityGeometry(image,composite_geometry,&geometry,exception);
      affine=draw_info->affine;
      affine.tx=(double) geometry.x;
      affine.ty=(double) geometry.y;
      composite_image->interpolate=image->interpolate;
      /*
        Simple over-composites use the (affine-aware) draw path; any other
        compose operator goes through CompositeImage().
      */
      if ((draw_info->compose == OverCompositeOp) ||
          (draw_info->compose == SrcOverCompositeOp))
        status&=DrawAffineImage(image,composite_image,&affine,exception);
      else
        status&=CompositeImage(image,composite_image,draw_info->compose,
          MagickTrue,geometry.x,geometry.y,exception);
      composite_image=DestroyImage(composite_image);
      break;
    }
    case PointPrimitive:
    {
      PixelInfo
        fill_color;

      register Quantum
        *q;

      /*
        Composite the fill color over the single pixel at (x,y); points
        outside the canvas are silently ignored.
      */
      if ((y < 0) || (y >= (ssize_t) image->rows))
        break;
      if ((x < 0) || (x >= (ssize_t) image->columns))
        break;
      q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
      if (q == (Quantum *) NULL)
        break;
      GetFillColor(draw_info,x,y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,(double)
        GetPixelAlpha(image,q),q);
      status&=SyncCacheViewAuthenticPixels(image_view,exception);
      break;
    }
    case TextPrimitive:
    {
      char
        geometry[MagickPathExtent];

      DrawInfo
        *clone_info;

      /*
        Delegate text rendering to AnnotateImage() with the primitive's
        origin encoded as a geometry offset.
      */
      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
      (void) CloneString(&clone_info->text,primitive_info->text);
      (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
        primitive_info->point.x,primitive_info->point.y);
      (void) CloneString(&clone_info->geometry,geometry);
      status&=AnnotateImage(image,clone_info,exception);
      clone_info=DestroyDrawInfo(clone_info);
      break;
    }
    default:
    {
      double
        mid,
        scale;

      DrawInfo
        *clone_info;

      /*
        All remaining primitives are traced polygon paths: fill first,
        then stroke (dashed, capped/joined, or plain) as configured.
      */
      if (IsEventLogging() != MagickFalse)
        LogPrimitiveInfo(primitive_info);
      scale=ExpandAffine(&draw_info->affine);
      if ((draw_info->dash_pattern != (double *) NULL) &&
          (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
          (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
          (draw_info->stroke.alpha != (Quantum) TransparentAlpha))
        {
          /*
            Draw dash polygon.
          */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          if (status != MagickFalse)
            status&=DrawDashPolygon(draw_info,primitive_info,image,exception);
          break;
        }
      mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
      if ((mid > 1.0) &&
          ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) ||
           (draw_info->stroke_pattern != (Image *) NULL)))
        {
          double
            x,
            y;

          MagickBooleanType
            closed_path;

          /*
            Draw strokes while respecting line cap/join attributes.
          */
          closed_path=primitive_info[0].closed_subpath;
          i=(ssize_t) primitive_info[0].coordinates;
          x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
          y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
          if ((x < MagickEpsilon) && (y < MagickEpsilon))
            closed_path=MagickTrue;
          if ((((draw_info->linecap == RoundCap) ||
                (closed_path != MagickFalse)) &&
               (draw_info->linejoin == RoundJoin)) ||
              (primitive_info[i].primitive != UndefinedPrimitive))
            {
              status&=DrawPolygonPrimitive(image,draw_info,primitive_info,
                exception);
              break;
            }
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          if (status != MagickFalse)
            status&=DrawStrokePolygon(image,draw_info,primitive_info,exception);
          break;
        }
      status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
      break;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (draw_info->compliance == SVGCompliance)
    {
      /*
        Clear the temporary SVG clip/composite masks installed above.
      */
      status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
      status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
% MagickBooleanType DrawStrokePolygon(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  PrimitiveInfo
    cap[5];

  /*
    Render a degenerate 4-point polygon (epsilon-sized square) at the path
    endpoint; the polygon renderer expands it into a circular cap.
  */
  cap[0]=(*primitive_info);
  cap[1]=(*primitive_info);
  cap[2]=(*primitive_info);
  cap[3]=(*primitive_info);
  cap[0].coordinates=4;
  cap[1].point.x+=2.0*MagickEpsilon;
  cap[2].point.x+=2.0*MagickEpsilon;
  cap[2].point.y+=2.0*MagickEpsilon;
  cap[3].point.y+=2.0*MagickEpsilon;
  cap[4].primitive=UndefinedPrimitive;
  return(DrawPolygonPrimitive(image,draw_info,cap,exception));
}
static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path;

  MagickStatusType
    status;

  PrimitiveInfo
    *stroke_polygon;

  register const PrimitiveInfo
    *p,
    *q;

  /*
    Draw stroked polygon.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " begin draw-stroke-polygon");
  /*
    The stroke is rendered as a filled outline: clone the draw info, turn
    the stroke color/pattern into the fill, and disable stroking on the
    clone so the traced outline is filled only once.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  /*
    Walk each subpath; p advances by the subpath's coordinate count.
  */
  for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
  {
    if (p->coordinates == 1)
      continue;
    /*
      Trace the outline of this subpath's stroke and fill it.
    */
    stroke_polygon=TraceStrokePolygon(image,draw_info,p);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        status=0;
        break;
      }
    status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if (status == 0)
      break;
    /*
      q is the last coordinate of the subpath; open subpaths with a round
      cap get an explicit cap drawn at both endpoints.
    */
    q=p+p->coordinates-1;
    closed_path=p->closed_subpath;
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        status&=DrawRoundLinecap(image,draw_info,p,exception);
        status&=DrawRoundLinecap(image,draw_info,q,exception);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  /*
    Reset the affine transform to the identity matrix: zero every term,
    then set the scaling diagonal (sx, sy) to one.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  (void) memset(affine_matrix,0,sizeof(*affine_matrix));
  affine_matrix->sy=1.0;
  affine_matrix->sx=1.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
%    o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) memset(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /*
    Defaults: opaque black fill, transparent white stroke.
  */
  (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke,
    exception);
  draw_info->stroke_antialias=clone_info->antialias;
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->alpha=OpaqueAlpha;
  draw_info->fill_alpha=OpaqueAlpha;
  draw_info->stroke_alpha=OpaqueAlpha;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  draw_info->pointsize=12.0;
  draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
  draw_info->compose=OverCompositeOp;
  draw_info->render=MagickTrue;
  draw_info->clip_path=MagickFalse;
  draw_info->debug=IsEventLogging();
  /*
    Inherit settings carried on the (cloned) image info.
  */
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  if (fabs(clone_info->pointsize) >= MagickEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->border_color=clone_info->border_color;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  /*
    Global image options override the defaults above.
  */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
      exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
      exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
      exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /*
        "weight" accepts either a symbolic name or a numeric value.
      */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Permutate() returns the binomial coefficient of (n,k), i.e.
%  n!/(k!*(n-k)!).
%
% The format of the Permutate method is:
%
% void Permutate(ssize_t n,ssize_t k)
%
% A description of each parameter follows:
%
% o n:
%
% o k:
%
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    result;

  ssize_t
    i;

  /*
    Binomial coefficient C(n,k): multiply the terms of n!/k! and then
    divide by (n-k)!, accumulating in floating point to delay overflow.
  */
  result=1.0;
  for (i=k+1; i <= n; i++)
    result*=(double) i;
  for (i=1; i <= (n-k); i++)
    result/=(double) i;
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    midpoint,
    semi_axes;

  /*
    An MVG arc is an ellipse inscribed in the bounding box spanned by the
    start and end points, swept over the given angular range (degrees).
  */
  midpoint.x=0.5*(end.x+start.x);
  midpoint.y=0.5*(end.y+start.y);
  semi_axes.x=fabs(midpoint.x-start.x);
  semi_axes.y=fabs(midpoint.y-start.y);
  return(TraceEllipse(mvg_info,midpoint,semi_axes,degrees));
}
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  /*
    Trace an SVG-style elliptical arc path segment from start to end with
    radii arc, x-axis rotation angle, and the large-arc/sweep flags, by
    converting the arc into a sequence of cubic Bezier segments (one per
    quarter turn, at most).  Degenerate cases collapse to a point or line.
  */
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  MagickStatusType
    status;

  PointInfo
    center,
    points[3],
    radii;

  register double
    cosine,
    sine;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  size_t
    arc_segments;

  ssize_t
    offset;

  offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /*
    Coincident endpoints degenerate to a point; a zero radius to a line.
  */
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    return(TracePoint(primitive_info,end));
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon))
    return(TraceLine(primitive_info,start,end));
  /*
    Rotate into the ellipse's frame, then scale radii up if they are too
    small to span the endpoints (per the SVG arc correction rules).
  */
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  if (delta > 1.0)
    {
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  /*
    Normalize the endpoints by the radii and solve for the arc center;
    the sign of factor selects the large-arc/sweep solution.
  */
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  if (fabs(alpha*alpha+beta*beta) < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  /*
    Sweep angle theta between the two endpoints, adjusted to the
    requested sweep direction; one Bezier per quarter turn.
  */
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
  theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
  if ((theta < 0.0) && (sweep != MagickFalse))
    theta+=2.0*MagickPI;
  else
    if ((theta > 0.0) && (sweep == MagickFalse))
      theta-=2.0*MagickPI;
  arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+
    MagickEpsilon))));
  status=MagickTrue;
  p=primitive_info;
  for (i=0; i < (ssize_t) arc_segments; i++)
  {
    /*
      gamma is the Bezier control-point distance for this sub-arc; the
      four control points are computed in the unit-circle frame, then
      scaled by the radii and rotated back below.
    */
    beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
    gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
      sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
      sin(fmod((double) beta,DegreesToRadians(360.0)));
    points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
    p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
    (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
      points[0].y);
    (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
      points[0].y);
    (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
      points[1].y);
    (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
      points[1].y);
    (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
      points[2].y);
    (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
      points[2].y);
    if (i == (ssize_t) (arc_segments-1))
      (p+3)->point=end;
    /*
      TraceBezier() may reallocate the primitive buffer; re-resolve p
      from the MVG info afterwards.
    */
    status&=TraceBezier(mvg_info,4);
    if (status == 0)
      break;
    p=(*mvg_info->primitive_info)+mvg_info->offset;
    mvg_info->offset+=p->coordinates;
    p+=p->coordinates;
  }
  if (status == 0)
    return(MagickFalse);
  /*
    Restore the subpath's start offset and stamp every generated
    coordinate with the path's primitive type.
  */
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
  const size_t number_coordinates)
{
  /*
    Trace a Bezier curve through the first number_coordinates control
    points at mvg_info's current offset, replacing them with a polyline of
    evaluated Bernstein-weighted points.
  */
  double
    alpha,
    *coefficients,
    weight;

  PointInfo
    end,
    point,
    *points;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i,
    j;

  size_t
    control_points,
    quantum;

  /*
    Allocate coefficients.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=number_coordinates;
  /*
    Scale the sampling density (quantum) by the largest pairwise extent of
    the control points; reject extents too large to index.
  */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      if (alpha > (double) SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (double) SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
    }
  }
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=MagickMin(quantum/number_coordinates,BezierQuantum);
  coefficients=(double *) AcquireQuantumMemory(number_coordinates,
    sizeof(*coefficients));
  points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates*
    sizeof(*points));
  if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
    {
      if (points != (PointInfo *) NULL)
        points=(PointInfo *) RelinquishMagickMemory(points);
      if (coefficients != (double *) NULL)
        coefficients=(double *) RelinquishMagickMemory(coefficients);
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  control_points=quantum*number_coordinates;
  /*
    Ensure the primitive buffer can hold the expanded polyline; the check
    may reallocate, so primitive_info is re-resolved afterwards.
  */
  if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  /*
    Compute bezier points.
  */
  end=primitive_info[number_coordinates-1].point;
  for (i=0; i < (ssize_t) number_coordinates; i++)
    coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
  weight=0.0;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    /*
      Evaluate the curve at parameter `weight' using the Bernstein basis;
      alpha walks through (1-w)^(n-1-j) * w^j via incremental update.
    */
    p=primitive_info;
    point.x=0.0;
    point.y=0.0;
    alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
    for (j=0; j < (ssize_t) number_coordinates; j++)
    {
      point.x+=alpha*coefficients[j]*p->point.x;
      point.y+=alpha*coefficients[j]*p->point.y;
      alpha*=weight/(1.0-weight);
      p++;
    }
    points[i]=point;
    weight+=1.0/control_points;
  }
  /*
    Bezier curves are just short segmented polys.
  */
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    if (TracePoint(p,points[i]) == MagickFalse)
      {
        points=(PointInfo *) RelinquishMagickMemory(points);
        coefficients=(double *) RelinquishMagickMemory(coefficients);
        return(MagickFalse);
      }
    p+=p->coordinates;
  }
  /*
    Pin the final point to the exact end coordinate.
  */
  if (TracePoint(p,end) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(double *) RelinquishMagickMemory(coefficients);
  return(MagickTrue);
}
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end)
{
  double
    alpha,
    beta,
    radius;

  PointInfo
    degrees,
    radii;

  /*
    A circle is an ellipse whose semi-axes both equal the distance from
    the center (start) to the perimeter point (end), swept 0..360 degrees.
  */
  alpha=end.x-start.x;
  beta=end.y-start.y;
  radius=hypot((double) alpha,(double) beta);
  radii.x=(double) radius;
  radii.y=(double) radius;
  degrees.x=0.0;
  degrees.y=360.0;
  return(TraceEllipse(mvg_info,start,radii,degrees));
}
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
  const PointInfo radii,const PointInfo arc)
{
  /*
    Trace an ellipse (or elliptical arc from arc.x to arc.y degrees) at
    center with the given semi-axes as a segmented polyline, marking it a
    closed subpath when the first and last points coincide.
  */
  double
    coordinates,
    delta,
    step,
    x,
    y;

  PointInfo
    angle,
    point;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  /*
    Ellipses are just short segmented polys.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return(MagickTrue);
  /*
    Choose an angular step that scales with the larger radius so large
    ellipses stay smooth; never coarser than pi/8.
  */
  delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
  step=MagickPI/8.0;
  if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
    step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
  angle.x=DegreesToRadians(arc.x);
  y=arc.y;
  while (y < arc.x)
    y+=360.0;
  angle.y=DegreesToRadians(y);
  /*
    Reserve room for the generated points; reject extents too large to
    represent.
  */
  coordinates=ceil((angle.y-angle.x)/step+1.0);
  if (coordinates > (double) SSIZE_MAX)
    {
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse)
    return(MagickFalse);
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  for (p=primitive_info; angle.x < angle.y; angle.x+=step)
  {
    point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
    point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
    if (TracePoint(p,point) == MagickFalse)
      return(MagickFalse);
    p+=p->coordinates;
  }
  /*
    Close the sweep exactly at the terminal angle.
  */
  point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
  point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  x=fabs(primitive_info[0].point.x-
    primitive_info[primitive_info->coordinates-1].point.x);
  y=fabs(primitive_info[0].point.y-
    primitive_info[primitive_info->coordinates-1].point.y);
  if ((x < MagickEpsilon) && (y < MagickEpsilon))
    primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  /*
    Trace a line segment as a two-point primitive.  A segment whose
    endpoints coincide (within MagickEpsilon) degenerates to a single
    point primitive instead.
  */
  MagickBooleanType
    degenerate;

  if (TracePoint(primitive_info,start) == MagickFalse)
    return(MagickFalse);
  degenerate=((fabs(start.x-end.x) < MagickEpsilon) &&
    (fabs(start.y-end.y) < MagickEpsilon)) ? MagickTrue : MagickFalse;
  if (degenerate != MagickFalse)
    {
      /* Zero-length line: collapse to a point. */
      primitive_info->primitive=PointPrimitive;
      primitive_info->coordinates=1;
      return(MagickTrue);
    }
  if (TracePoint(primitive_info+1,end) == MagickFalse)
    return(MagickFalse);
  primitive_info[1].primitive=primitive_info->primitive;
  primitive_info->coordinates=2;
  primitive_info->closed_subpath=MagickFalse;
  return(MagickTrue);
}
/*
  TracePath() converts an SVG-style path string (moveto, lineto, curveto,
  quadratic/smooth curveto, elliptical arc, horizontal/vertical lineto, and
  closepath commands) into point primitives appended at mvg_info->offset.
  Uppercase commands take absolute coordinates, lowercase commands take
  coordinates relative to the current point.  Returns the total number of
  coordinates traced, or -1 on error (exhausted primitive extent or a
  failed sub-trace).
*/
static ssize_t TracePath(MVGInfo *mvg_info,const char *path,
  ExceptionInfo *exception)
{
  char
    *next_token,
    token[MagickPathExtent];

  const char
    *p;

  double
    x,
    y;

  int
    attribute,
    last_attribute;

  MagickBooleanType
    status;

  PointInfo
    end = {0.0, 0.0},
    points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
    point = {0.0, 0.0},
    start = {0.0, 0.0};

  PrimitiveInfo
    *primitive_info;

  PrimitiveType
    primitive_type;

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  size_t
    number_coordinates,
    z_count;

  ssize_t
    subpath_offset;

  subpath_offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  status=MagickTrue;
  attribute=0;
  number_coordinates=0;
  z_count=0;
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    if (status == MagickFalse)
      break;
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
    /* remember the previous command: S/s and T/t reflect control points
       only when the preceding command was a compatible curve command */
    last_attribute=attribute;
    attribute=(int) (*p++);
    switch (attribute)
    {
      case 'a':
      case 'A':
      {
        double
          angle = 0.0;

        MagickBooleanType
          large_arc = MagickFalse,
          sweep = MagickFalse;

        PointInfo
          arc = {0.0, 0.0};

        /*
          Elliptical arc: rx ry x-axis-rotation large-arc-flag sweep-flag x y.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          arc.x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          arc.y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          angle=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
          end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
          if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) == MagickFalse)
            return(-1);
          /* TraceArcPath may reallocate the primitive array: re-derive q */
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'c':
      case 'C':
      {
        /*
          Cubic Bézier curve: two control points plus an endpoint.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'H':
      case 'h':
      {
        /*
          Horizontal line to.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'l':
      case 'L':
      {
        /*
          Line to.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'M':
      case 'm':
      {
        /*
          Move to: close out the coordinate count of the previous subpath
          and start a new one at the given point.
        */
        if (mvg_info->offset != subpath_offset)
          {
            primitive_info=(*mvg_info->primitive_info)+subpath_offset;
            primitive_info->coordinates=(size_t) (q-primitive_info);
            number_coordinates+=primitive_info->coordinates;
            primitive_info=q;
            subpath_offset=mvg_info->offset;
          }
        i=0;
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
          if (i == 0)
            start=point;  /* remembered for the Z/z closepath command */
          i++;
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'q':
      case 'Q':
      {
        /*
          Quadratic Bézier curve: one control point plus an endpoint.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 's':
      case 'S':
      {
        /*
          Smooth cubic Bézier curve: the first control point is the
          reflection of the previous curve's second control point.
        */
        do
        {
          points[0]=points[3];
          points[1].x=2.0*points[3].x-points[2].x;
          points[1].y=2.0*points[3].y-points[2].y;
          for (i=2; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
            points[i]=end;
          }
          if (strchr("CcSs",last_attribute) == (char *) NULL)
            {
              /* no previous cubic: reflected control collapses to point */
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 't':
      case 'T':
      {
        /*
          Smooth quadratic Bézier curve: the control point is the
          reflection of the previous curve's control point.
        */
        do
        {
          points[0]=points[2];
          points[1].x=2.0*points[2].x-points[1].x;
          points[1].y=2.0*points[2].y-points[1].y;
          for (i=2; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
            points[i]=end;
          }
          if (status == MagickFalse)
            break;
          if (strchr("QqTt",last_attribute) == (char *) NULL)
            {
              /* no previous quadratic: reflection collapses to point */
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'v':
      case 'V':
      {
        /*
          Vertical line to.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'z':
      case 'Z':
      {
        /*
          Close path: return to the subpath's starting point and mark the
          subpath closed.
        */
        point=start;
        if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
          return(-1);
        q=(*mvg_info->primitive_info)+mvg_info->offset;
        if (TracePoint(q,point) == MagickFalse)
          return(-1);
        mvg_info->offset+=q->coordinates;
        q+=q->coordinates;
        primitive_info=(*mvg_info->primitive_info)+subpath_offset;
        primitive_info->coordinates=(size_t) (q-primitive_info);
        primitive_info->closed_subpath=MagickTrue;
        number_coordinates+=primitive_info->coordinates;
        primitive_info=q;
        subpath_offset=mvg_info->offset;
        z_count++;
        break;
      }
      default:
      {
        /* unrecognized path command; report it via the exception macro */
        ThrowPointExpectedException(token,exception);
        break;
      }
    }
  }
  if (status == MagickFalse)
    return(-1);
  /* finish the coordinate count of the last (possibly open) subpath */
  primitive_info=(*mvg_info->primitive_info)+subpath_offset;
  primitive_info->coordinates=(size_t) (q-primitive_info);
  number_coordinates+=primitive_info->coordinates;
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    q--;
    q->primitive=primitive_type;
    if (z_count > 1)
      q->method=FillToBorderMethod;  /* multiple closed subpaths */
  }
  q=primitive_info;
  return((ssize_t) number_coordinates);
}
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  /*
    Trace a rectangle as a closed five-point polygon: the four corners in
    order, finishing back at the starting corner.
  */
  PointInfo
    corners[5];

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  corners[0]=start;
  corners[1].x=start.x;
  corners[1].y=end.y;
  corners[2]=end;
  corners[3].x=end.x;
  corners[3].y=start.y;
  corners[4]=start;
  q=primitive_info;
  for (i=0; i < 5; i++)
  {
    if (TracePoint(q,corners[i]) == MagickFalse)
      return(MagickFalse);
    q+=q->coordinates;
  }
  primitive_info->coordinates=(size_t) (q-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  /* propagate the primitive type backward over every traced point */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    q->primitive=primitive_info->primitive;
    q--;
  }
  return(MagickTrue);
}
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
  const PointInfo start,const PointInfo end,PointInfo arc)
{
  /*
    Trace a rectangle with rounded corners as four quarter ellipses joined
    by straight edges, finishing with a point that closes the subpath.
  */
  PointInfo
    center[4],
    degrees,
    extent;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  ssize_t
    first;

  static const double
    sweep[4][2] =
    {
      { 270.0, 360.0 },  /* top-right corner */
      { 0.0, 90.0 },     /* bottom-right corner */
      { 90.0, 180.0 },   /* bottom-left corner */
      { 180.0, 270.0 }   /* top-left corner */
    };

  first=mvg_info->offset;
  extent.x=fabs(end.x-start.x);
  extent.y=fabs(end.y-start.y);
  if ((extent.x < MagickEpsilon) || (extent.y < MagickEpsilon))
    {
      /* degenerate rectangle: emit an empty primitive */
      (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
      return(MagickTrue);
    }
  /* corner radii cannot exceed half the rectangle's extent */
  if (arc.x > (0.5*extent.x))
    arc.x=0.5*extent.x;
  if (arc.y > (0.5*extent.y))
    arc.y=0.5*extent.y;
  /* corner-ellipse centers, clockwise from the top-right corner */
  center[0].x=start.x+extent.x-arc.x;
  center[0].y=start.y+arc.y;
  center[1].x=start.x+extent.x-arc.x;
  center[1].y=start.y+extent.y-arc.y;
  center[2].x=start.x+arc.x;
  center[2].y=start.y+extent.y-arc.y;
  center[3].x=start.x+arc.x;
  center[3].y=start.y+arc.y;
  for (i=0; i < 4; i++)
  {
    degrees.x=sweep[i][0];
    degrees.y=sweep[i][1];
    if (TraceEllipse(mvg_info,center[i],arc,degrees) == MagickFalse)
      return(MagickFalse);
    /* TraceEllipse may reallocate the primitive array: re-derive q */
    q=(*mvg_info->primitive_info)+mvg_info->offset;
    mvg_info->offset+=q->coordinates;
  }
  if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
    return(MagickFalse);
  q=(*mvg_info->primitive_info)+mvg_info->offset;
  if (TracePoint(q,(*mvg_info->primitive_info+first)->point) == MagickFalse)
    return(MagickFalse);
  q+=q->coordinates;
  mvg_info->offset=first;
  primitive_info=(*mvg_info->primitive_info)+first;
  primitive_info->coordinates=(size_t) (q-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    q->primitive=primitive_info->primitive;
    q--;
  }
  return(MagickTrue);
}
/*
  TraceSquareLinecap() extends both ends of an open stroked path outward by
  `offset` (half the stroke width) along the direction of the first and last
  non-degenerate segments, producing square line caps.
*/
static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info,
  const size_t number_vertices,const double offset)
{
  double
    distance;

  register double
    dx,
    dy;

  register ssize_t
    i;

  ssize_t
    j;

  dx=0.0;
  dy=0.0;
  /*
    Find the first vertex measurably distinct from vertex 0.
  */
  for (i=1; i < (ssize_t) number_vertices; i++)
  {
    dx=primitive_info[0].point.x-primitive_info[i].point.x;
    dy=primitive_info[0].point.y-primitive_info[i].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  if (i == (ssize_t) number_vertices)
    i=(ssize_t) number_vertices-1L;
  distance=hypot((double) dx,(double) dy);
  /*
    NOTE(review): if every vertex coincides exactly, dx == dy == 0 here and
    the divisions below yield NaN -- presumably callers never pass a fully
    degenerate path to this routine; confirm.
  */
  primitive_info[0].point.x=(double) (primitive_info[i].point.x+
    dx*(distance+offset)/distance);
  primitive_info[0].point.y=(double) (primitive_info[i].point.y+
    dy*(distance+offset)/distance);
  /*
    Find the last vertex measurably distinct from the final vertex.
  */
  for (j=(ssize_t) number_vertices-2; j >= 0; j--)
  {
    dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
    dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  distance=hypot((double) dx,(double) dy);
  primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+
    dx*(distance+offset)/distance);
  primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+
    dy*(distance+offset)/distance);
  return(MagickTrue);
}
/*
  TraceStrokePolygon() computes the outline polygon of the stroked path in
  primitive_info, honoring draw_info's stroke width, line join (bevel,
  miter, round) and line cap.  The outline is traced as two half-outlines
  (stroke_p on one side, stroke_q on the other) that are concatenated at
  the end.  Returns a newly allocated PrimitiveInfo array (freed by the
  caller), or NULL on a memory allocation failure.
*/
static PrimitiveInfo *TraceStrokePolygon(const Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
/* worst-case number of extra points a single join or cap can add */
#define MaxStrokePad (6*BezierQuantum+360)
/*
  Ensure stroke_p/stroke_q have room for pad_p/pad_q more points; on
  overflow or allocation failure, release all buffers and return NULL.
*/
#define CheckPathExtent(pad_p,pad_q) \
{ \
  if ((pad_p) > MaxBezierCoordinates) \
    stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
  else \
    if ((ssize_t) (p+(pad_p)) >= (ssize_t) extent_p) \
      { \
        if (~extent_p < (pad_p)) \
          stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
        else \
          { \
            extent_p+=(pad_p); \
            stroke_p=(PointInfo *) ResizeQuantumMemory(stroke_p,extent_p+ \
              MaxStrokePad,sizeof(*stroke_p)); \
          } \
      } \
  if ((pad_q) > MaxBezierCoordinates) \
    stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
  else \
    if ((ssize_t) (q+(pad_q)) >= (ssize_t) extent_q) \
      { \
        if (~extent_q < (pad_q)) \
          stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
        else \
          { \
            extent_q+=(pad_q); \
            stroke_q=(PointInfo *) ResizeQuantumMemory(stroke_q,extent_q+ \
              MaxStrokePad,sizeof(*stroke_q)); \
          } \
      } \
  if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) \
    { \
      if (stroke_p != (PointInfo *) NULL) \
        stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
      if (stroke_q != (PointInfo *) NULL) \
        stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
      polygon_primitive=(PrimitiveInfo *) \
        RelinquishMagickMemory(polygon_primitive); \
      return((PrimitiveInfo *) NULL); \
    } \
}

  /* a pair of values, one for the previous segment (p), one for the
     current segment (q) */
  typedef struct _StrokeSegment
  {
    double
      p,
      q;
  } StrokeSegment;

  double
    delta_theta,
    dot_product,
    mid,
    miterlimit;

  MagickBooleanType
    closed_path;

  PointInfo
    box_p[5],
    box_q[5],
    center,
    offset,
    *stroke_p,
    *stroke_q;

  PrimitiveInfo
    *polygon_primitive,
    *stroke_polygon;

  register ssize_t
    i;

  size_t
    arc_segments,
    extent_p,
    extent_q,
    number_vertices;

  ssize_t
    j,
    n,
    p,
    q;

  StrokeSegment
    dx = {0.0, 0.0},
    dy = {0.0, 0.0},
    inverse_slope = {0.0, 0.0},
    slope = {0.0, 0.0},
    theta = {0.0, 0.0};

  /*
    Allocate paths.
  */
  number_vertices=primitive_info->coordinates;
  polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    number_vertices+2UL,sizeof(*polygon_primitive));
  if (polygon_primitive == (PrimitiveInfo *) NULL)
    return((PrimitiveInfo *) NULL);
  (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
    sizeof(*polygon_primitive));
  /* a path is closed when its first and last vertices coincide */
  offset.x=primitive_info[number_vertices-1].point.x-primitive_info[0].point.x;
  offset.y=primitive_info[number_vertices-1].point.y-primitive_info[0].point.y;
  closed_path=(fabs(offset.x) < MagickEpsilon) &&
    (fabs(offset.y) < MagickEpsilon) ? MagickTrue : MagickFalse;
  if (((draw_info->linejoin == RoundJoin) ||
       (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
    {
      /* repeat the second vertex so the closing join is also processed */
      polygon_primitive[number_vertices]=primitive_info[1];
      number_vertices++;
    }
  polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
  /*
    Compute the slope for the first line segment, p.
  */
  dx.p=0.0;
  dy.p=0.0;
  for (n=1; n < (ssize_t) number_vertices; n++)
  {
    dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
    dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
    if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
      break;
  }
  if (n == (ssize_t) number_vertices)
    {
      if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
        {
          /*
            Zero length subpath.
          */
          stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
            sizeof(*stroke_polygon));
          stroke_polygon[0]=polygon_primitive[0];
          stroke_polygon[0].coordinates=0;
          polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
            polygon_primitive);
          return(stroke_polygon);
        }
      n=(ssize_t) number_vertices-1L;
    }
  extent_p=2*number_vertices;
  extent_q=2*number_vertices;
  stroke_p=(PointInfo *) AcquireQuantumMemory((size_t) extent_p+MaxStrokePad,
    sizeof(*stroke_p));
  stroke_q=(PointInfo *) AcquireQuantumMemory((size_t) extent_q+MaxStrokePad,
    sizeof(*stroke_q));
  if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL))
    {
      if (stroke_p != (PointInfo *) NULL)
        stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
      if (stroke_q != (PointInfo *) NULL)
        stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
      polygon_primitive=(PrimitiveInfo *)
        RelinquishMagickMemory(polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  /* vertical/horizontal segments get a huge finite slope instead of an
     infinite one, signed to preserve direction */
  slope.p=0.0;
  inverse_slope.p=0.0;
  if (fabs(dx.p) < MagickEpsilon)
    {
      if (dx.p >= 0.0)
        slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
      else
        slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
    }
  else
    if (fabs(dy.p) < MagickEpsilon)
      {
        if (dy.p >= 0.0)
          inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      {
        slope.p=dy.p/dx.p;
        inverse_slope.p=(-1.0/slope.p);
      }
  /* mid: half the stroke width in device space; miterlimit is compared
     against squared distances, hence the squaring here */
  mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
  miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
  if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
    (void) TraceSquareLinecap(polygon_primitive,number_vertices,mid);
  /* offset: perpendicular displacement of the outline from the path */
  offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
  offset.y=(double) (offset.x*inverse_slope.p);
  if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
    {
      box_p[0].x=polygon_primitive[0].point.x-offset.x;
      box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
      box_p[1].x=polygon_primitive[n].point.x-offset.x;
      box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
      box_q[0].x=polygon_primitive[0].point.x+offset.x;
      box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
      box_q[1].x=polygon_primitive[n].point.x+offset.x;
      box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
    }
  else
    {
      box_p[0].x=polygon_primitive[0].point.x+offset.x;
      box_p[0].y=polygon_primitive[0].point.y+offset.y;
      box_p[1].x=polygon_primitive[n].point.x+offset.x;
      box_p[1].y=polygon_primitive[n].point.y+offset.y;
      box_q[0].x=polygon_primitive[0].point.x-offset.x;
      box_q[0].y=polygon_primitive[0].point.y-offset.y;
      box_q[1].x=polygon_primitive[n].point.x-offset.x;
      box_q[1].y=polygon_primitive[n].point.y-offset.y;
    }
  /*
    Create strokes for the line join attribute: bevel, miter, round.
  */
  p=0;
  q=0;
  /* NOTE(review): the p/q indices look swapped on the next two lines, but
     both counters are still zero here so the effect is identical to
     stroke_q[q++]/stroke_p[p++] -- confirm upstream intent. */
  stroke_q[p++]=box_q[0];
  stroke_p[q++]=box_p[0];
  for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
  {
    /*
      Compute the slope for this line segment, q.
    */
    dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
    dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
    dot_product=dx.q*dx.q+dy.q*dy.q;
    /* skip segments shorter than half a pixel (squared length < 0.25) */
    if (dot_product < 0.25)
      continue;
    slope.q=0.0;
    inverse_slope.q=0.0;
    if (fabs(dx.q) < MagickEpsilon)
      {
        if (dx.q >= 0.0)
          slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      if (fabs(dy.q) < MagickEpsilon)
        {
          if (dy.q >= 0.0)
            inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
          else
            inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
        }
      else
        {
          slope.q=dy.q/dx.q;
          inverse_slope.q=(-1.0/slope.q);
        }
    offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
    offset.y=(double) (offset.x*inverse_slope.q);
    dot_product=dy.q*offset.x-dx.q*offset.y;
    if (dot_product > 0.0)
      {
        box_p[2].x=polygon_primitive[n].point.x-offset.x;
        box_p[2].y=polygon_primitive[n].point.y-offset.y;
        box_p[3].x=polygon_primitive[i].point.x-offset.x;
        box_p[3].y=polygon_primitive[i].point.y-offset.y;
        box_q[2].x=polygon_primitive[n].point.x+offset.x;
        box_q[2].y=polygon_primitive[n].point.y+offset.y;
        box_q[3].x=polygon_primitive[i].point.x+offset.x;
        box_q[3].y=polygon_primitive[i].point.y+offset.y;
      }
    else
      {
        box_p[2].x=polygon_primitive[n].point.x+offset.x;
        box_p[2].y=polygon_primitive[n].point.y+offset.y;
        box_p[3].x=polygon_primitive[i].point.x+offset.x;
        box_p[3].y=polygon_primitive[i].point.y+offset.y;
        box_q[2].x=polygon_primitive[n].point.x-offset.x;
        box_q[2].y=polygon_primitive[n].point.y-offset.y;
        box_q[3].x=polygon_primitive[i].point.x-offset.x;
        box_q[3].y=polygon_primitive[i].point.y-offset.y;
      }
    /* box_*[4]: intersection of the two offset edges (parallel edges fall
       back to the shared endpoint) */
    if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
      {
        box_p[4]=box_p[1];
        box_q[4]=box_q[1];
      }
    else
      {
        box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
          box_p[3].y)/(slope.p-slope.q));
        box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
        box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
          box_q[3].y)/(slope.p-slope.q));
        box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
      }
    CheckPathExtent(MaxStrokePad,MaxStrokePad);
    /* sign of the cross product selects which side is the outer joint */
    dot_product=dx.q*dy.p-dx.p*dy.q;
    if (dot_product <= 0.0)
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          stroke_q[q++]=box_q[1];
          stroke_q[q++]=box_q[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            stroke_p[p++]=box_p[4];
          else
            {
              stroke_p[p++]=box_p[1];
              stroke_p[p++]=box_p[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              stroke_q[q++]=box_q[4];
              stroke_p[p++]=box_p[4];
            }
          else
            {
              /* miter too long: fall back to a bevel */
              stroke_q[q++]=box_q[1];
              stroke_q[q++]=box_q[2];
              stroke_p[p++]=box_p[1];
              stroke_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            stroke_p[p++]=box_p[4];
          else
            {
              stroke_p[p++]=box_p[1];
              stroke_p[p++]=box_p[2];
            }
          /* approximate the outer arc with line segments */
          center=polygon_primitive[n].point;
          theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
          theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
          if (theta.q < theta.p)
            theta.q+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(MaxStrokePad,arc_segments+MaxStrokePad);
          stroke_q[q].x=box_q[1].x;
          stroke_q[q].y=box_q[1].y;
          q++;
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            stroke_q[q].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            stroke_q[q].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            q++;
          }
          stroke_q[q++]=box_q[2];
          break;
        }
        default:
          break;
      }
    else
      /* mirror image of the cases above: the joint is on the p side */
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          stroke_p[p++]=box_p[1];
          stroke_p[p++]=box_p[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            stroke_q[q++]=box_q[4];
          else
            {
              stroke_q[q++]=box_q[1];
              stroke_q[q++]=box_q[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              stroke_q[q++]=box_q[4];
              stroke_p[p++]=box_p[4];
            }
          else
            {
              stroke_q[q++]=box_q[1];
              stroke_q[q++]=box_q[2];
              stroke_p[p++]=box_p[1];
              stroke_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            stroke_q[q++]=box_q[4];
          else
            {
              stroke_q[q++]=box_q[1];
              stroke_q[q++]=box_q[2];
            }
          center=polygon_primitive[n].point;
          theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
          theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
          if (theta.p < theta.q)
            theta.p+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(arc_segments+MaxStrokePad,MaxStrokePad);
          stroke_p[p++]=box_p[1];
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            stroke_p[p].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            stroke_p[p].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            p++;
          }
          stroke_p[p++]=box_p[2];
          break;
        }
        default:
          break;
      }
    /* current segment becomes the previous one for the next join */
    slope.p=slope.q;
    inverse_slope.p=inverse_slope.q;
    box_p[0]=box_p[2];
    box_p[1]=box_p[3];
    box_q[0]=box_q[2];
    box_q[1]=box_q[3];
    dx.p=dx.q;
    dy.p=dy.q;
    n=i;
  }
  stroke_p[p++]=box_p[1];
  stroke_q[q++]=box_q[1];
  /*
    Trace stroked polygon: stroke_p forward, then stroke_q in reverse, with
    extra closing points when the source path was closed.
  */
  stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
  if (stroke_polygon != (PrimitiveInfo *) NULL)
    {
      for (i=0; i < (ssize_t) p; i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=stroke_p[i];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[0].point;
          i++;
        }
      for ( ; i < (ssize_t) (p+q+closed_path); i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=stroke_q[p+q+closed_path-(i+1)];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
          i++;
        }
      stroke_polygon[i]=polygon_primitive[0];
      stroke_polygon[i].point=stroke_polygon[0].point;
      i++;
      stroke_polygon[i].primitive=UndefinedPrimitive;
      stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
    }
  stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
  stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
  polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
  return(stroke_polygon);
}
|
GB_binop__band_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__band_uint8
// A.*B function (eWiseMult): GB_AemultB__band_uint8
// A*D function (colscale): GB_AxD__band_uint8
// D*A function (rowscale): GB_DxB__band_uint8
// C+=B function (dense accum): GB_Cdense_accumB__band_uint8
// C+=b function (dense accum): GB_Cdense_accumb__band_uint8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__band_uint8
// C=scalar+B GB_bind1st__band_uint8
// C=scalar+B' GB_bind1st_tran__band_uint8
// C=A+scalar GB_bind2nd__band_uint8
// C=A'+scalar GB_bind2nd_tran__band_uint8
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij) & (bij)
// scalar types of the A, B, and C entries (all uint8_t for this kernel)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
// access the entry of C at position p
#define GB_CX(p) Cx [p]
// binary operator: bitwise AND (the i and j indices are unused by this op)
#define GB_BINOP(z, x, y, i, j) \
z = (x) & (y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// (none): no CBLAS gateway exists for bitwise AND on uint8_t
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BAND || GxB_NO_UINT8 || GxB_NO_BAND_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Disabled (#if 0): this dense ewise3-accum kernel is only generated for the
// operators listed below; BAND is not among them, so no function is emitted.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// All three matrices are dense; the included template computes each entry via
// GB_BINOP (cij = aij & bij), with no accumulator.  Returns GrB_NO_VALUE when
// this kernel is compiled out by GB_DISABLE, GrB_SUCCESS otherwise.
GrB_Info GB_Cdense_ewise3_noaccum__band_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C is dense, B is sparse.  The kfirst/klast/pstart slice arrays partition
// B's entries across ntasks tasks; the actual work is in the included
// template.  Returns GrB_NO_VALUE when this kernel is compiled out.
GrB_Info GB_Cdense_accumB__band_uint8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// p_bwork points to the scalar b, passed untyped (GB_void) and cast to
// uint8_t here.  The included template applies cij = cij & b to every entry
// of the dense matrix C.  Returns GrB_NO_VALUE when compiled out.
// Fix: removed an unreachable duplicate "return (GrB_SUCCESS) ;" that
// followed the inner block, which already returns on every path.
GrB_Info GB_Cdense_accumb__band_uint8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Scales each column j of A by the diagonal entry D(j,j) using GB_BINOP.
// The slice arrays partition A's entries across ntasks tasks.  The
// A_is_pattern / D_is_pattern flags are consumed by the included template.
GrB_Info GB_AxD__band_uint8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// only C's numerical values are computed here
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Scales each row i of B by the diagonal entry D(i,i) using GB_BINOP.
// The D_is_pattern / B_is_pattern flags are consumed by the included template.
GrB_Info GB_DxB__band_uint8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// only C's numerical values are computed here
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Free the ek_slice workspaces; GB_ek_slice_free is safe on NULL pointers.
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd with op = bitwise AND, under an optional mask M.  The TaskList,
// C_to_* maps, and sparsity/mask flags are consumed by the included template.
GrB_Info GB_AaddB__band_uint8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces are allocated (if needed) inside the template, freed below
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult with op = bitwise AND, under an optional mask M.  Same structure
// as GB_AaddB above (and it reuses the same GB_FREE_ALL), but the included
// template operates on the intersection of the patterns of A and B.
GrB_Info GB_AemultB__band_uint8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces are allocated (if needed) inside the template, freed below
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// For each entry p reported present by GBB (Bb, p), compute
// Cx [p] = x & Bx [p], where x is the scalar bound as the first operand.
GrB_Info GB_bind1st__band_uint8
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped arrays, and the bound scalar
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Bx = (uint8_t *) Bx_input ;
uint8_t x = (*((uint8_t *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// only operate on entries present per the bitmap Bb
if (GBB (Bb, p))
{
Cx [p] = (x) & (Bx [p]) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// For each entry p reported present by GBB (Ab, p), compute
// Cx [p] = Ax [p] & y, where y is the scalar bound as the second operand.
GrB_Info GB_bind2nd__band_uint8
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped arrays, and the bound scalar
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// only operate on entries present per the bitmap Ab
if (GBB (Ab, p))
{
Cx [p] = (Ax [p]) & (y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (x) & (aij) ; \
}
// Transposes A while applying cij = x & aij via the GB_CAST_OP macro above;
// the transpose machinery itself lives in the included GB_unop_transpose.c.
GrB_Info GB_bind1st_tran__band_uint8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE (the same uint8_t here, since A and C share one type)
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (aij) & (y) ; \
}
// Transposes A while applying cij = aij & y via the GB_CAST_OP macro above;
// the transpose machinery itself lives in the included GB_unop_transpose.c.
GrB_Info GB_bind2nd_tran__band_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for two `struct timeval` values.
 * Y is normalized in place so that tv_usec can be subtracted directly
 * (this side effect on *y matches the long-standing glibc-manual idiom).
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y's usec field when x has fewer usec. */
  if (x->tv_usec < y->tv_usec)
  {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry surplus microseconds (beyond one second) into y's sec field. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* After normalization the usec difference is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/* Order-1, 3D 7-point stencil benchmark driver.
 * Usage: prog [Nx Ny Nz [Nt]] -- interior sizes; 2 halo points are added.
 * Fixes: (1) Nx/Ny/Nz/Nt were read uninitialized when too few arguments
 * were given -- defaults added; (2) both time buffers are now initialized
 * over their FULL range (the original filled only A[0] from index 1, so
 * the stencil read uninitialized halo planes and all of A[1]); (3) the
 * minimum-time update used lowercase `min`, which is not defined -- the
 * macro defined above is `MIN`. */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  /* Grid extents including 2 halo points per dimension, and time steps. */
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  /* A[0] and A[1] are the two time buffers, each Nz x Ny x Nx. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 32;
  tile_size[3] = 256;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;
  /* Initialize BOTH buffers over the full range, halo planes included,
   * so the stencil never reads uninitialized memory. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = A[0][i][j][k];
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  /* num_threads is reported by PRINT_RESULTS (see print_utils.h). */
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
              + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
                        A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  /* Freeing the arrays was observed to degrade measured performance, so it
   * is intentionally left disabled; the OS reclaims the memory at exit. */
  /* for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */
  return 0;
}
|
GB_select_phase2.c | //------------------------------------------------------------------------------
// GB_select_phase2: C=select(A,thunk)
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
{
// Phase 2 of GB_select: the pattern of C (Cp) was computed in phase 1; this
// template copies the selected entries of A into Ci/Cx.  The selector in
// effect is chosen by the GB_*_SELECTOR macro defined by the including file.
// asize, avlen, avdim, and Ah unused for some uses of this template
#include "GB_unused.h"
//--------------------------------------------------------------------------
// get A
//--------------------------------------------------------------------------
const int64_t *restrict Ap = A->p ;
const int64_t *restrict Ah = A->h ;
const int64_t *restrict Ai = A->i ;
const GB_ATYPE *restrict Ax = A->x ;
size_t asize = A->type->size ;
int64_t avlen = A->vlen ;
int64_t avdim = A->vdim ;
//--------------------------------------------------------------------------
// C = select (A)
//--------------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (int tid = 0 ; tid < ntasks ; tid++)
{
// if kfirst > klast then task tid does no work at all
int64_t kfirst = kfirst_slice [tid] ;
int64_t klast = klast_slice [tid] ;
//----------------------------------------------------------------------
// selection from vectors kfirst to klast
//----------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// find the part of A(:,k) to be operated on by this task
//------------------------------------------------------------------
// pA_start/pA_end delimit this task's share of A(:,k); pC is the
// matching write position in C.
int64_t pA_start, pA_end, pC ;
GB_get_pA_and_pC (&pA_start, &pA_end, &pC,
tid, k, kfirst, klast, pstart_slice, C_pstart_slice, Cp, Ap) ;
//------------------------------------------------------------------
// compact Ai and Ax [pA_start ... pA_end-1] into Ci and Cx
//------------------------------------------------------------------
#if defined ( GB_ENTRY_SELECTOR )
// general case: test each entry with GB_SELECT and copy the keepers
GB_GET_J ;
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ;
if (GB_SELECT (pA))
{
ASSERT (pC >= Cp [k] && pC < Cp [k+1]) ;
Ci [pC] = i ;
// Cx [pC] = Ax [pA] ;
GB_SELECT_ENTRY (Cx, pC, Ax, pA) ;
pC++ ;
}
}
#elif defined ( GB_TRIU_SELECTOR ) \
|| defined ( GB_RESIZE_SELECTOR )
// triu/resize: the keepers form a single prefix; bulk-copy it.
// keep pA_start to Zp[k]-1
int64_t p = GB_IMIN (Zp [k], pA_end) ;
int64_t mynz = p - pA_start ;
if (mynz > 0)
{
ASSERT (pC >= Cp [k] && pC + mynz <= Cp [k+1]) ;
memcpy (Ci +pC, Ai +pA_start, mynz*sizeof (int64_t)) ;
memcpy (Cx +pC*asize, Ax +pA_start*asize, mynz*asize) ;
}
#elif defined ( GB_DIAG_SELECTOR )
// diag: at most one entry per vector is kept
// task that owns the diagonal entry does this work
int64_t p = Zp [k] ;
if (pA_start <= p && p < pA_end)
{
ASSERT (pC >= Cp [k] && pC + 1 <= Cp [k+1]) ;
Ci [pC] = Ai [p] ;
memcpy (Cx +pC*asize, Ax +p*asize, asize) ;
}
#elif defined ( GB_OFFDIAG_SELECTOR )
// offdiag: keep everything except the entry at Zp[k] (two bulk copies)
// keep pA_start to Zp[k]-1
int64_t p = GB_IMIN (Zp [k], pA_end) ;
int64_t mynz = p - pA_start ;
if (mynz > 0)
{
ASSERT (pC >= Cp [k] && pC + mynz <= Cp [k+1]) ;
memcpy (Ci +pC, Ai +pA_start, mynz*sizeof (int64_t)) ;
memcpy (Cx +pC*asize, Ax +pA_start*asize, mynz*asize) ;
pC += mynz ;
}
// keep Zp[k]+1 to pA_end-1
p = GB_IMAX (Zp [k]+1, pA_start) ;
mynz = pA_end - p ;
if (mynz > 0)
{
ASSERT (pA_start <= p && p < pA_end) ;
ASSERT (pC >= Cp [k] && pC + mynz <= Cp [k+1]) ;
memcpy (Ci +pC, Ai +p, mynz*sizeof (int64_t)) ;
memcpy (Cx +pC*asize, Ax +p*asize, mynz*asize) ;
}
#elif defined ( GB_TRIL_SELECTOR )
// tril: the keepers form a single suffix; bulk-copy it.
// keep Zp [k] to pA_end-1
int64_t p = GB_IMAX (Zp [k], pA_start) ;
int64_t mynz = pA_end - p ;
if (mynz > 0)
{
ASSERT (pA_start <= p && p + mynz <= pA_end) ;
ASSERT (pC >= Cp [k] && pC + mynz <= Cp [k+1]) ;
memcpy (Ci +pC, Ai +p, mynz*sizeof (int64_t)) ;
memcpy (Cx +pC*asize, Ax +p*asize, mynz*asize) ;
}
#endif
}
}
}
|
sphgrid.h | #ifndef SPHGRID_H
#define SPHGRID_H
#include <omp.h>
#include <vector>
#include <map>
#include <unordered_map>
#include <cmath>
#include <algorithm>
#include <glm/vec3.hpp>
#include <glm/mat3x3.hpp>
#include <glm/geometric.hpp>
#include <glm/gtx/scalar_multiplication.hpp>
#include "sphparticle.h"
#include "sphmap.h"
#include "sphparticle.h"
#include "marching_cube.h"
#define PI 3.14159265358979323846264338327950288
// Regular sampling grid used to evaluate an SPH density field (and its
// gradient) at the grid nodes, then extract an isosurface via marching cubes.
// Fixes: added a destructor (the constructor allocates three arrays plus one
// FluidParticle per node, none of which were ever freed) and deleted the copy
// operations (a shallow copy would double-free once the destructor exists).
// pow(x, small-int) calls were replaced with plain multiplications.
class SpatialGrid {
private:
    int imax, jmax, kmax;                   // node counts along x, y, z
    vec3 lb, ub;                            // lower / upper corners of the domain
    float g_d, smtRadius;                   // grid spacing and SPH smoothing radius
    float* densityTable; vec3* normTable;   // per-node density and negated density gradient
    FluidParticle** gridPs;                 // one zero-mass probe particle per node
    // Cubic-spline kernel profile, normalized by 3/(2*pi); support is q in [0, 2).
    float f(float q) {
        double qq = q;  // compute in double, as the pow-based original did
        double ans;
        if (0 <= q && q < 1)
            ans = 2.0 / 3.0 - qq * qq + 0.5 * qq * qq * qq;
        else if (1 <= q && q < 2)
            ans = 1.0 / 6.0 * (2 - qq) * (2 - qq) * (2 - qq);
        else
            ans = 0.0;
        return ans * 3.0 / 2.0 / PI;
    }
    // Derivative of the kernel profile with respect to q.
    float df(float q) {
        double qq = q;
        double ans;
        if (0 <= q && q < 1)
            ans = -2 * qq + 3.0 / 2.0 * qq * qq;
        else if (1 <= q && q < 2)
            ans = -0.5 * (2 - qq) * (2 - qq);
        else
            ans = 0.0;
        return ans * 3.0 / 2.0 / PI;
    }
    // Smoothing kernel W(x_i - x_j, h) with h = smtRadius.
    float W(FluidParticle* x_i, FluidParticle* x_j) {
        float q = distance(x_i->getPosition(), x_j->getPosition()) / smtRadius;
        double h = smtRadius;
        return 1.0 / (h * h * h) * f(q);
    }
    // Gradient of W with respect to x_i.
    // NOTE(review): divides by the distance between the particles — assumes
    // x_i != x_j; confirm callers never evaluate this at zero separation.
    vec3 dW(FluidParticle* x_i, FluidParticle* x_j) {
        float q = distance(x_i->getPosition(), x_j->getPosition()) / smtRadius;
        vec3 norm = x_i->getPosition() - x_j->getPosition();
        norm = norm / sqrt(dot(norm, norm));
        double h = smtRadius;
        return 1.0 / (h * h * h * h) * df(q) * norm;
    }
    // Grid-index (i,j,k) -> world position of that node.
    vec3 XYZ2Pos(int3 xyz) {
        return vec3(lb.x + xyz.x * g_d, lb.y + xyz.y * g_d, lb.z + xyz.z * g_d);
    }
    // Flat index -> (i,j,k), row-major with k fastest.
    int3 index2XYZ(int index) {
        return int3(index / jmax / kmax, index / kmax % jmax, index % kmax);
    }
    vec3 index2Pos(int index) {
        return XYZ2Pos(index2XYZ(index));
    }
    // (i,j,k) -> flat index, inverse of index2XYZ.
    int XYZ2Index(int3 xyz) {
        return xyz.x * jmax * kmax + xyz.y * kmax + xyz.z;
    }
public:
    // Builds the node grid covering [lb, ub] with spacing g_d, and allocates
    // the density/normal tables plus one probe particle per node.
    SpatialGrid(vec3 lb, vec3 ub, float g_d, float smtRadius) {
        this->lb = lb;
        this->ub = ub;
        this->g_d = g_d;
        this->smtRadius = smtRadius;
        this->imax = (ub.x - lb.x) / g_d + 1;
        this->jmax = (ub.y - lb.y) / g_d + 1;
        this->kmax = (ub.z - lb.z) / g_d + 1;
        densityTable = new float[imax * jmax * kmax];
        normTable = new vec3[imax * jmax * kmax];
        gridPs = new FluidParticle * [imax * jmax * kmax];
        for (int i = 0; i < imax * jmax * kmax; i++) {
            int3 xyz = index2XYZ(i);
            vec3 pos = XYZ2Pos(xyz);
            gridPs[i] = new FluidParticle(pos, 0.0f);
            densityTable[i] = 0.0f;
            normTable[i] = vec3(0.0f, 0.0f, 0.0f);
        }
    }
    // Releases everything allocated by the constructor.
    // NOTE(review): assumes no SpatialHashTable still holds pointers to the
    // probe particles when the grid is destroyed — confirm at call sites.
    ~SpatialGrid() {
        for (int i = 0; i < imax * jmax * kmax; i++) delete gridPs[i];
        delete[] gridPs;
        delete[] densityTable;
        delete[] normTable;
    }
    // Non-copyable: the class owns raw allocations.
    SpatialGrid(const SpatialGrid&) = delete;
    SpatialGrid& operator=(const SpatialGrid&) = delete;
    // Evaluates SPH density and (negated) density gradient at every node,
    // using the neighbor lists provided by hashTable.
    void computeDensityTable(SpatialHashTable* hashTable) {
        for (int i = 0; i < imax * jmax * kmax; i++) {
            hashTable->computeNeighborBlocks(gridPs[i]);
        }
        #pragma omp parallel for
        for (int i = 0; i < imax * jmax * kmax; i++) {
            hashTable->computeNeighbors(gridPs[i]);
            gridPs[i]->setBuiltNeighborsFlag(true);
        }
        for (int i = 0; i < imax * jmax * kmax; i++) {
            float dens = 0; vec3 norm(0, 0, 0);
            vector<FluidParticle*>* neighbors = gridPs[i]->getNeighbors(true);
            for (int j = 0; j < neighbors->size(); j++) {
                FluidParticle* p_j = neighbors->at(j);
                dens += p_j->getMass() * W(gridPs[i], p_j);
                norm += p_j->getMass() * dW(gridPs[i], p_j);
            }
            densityTable[i] = dens;
            normTable[i] = -norm;
        }
    }
    // Runs marching cubes over every interior cell; triangle vertices are
    // written into res (3 floats per vertex).  Returns the triangle count.
    // NOTE(review): res must be large enough for the produced vertices —
    // confirm the caller's sizing convention.
    int computeTriangleFacets(float* res, float isolevel) {
        vector<vec3> temp;
        int numTriangle = 0;
        int cube_edgeToVerts[12][2] = {
            {0,1}, {1,2}, {2,3}, {3,0},
            {4,5}, {5,6}, {6,7}, {7,4},
            {0,4}, {1,5}, {2,6}, {3,7},
        };
        for (int i = 0 ; i < imax * jmax * kmax ; i++) {
            int3 xyz = index2XYZ(i);
            vec3 pos = XYZ2Pos(xyz);
            // cells on the far faces have no +1 neighbor; skip them
            if (xyz.x == imax - 1 || xyz.y == jmax - 1 || xyz.z == kmax - 1) continue;
            int3 indices[8] = {
                xyz + int3(0,0,0), xyz + int3(0,1,0), xyz + int3(1,1,0), xyz + int3(1,0,0),
                xyz + int3(0,0,1), xyz + int3(0,1,1), xyz + int3(1,1,1), xyz + int3(1,0,1)
            };
            float dens[8]; vec3 poss[8]; vec3 norm[8];
            for (int j = 0 ; j < 8 ; j++) {
                poss[j] = XYZ2Pos(indices[j]);
                dens[j] = densityTable[XYZ2Index(indices[j])];
                norm[j] = normTable[XYZ2Index(indices[j])];
            }
            numTriangle += Polygonise(poss, norm, dens, isolevel, &temp);
        }
        for (int i = 0; i < temp.size(); i++) {
            res[3 * i + 0] = temp[i].x;
            res[3 * i + 1] = temp[i].y;
            res[3 * i + 2] = temp[i].z;
        }
        return numTriangle;
    }
};
#endif |
par_interp.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
#include "_hypre_parcsr_ls.h"
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildInterp
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildInterp( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_Int *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
hypre_ParCSRMatrix *P;
HYPRE_Int *col_map_offd_P;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data = NULL;
HYPRE_Int *A_ext_i = NULL;
HYPRE_Int *A_ext_j = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int strong_f_marker;
HYPRE_Int *fine_to_coarse;
HYPRE_Int *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_Int total_global_cpts;
HYPRE_Int num_cols_P_offd,my_first_cpt;
HYPRE_Int i,i1,i2;
HYPRE_Int j,jl,jj,jj1;
HYPRE_Int k,kc;
HYPRE_Int start;
HYPRE_Int sgn;
HYPRE_Int c_num;
HYPRE_Real diagonal;
HYPRE_Real sum;
HYPRE_Real distribute;
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int print_level = 0;
HYPRE_Int *int_buf_data;
HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int col_n = col_1 + local_numrows;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag < 0)
{
debug_flag = -debug_flag;
print_level = 1;
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
if (num_functions > 1 && num_cols_A_offd)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends));
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*----------------------------------------------------------------------
* Get the ghost rows of A
*---------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_procs > 1)
{
A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1);
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
}
index = 0;
for (i=0; i < num_cols_A_offd; i++)
{
for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++)
{
k = A_ext_j[j];
if (k >= col_1 && k < col_n)
{
A_ext_j[index] = k - col_1;
A_ext_data[index++] = A_ext_data[j];
}
else
{
kc = hypre_BinarySearch(col_map_offd,k,num_cols_A_offd);
if (kc > -1)
{
A_ext_j[index] = -kc-1;
A_ext_data[index++] = A_ext_data[j];
}
}
}
A_ext_i[i] = index;
}
for (i = num_cols_A_offd; i > 0; i--)
A_ext_i[i] = A_ext_i[i-1];
if (num_procs > 1) A_ext_i[0] = 0;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 2 Get A_ext = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i.
*--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size);
/*-----------------------------------------------------------------------
* Intialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
fine_to_coarse[i] += my_first_cpt+coarse_shift;
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
strong_f_marker = -2;
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
jj_begin_row = jj_counter;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
/*--------------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*--------------------------------------------------------------*/
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
}
}
jj_end_row = jj_counter;
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
jj_begin_row_offd = jj_counter_offd;
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
/*-----------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
/*-----------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
}
}
}
}
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
/* Loop over ith row of A. First, the diagonal part of A */
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
/*--------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and strongly influences i,
 * distribute a_{i,i1} to C-points that strongly influence i.
* Note: currently no distribution to the diagonal in this case.
*--------------------------------------------------------------*/
else if (P_marker[i1] == strong_f_marker)
{
sum = zero;
/*-----------------------------------------------------------
* Loop over row of A for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*-----------------------------------------------------------*/
sgn = 1;
if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row &&
(sgn*A_diag_data[jj1]) < 0)
{
sum += A_diag_data[jj1];
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
sum += A_offd_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_diag_data[jj] / sum;
/*-----------------------------------------------------------
* Loop over row of A for point i1 and do the distribution.
*-----------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_diag_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_diag_data[jj1];
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
P_offd_data[P_marker_offd[i2]]
+= distribute * A_offd_data[jj1];
}
}
}
}
else
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
/*--------------------------------------------------------------
* Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
* into the diagonal.
*--------------------------------------------------------------*/
else if (CF_marker[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
/*----------------------------------------------------------------
* Still looping over ith row of A. Next, loop over the
* off-diagonal part of A
*---------------------------------------------------------------*/
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
}
/*------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and strongly influences i,
 * distribute a_{i,i1} to C-points that strongly influence i.
* Note: currently no distribution to the diagonal in this case.
*-----------------------------------------------------------*/
else if (P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
/*---------------------------------------------------------
* Loop over row of A_ext for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*---------------------------------------------------------*/
/* find row number */
c_num = A_offd_j[jj];
sgn = 1;
if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = A_ext_j[jj1];
if (i2 > -1)
{
/* in the diagonal block */
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_offd_data[jj] / sum;
/*---------------------------------------------------------
* Loop over row of A_ext for point i1 and do
* the distribution.
*--------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = A_ext_j[jj1];
if (i2 > -1) /* in the diagonal block */
{
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
P_offd_data[P_marker_offd[-i2-1]]
+= distribute * A_ext_data[jj1];
}
}
}
else
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
/*-----------------------------------------------------------
* Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
* into the diagonal.
*-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
if (diagonal == 0.0)
{
if (print_level)
hypre_printf(" Warning! zero diagonal! Proc id %d row %d\n", my_id,i);
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] = 0.0;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] = 0.0;
}
}
else
{
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] /= -diagonal;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] /= -diagonal;
}
}
}
strong_f_marker--;
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker);
hypre_TFree(P_marker_offd);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_A_offd; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_Int,num_cols_P_offd);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
col_map_offd_P[i] = index++;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(col_map_offd_P,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker);
}
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse_offd);
*P_ptr = P;
hypre_TFree(CF_marker_offd);
hypre_TFree(dof_func_offd);
hypre_TFree(int_buf_data);
hypre_TFree(fine_to_coarse);
hypre_TFree(fine_to_coarse_offd);
hypre_TFree(coarse_counter);
hypre_TFree(jj_count);
hypre_TFree(jj_count_offd);
if (num_procs > 1) hypre_CSRMatrixDestroy(A_ext);
return(0);
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildInterpHE
* interpolation routine for hyperbolic PDEs
* treats weak fine connections like strong fine connections
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildInterpHE( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_Int *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
hypre_ParCSRMatrix *P;
HYPRE_Int *col_map_offd_P;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data = NULL;
HYPRE_Int *A_ext_i = NULL;
HYPRE_Int *A_ext_j = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int *fine_to_coarse;
HYPRE_Int *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_Int total_global_cpts;
HYPRE_Int num_cols_P_offd,my_first_cpt;
HYPRE_Int i,i1,i2;
HYPRE_Int j,jl,jj,jj1;
HYPRE_Int k, kc;
HYPRE_Int start;
HYPRE_Int sgn;
HYPRE_Int c_num;
HYPRE_Real diagonal;
HYPRE_Real sum;
HYPRE_Real distribute;
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int col_n = col_1 + local_numrows;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
if (num_functions > 1 && num_cols_A_offd)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends));
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*----------------------------------------------------------------------
* Get the ghost rows of A
*---------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_procs > 1)
{
A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1);
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
}
index = 0;
for (i=0; i < num_cols_A_offd; i++)
{
for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++)
{
k = A_ext_j[j];
if (k >= col_1 && k < col_n)
{
A_ext_j[index] = k - col_1;
A_ext_data[index++] = A_ext_data[j];
}
else
{
kc = hypre_BinarySearch(col_map_offd,k,num_cols_A_offd);
if (kc > -1)
{
A_ext_j[index] = -kc-1;
A_ext_data[index++] = A_ext_data[j];
}
}
}
A_ext_i[i] = index;
}
for (i = num_cols_A_offd; i > 0; i--)
A_ext_i[i] = A_ext_i[i-1];
if (num_procs > 1) A_ext_i[0] = 0;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 2 Get A_ext = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i.
*--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size);
/*-----------------------------------------------------------------------
* Intialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
fine_to_coarse[i] += my_first_cpt+coarse_shift;
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
jj_begin_row = jj_counter;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
jj_end_row = jj_counter;
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
jj_begin_row_offd = jj_counter_offd;
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
/* Loop over ith row of A. First, the diagonal part of A */
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
/*--------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and influences i,
* distribute a_{i,i1} to C-points that strongly influence i.
* Note: currently no distribution to the diagonal in this case.
*--------------------------------------------------------------*/
else
{
sum = zero;
/*-----------------------------------------------------------
* Loop over row of A for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*-----------------------------------------------------------*/
sgn = 1;
if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row &&
(sgn*A_diag_data[jj1]) < 0)
{
sum += A_diag_data[jj1];
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
sum += A_offd_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_diag_data[jj] / sum;
/*-----------------------------------------------------------
* Loop over row of A for point i1 and do the distribution.
*-----------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_diag_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_diag_data[jj1];
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
P_offd_data[P_marker_offd[i2]]
+= distribute * A_offd_data[jj1];
}
}
}
}
else
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
}
/*----------------------------------------------------------------
* Still looping over ith row of A. Next, loop over the
* off-diagonal part of A
*---------------------------------------------------------------*/
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
}
/*------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and influences i,
* distribute a_{i,i1} to C-points that strongly infuence i.
* Note: currently no distribution to the diagonal in this case.
*-----------------------------------------------------------*/
else
{
sum = zero;
/*---------------------------------------------------------
* Loop over row of A_ext for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*---------------------------------------------------------*/
/* find row number */
c_num = A_offd_j[jj];
sgn = 1;
if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = A_ext_j[jj1];
if (i2 > -1)
{
/* in the diagonal block */
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_offd_data[jj] / sum;
/*---------------------------------------------------------
* Loop over row of A_ext for point i1 and do
* the distribution.
*--------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = A_ext_j[jj1];
if (i2 > -1) /* in the diagonal block */
{
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
P_offd_data[P_marker_offd[-i2-1]]
+= distribute * A_ext_data[jj1];
}
}
}
else
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
}
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] /= -diagonal;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] /= -diagonal;
}
}
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker);
hypre_TFree(P_marker_offd);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_A_offd; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_Int,num_cols_P_offd);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
col_map_offd_P[i] = index++;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(col_map_offd_P,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker);
}
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,A,fine_to_coarse_offd);
*P_ptr = P;
hypre_TFree(CF_marker_offd);
hypre_TFree(dof_func_offd);
hypre_TFree(int_buf_data);
hypre_TFree(fine_to_coarse);
hypre_TFree(fine_to_coarse_offd);
hypre_TFree(coarse_counter);
hypre_TFree(jj_count);
hypre_TFree(jj_count_offd);
if (num_procs > 1) hypre_CSRMatrixDestroy(A_ext);
return(0);
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildDirInterp
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildDirInterp( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_Int *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
hypre_ParCSRMatrix *P;
HYPRE_Int *col_map_offd_P;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int *fine_to_coarse;
HYPRE_Int *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_Int total_global_cpts;
HYPRE_Int num_cols_P_offd,my_first_cpt;
HYPRE_Int i,i1;
HYPRE_Int j,jl,jj;
HYPRE_Int start;
HYPRE_Real diagonal;
HYPRE_Real sum_N_pos, sum_P_pos;
HYPRE_Real sum_N_neg, sum_P_neg;
HYPRE_Real alfa = 1.0;
HYPRE_Real beta = 1.0;
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
if (num_functions > 1 && num_cols_A_offd)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends));
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i.
*--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
if (CF_marker_offd[i1] > 0)
{
jj_count_offd[j]++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] > 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size);
/*-----------------------------------------------------------------------
* Intialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
fine_to_coarse[i] += my_first_cpt+coarse_shift;
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,diagonal,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
jj_begin_row = jj_counter;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
jj_end_row = jj_counter;
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
jj_begin_row_offd = jj_counter_offd;
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
/* Loop over ith row of A. First, the diagonal part of A */
sum_N_pos = 0;
sum_N_neg = 0;
sum_P_pos = 0;
sum_P_neg = 0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
if (num_functions == 1 || dof_func[i1] == dof_func[i])
{
if (A_diag_data[jj] > 0)
sum_N_pos += A_diag_data[jj];
else
sum_N_neg += A_diag_data[jj];
}
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
if (A_diag_data[jj] > 0)
sum_P_pos += A_diag_data[jj];
else
sum_P_neg += A_diag_data[jj];
}
}
/*----------------------------------------------------------------
* Still looping over ith row of A. Next, loop over the
* off-diagonal part of A
*---------------------------------------------------------------*/
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if (num_functions == 1 || dof_func_offd[i1] == dof_func[i])
{
if (A_offd_data[jj] > 0)
sum_N_pos += A_offd_data[jj];
else
sum_N_neg += A_offd_data[jj];
}
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
if (A_offd_data[jj] > 0)
sum_P_pos += A_offd_data[jj];
else
sum_P_neg += A_offd_data[jj];
}
}
}
if (sum_P_neg) alfa = sum_N_neg/sum_P_neg/diagonal;
if (sum_P_pos) beta = sum_N_pos/sum_P_pos/diagonal;
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
if (P_diag_data[jj]> 0)
P_diag_data[jj] *= -beta;
else
P_diag_data[jj] *= -alfa;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
if (P_offd_data[jj]> 0)
P_offd_data[jj] *= -beta;
else
P_offd_data[jj] *= -alfa;
}
}
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker);
hypre_TFree(P_marker_offd);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_A_offd; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_Int,num_cols_P_offd);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
col_map_offd_P[i] = index++;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(col_map_offd_P,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker);
}
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,A,fine_to_coarse_offd);
*P_ptr = P;
hypre_TFree(CF_marker_offd);
hypre_TFree(dof_func_offd);
hypre_TFree(int_buf_data);
hypre_TFree(fine_to_coarse);
hypre_TFree(fine_to_coarse_offd);
hypre_TFree(coarse_counter);
hypre_TFree(jj_count);
hypre_TFree(jj_count_offd);
return(0);
}
HYPRE_Int
hypre_BoomerAMGInterpTruncation( hypre_ParCSRMatrix *P,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts)
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_INTERP_TRUNC] -= hypre_MPI_Wtime();
#endif
hypre_CSRMatrix *P_diag = hypre_ParCSRMatrixDiag(P);
HYPRE_Int *P_diag_i = hypre_CSRMatrixI(P_diag);
HYPRE_Int *P_diag_j = hypre_CSRMatrixJ(P_diag);
HYPRE_Real *P_diag_data = hypre_CSRMatrixData(P_diag);
HYPRE_Int *P_diag_j_new;
HYPRE_Real *P_diag_data_new;
hypre_CSRMatrix *P_offd = hypre_ParCSRMatrixOffd(P);
HYPRE_Int *P_offd_i = hypre_CSRMatrixI(P_offd);
HYPRE_Int *P_offd_j = hypre_CSRMatrixJ(P_offd);
HYPRE_Real *P_offd_data = hypre_CSRMatrixData(P_offd);
HYPRE_Int *P_offd_j_new;
HYPRE_Real *P_offd_data_new;
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(P_diag);
HYPRE_Int num_cols = hypre_CSRMatrixNumCols(P_diag);
HYPRE_Int i, j, start_j;
HYPRE_Int ierr = 0;
HYPRE_Int next_open;
HYPRE_Int now_checking;
HYPRE_Int num_lost;
HYPRE_Int num_lost_global=0;
HYPRE_Int next_open_offd;
HYPRE_Int now_checking_offd;
HYPRE_Int num_lost_offd;
HYPRE_Int num_lost_global_offd;
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int num_elmts;
HYPRE_Int cnt, cnt_diag, cnt_offd;
HYPRE_Real max_coef;
HYPRE_Real row_sum;
HYPRE_Real scale;
/* Threading variables. Entry i of num_lost_(offd_)per_thread holds the
* number of dropped entries over thread i's row range. Cum_lost_per_thread
* will temporarily store the cumulative number of dropped entries up to
* each thread. */
HYPRE_Int my_thread_num, num_threads, start, stop;
HYPRE_Int * max_num_threads = hypre_CTAlloc(HYPRE_Int, 1);
HYPRE_Int * cum_lost_per_thread;
HYPRE_Int * num_lost_per_thread;
HYPRE_Int * num_lost_offd_per_thread;
/* Initialize threading variables */
max_num_threads[0] = hypre_NumThreads();
cum_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0]);
num_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0]);
num_lost_offd_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0]);
for(i=0; i < max_num_threads[0]; i++)
{
num_lost_per_thread[i] = 0;
num_lost_offd_per_thread[i] = 0;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,my_thread_num,num_threads,max_coef,j,start_j,row_sum,scale,num_lost,now_checking,next_open,num_lost_offd,now_checking_offd,next_open_offd,start,stop,cnt_diag,cnt_offd,num_elmts,cnt)
#endif
{
my_thread_num = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
/* Compute each thread's range of rows to truncate and compress. Note,
* that i, j and data are all compressed as entries are dropped, but
* that the compression only occurs locally over each thread's row
* range. P_diag_i is only made globally consistent at the end of this
* routine. During the dropping phases, P_diag_i[stop] will point to
* the start of the next thread's row range. */
/* my row range */
start = (n_fine/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{ stop = n_fine; }
else
{ stop = (n_fine/num_threads)*(my_thread_num+1); }
/*
* Truncate based on truncation tolerance
*/
if (trunc_factor > 0)
{
num_lost = 0;
num_lost_offd = 0;
next_open = P_diag_i[start];
now_checking = P_diag_i[start];
next_open_offd = P_offd_i[start];;
now_checking_offd = P_offd_i[start];;
for (i = start; i < stop; i++)
{
max_coef = 0;
for (j = P_diag_i[i]; j < P_diag_i[i+1]; j++)
max_coef = (max_coef < fabs(P_diag_data[j])) ?
fabs(P_diag_data[j]) : max_coef;
for (j = P_offd_i[i]; j < P_offd_i[i+1]; j++)
max_coef = (max_coef < fabs(P_offd_data[j])) ?
fabs(P_offd_data[j]) : max_coef;
max_coef *= trunc_factor;
start_j = P_diag_i[i];
if (num_lost) P_diag_i[i] -= num_lost;
row_sum = 0;
scale = 0;
for (j = start_j; j < P_diag_i[i+1]; j++)
{
row_sum += P_diag_data[now_checking];
if (fabs(P_diag_data[now_checking]) < max_coef)
{
num_lost++;
now_checking++;
}
else
{
scale += P_diag_data[now_checking];
P_diag_data[next_open] = P_diag_data[now_checking];
P_diag_j[next_open] = P_diag_j[now_checking];
now_checking++;
next_open++;
}
}
start_j = P_offd_i[i];
if (num_lost_offd) P_offd_i[i] -= num_lost_offd;
for (j = start_j; j < P_offd_i[i+1]; j++)
{
row_sum += P_offd_data[now_checking_offd];
if (fabs(P_offd_data[now_checking_offd]) < max_coef)
{
num_lost_offd++;
now_checking_offd++;
}
else
{
scale += P_offd_data[now_checking_offd];
P_offd_data[next_open_offd] = P_offd_data[now_checking_offd];
P_offd_j[next_open_offd] = P_offd_j[now_checking_offd];
now_checking_offd++;
next_open_offd++;
}
}
/* normalize row of P */
if (scale != 0.)
{
if (scale != row_sum)
{
scale = row_sum/scale;
for (j = P_diag_i[i]; j < (P_diag_i[i+1]-num_lost); j++)
P_diag_data[j] *= scale;
for (j = P_offd_i[i]; j < (P_offd_i[i+1]-num_lost_offd); j++)
P_offd_data[j] *= scale;
}
}
} /* end loop for (i = 0; i < n_fine; i++) */
/* store number of dropped elements and number of threads */
if(my_thread_num == 0)
{ max_num_threads[0] = num_threads; }
num_lost_per_thread[my_thread_num] = num_lost;
num_lost_offd_per_thread[my_thread_num] = num_lost_offd;
} /* end if (trunc_factor > 0) */
/*
* Truncate based on capping the nnz per row
*
*/
if (max_elmts > 0)
{
HYPRE_Int P_mxnum, cnt1, last_index, last_index_offd;
HYPRE_Int *P_aux_j;
HYPRE_Real *P_aux_data;
/* find maximum row length locally over this row range */
P_mxnum = 0;
for (i=start; i<stop; i++)
{
/* Note P_diag_i[stop] is the starting point for the next thread
* in j and data, not the stop point for this thread */
last_index = P_diag_i[i+1];
last_index_offd = P_offd_i[i+1];
if(i == stop-1)
{
last_index -= num_lost_per_thread[my_thread_num];
last_index_offd -= num_lost_offd_per_thread[my_thread_num];
}
cnt1 = last_index-P_diag_i[i] + last_index_offd-P_offd_i[i];
if (cnt1 > P_mxnum) P_mxnum = cnt1;
}
/* Some rows exceed max_elmts, and require truncation. Essentially,
* each thread truncates and compresses its range of rows locally. */
if (P_mxnum > max_elmts)
{
num_lost = 0;
num_lost_offd = 0;
/* two temporary arrays to hold row i for temporary operations */
P_aux_j = hypre_CTAlloc(HYPRE_Int, P_mxnum);
P_aux_data = hypre_CTAlloc(HYPRE_Real, P_mxnum);
cnt_diag = P_diag_i[start];
cnt_offd = P_offd_i[start];
for (i = start; i < stop; i++)
{
/* Note P_diag_i[stop] is the starting point for the next thread
* in j and data, not the stop point for this thread */
last_index = P_diag_i[i+1];
last_index_offd = P_offd_i[i+1];
if(i == stop-1)
{
last_index -= num_lost_per_thread[my_thread_num];
last_index_offd -= num_lost_offd_per_thread[my_thread_num];
}
row_sum = 0;
num_elmts = last_index-P_diag_i[i] + last_index_offd-P_offd_i[i];
if (max_elmts < num_elmts)
{
/* copy both diagonal and off-diag parts of row i to _aux_ arrays */
cnt = 0;
for (j = P_diag_i[i]; j < last_index; j++)
{
P_aux_j[cnt] = P_diag_j[j];
P_aux_data[cnt++] = P_diag_data[j];
row_sum += P_diag_data[j];
}
num_lost += cnt;
cnt1 = cnt;
for (j = P_offd_i[i]; j < last_index_offd; j++)
{
P_aux_j[cnt] = P_offd_j[j]+num_cols;
P_aux_data[cnt++] = P_offd_data[j];
row_sum += P_offd_data[j];
}
num_lost_offd += cnt-cnt1;
/* sort data */
hypre_qsort2abs(P_aux_j,P_aux_data,0,cnt-1);
scale = 0;
if (i > start)
{
P_diag_i[i] = cnt_diag;
P_offd_i[i] = cnt_offd;
}
for (j = 0; j < max_elmts; j++)
{
scale += P_aux_data[j];
if (P_aux_j[j] < num_cols)
{
P_diag_j[cnt_diag] = P_aux_j[j];
P_diag_data[cnt_diag++] = P_aux_data[j];
}
else
{
P_offd_j[cnt_offd] = P_aux_j[j]-num_cols;
P_offd_data[cnt_offd++] = P_aux_data[j];
}
}
num_lost -= cnt_diag-P_diag_i[i];
num_lost_offd -= cnt_offd-P_offd_i[i];
/* normalize row of P */
if (scale != 0.)
{
if (scale != row_sum)
{
scale = row_sum/scale;
for (j = P_diag_i[i]; j < cnt_diag; j++)
P_diag_data[j] *= scale;
for (j = P_offd_i[i]; j < cnt_offd; j++)
P_offd_data[j] *= scale;
}
}
} /* end if (max_elmts < num_elmts) */
else
{
/* nothing dropped from this row, but still have to shift entries back
* by the number dropped so far */
if (P_diag_i[i] != cnt_diag)
{
start_j = P_diag_i[i];
P_diag_i[i] = cnt_diag;
for (j = start_j; j < last_index; j++)
{
P_diag_j[cnt_diag] = P_diag_j[j];
P_diag_data[cnt_diag++] = P_diag_data[j];
}
}
else
cnt_diag += last_index-P_diag_i[i];
if (P_offd_i[i] != cnt_offd)
{
start_j = P_offd_i[i];
P_offd_i[i] = cnt_offd;
for (j = start_j; j < last_index_offd; j++)
{
P_offd_j[cnt_offd] = P_offd_j[j];
P_offd_data[cnt_offd++] = P_offd_data[j];
}
}
else
cnt_offd += last_index_offd-P_offd_i[i];
}
} /* end for (i = 0; i < n_fine; i++) */
num_lost_per_thread[my_thread_num] += num_lost;
num_lost_offd_per_thread[my_thread_num] += num_lost_offd;
hypre_TFree(P_aux_j);
hypre_TFree(P_aux_data);
} /* end if (P_mxnum > max_elmts) */
} /* end if (max_elmts > 0) */
/* Sum up num_lost_global */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if(my_thread_num == 0)
{
num_lost_global = 0;
num_lost_global_offd = 0;
for(i = 0; i < max_num_threads[0]; i++)
{
num_lost_global += num_lost_per_thread[i];
num_lost_global_offd += num_lost_offd_per_thread[i];
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/*
* Synchronize and create new diag data structures
*/
if (num_lost_global)
{
/* Each thread has it's own locally compressed CSR matrix from rows start
* to stop. Now, we have to copy each thread's chunk into the new
* process-wide CSR data structures
*
* First, we compute the new process-wide number of nonzeros (i.e.,
* P_diag_size), and compute cum_lost_per_thread[k] so that this
* entry holds the cumulative sum of entries dropped up to and
* including thread k. */
if(my_thread_num == 0)
{
P_diag_size = P_diag_i[n_fine];
for(i = 0; i < max_num_threads[0]; i++)
{
P_diag_size -= num_lost_per_thread[i];
if(i > 0)
{ cum_lost_per_thread[i] = num_lost_per_thread[i] + cum_lost_per_thread[i-1]; }
else
{ cum_lost_per_thread[i] = num_lost_per_thread[i]; }
}
P_diag_j_new = hypre_CTAlloc(HYPRE_Int,P_diag_size);
P_diag_data_new = hypre_CTAlloc(HYPRE_Real,P_diag_size);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* points to next open spot in new data structures for this thread */
if(my_thread_num == 0)
{ next_open = 0; }
else
{
/* remember, cum_lost_per_thread[k] stores the num dropped up to and
* including thread k */
next_open = P_diag_i[start] - cum_lost_per_thread[my_thread_num-1];
}
/* copy the j and data arrays over */
for(i = P_diag_i[start]; i < P_diag_i[stop] - num_lost_per_thread[my_thread_num]; i++)
{
P_diag_j_new[next_open] = P_diag_j[i];
P_diag_data_new[next_open] = P_diag_data[i];
next_open += 1;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* update P_diag_i with number of dropped entries by all lower ranked
* threads */
if(my_thread_num > 0)
{
for(i=start; i<stop; i++)
{
P_diag_i[i] -= cum_lost_per_thread[my_thread_num-1];
}
}
if(my_thread_num == 0)
{
/* Set last entry */
P_diag_i[n_fine] = P_diag_size ;
hypre_TFree(P_diag_j);
hypre_TFree(P_diag_data);
hypre_CSRMatrixJ(P_diag) = P_diag_j_new;
hypre_CSRMatrixData(P_diag) = P_diag_data_new;
hypre_CSRMatrixNumNonzeros(P_diag) = P_diag_size;
}
}
/*
* Synchronize and create new offd data structures
*/
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (num_lost_global_offd)
{
/* Repeat process for off-diagonal */
if(my_thread_num == 0)
{
P_offd_size = P_offd_i[n_fine];
for(i = 0; i < max_num_threads[0]; i++)
{
P_offd_size -= num_lost_offd_per_thread[i];
if(i > 0)
{ cum_lost_per_thread[i] = num_lost_offd_per_thread[i] + cum_lost_per_thread[i-1]; }
else
{ cum_lost_per_thread[i] = num_lost_offd_per_thread[i]; }
}
P_offd_j_new = hypre_CTAlloc(HYPRE_Int,P_offd_size);
P_offd_data_new = hypre_CTAlloc(HYPRE_Real,P_offd_size);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* points to next open spot in new data structures for this thread */
if(my_thread_num == 0)
{ next_open = 0; }
else
{
/* remember, cum_lost_per_thread[k] stores the num dropped up to and
* including thread k */
next_open = P_offd_i[start] - cum_lost_per_thread[my_thread_num-1];
}
/* copy the j and data arrays over */
for(i = P_offd_i[start]; i < P_offd_i[stop] - num_lost_offd_per_thread[my_thread_num]; i++)
{
P_offd_j_new[next_open] = P_offd_j[i];
P_offd_data_new[next_open] = P_offd_data[i];
next_open += 1;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* update P_offd_i with number of dropped entries by all lower ranked
* threads */
if(my_thread_num > 0)
{
for(i=start; i<stop; i++)
{
P_offd_i[i] -= cum_lost_per_thread[my_thread_num-1];
}
}
if(my_thread_num == 0)
{
/* Set last entry */
P_offd_i[n_fine] = P_offd_size ;
hypre_TFree(P_offd_j);
hypre_TFree(P_offd_data);
hypre_CSRMatrixJ(P_offd) = P_offd_j_new;
hypre_CSRMatrixData(P_offd) = P_offd_data_new;
hypre_CSRMatrixNumNonzeros(P_offd) = P_offd_size;
}
}
} /* end parallel region */
hypre_TFree(max_num_threads);
hypre_TFree(cum_lost_per_thread);
hypre_TFree(num_lost_per_thread);
hypre_TFree(num_lost_offd_per_thread);
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_INTERP_TRUNC] += hypre_MPI_Wtime();
#endif
return ierr;
}
/* Quicksort v and w together, in place, into decreasing order of |w|.
 * Only the magnitudes in w drive the ordering; v is permuted along with w.
 * Sorts the inclusive index range [left, right]. */
void hypre_qsort2abs( HYPRE_Int *v,
                      HYPRE_Real *w,
                      HYPRE_Int left,
                      HYPRE_Int right )
{
   HYPRE_Int scan, split;

   /* ranges of length 0 or 1 are already sorted */
   if (left >= right)
   {
      return;
   }

   /* move the middle element into the pivot slot at position `left` */
   hypre_swap2(v, w, left, (left + right) / 2);

   /* partition: entries with |w| strictly larger than the pivot's
    * magnitude are collected at the front of the range */
   split = left;
   for (scan = left + 1; scan <= right; scan++)
   {
      if (fabs(w[scan]) > fabs(w[left]))
      {
         split++;
         hypre_swap2(v, w, split, scan);
      }
   }

   /* place the pivot at its final position, then sort both halves */
   hypre_swap2(v, w, left, split);
   hypre_qsort2abs(v, w, left, split - 1);
   hypre_qsort2abs(v, w, split + 1, right);
}
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildInterpModUnk - this is a modified interpolation for the
 * unknown approach. Here we need to pass in a strength matrix built on the
 * entire matrix (all unknowns); interpolation weights are only distributed
 * to points of the same function (dof_func) type.
 *
 * Arguments:
 *   A               - fine-grid matrix (ParCSR)
 *   CF_marker       - C/F splitting: >= 0 marks C-points, < 0 F-points
 *                     (-3 marks special F-points excluded from interpolation;
 *                      they are reset to -1 before returning)
 *   S               - strength matrix (same row partitioning as A)
 *   num_cpts_global - coarse-point partitioning information
 *   num_functions   - number of functions (unknowns per node)
 *   dof_func        - function type of each fine dof (used if num_functions>1)
 *   debug_flag      - 4 prints timing info; negative enables the
 *                     zero-diagonal warning (and is negated)
 *   trunc_factor    - relative truncation tolerance applied to P
 *   max_elmts       - max elements kept per row of P (0 = no cap)
 *   col_offd_S_to_A - map from S's offd columns to A's offd columns (may be NULL)
 *   P_ptr           - output: the interpolation (prolongation) matrix
 *
 * Returns 0.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGBuildInterpModUnk( hypre_ParCSRMatrix *A,
                                  HYPRE_Int *CF_marker,
                                  hypre_ParCSRMatrix *S,
                                  HYPRE_Int *num_cpts_global,
                                  HYPRE_Int num_functions,
                                  HYPRE_Int *dof_func,
                                  HYPRE_Int debug_flag,
                                  HYPRE_Real trunc_factor,
                                  HYPRE_Int max_elmts,
                                  HYPRE_Int *col_offd_S_to_A,
                                  hypre_ParCSRMatrix **P_ptr)
{
   /* communication objects */
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;

   /* CSR views of A's diagonal and off-diagonal blocks */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);

   /* CSR views of the strength matrix S */
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);

   hypre_ParCSRMatrix *P;
   HYPRE_Int *col_map_offd_P;

   /* off-processor (ghost-column) copies of CF_marker and dof_func */
   HYPRE_Int *CF_marker_offd = NULL;
   HYPRE_Int *dof_func_offd = NULL;

   /* external (ghost) rows of A, needed when distributing offd F-connections */
   hypre_CSRMatrix *A_ext;
   HYPRE_Real *A_ext_data = NULL;
   HYPRE_Int *A_ext_i = NULL;
   HYPRE_Int *A_ext_j = NULL;

   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int *P_offd_i;
   HYPRE_Int *P_offd_j;
   HYPRE_Int P_diag_size, P_offd_size;

   /* per-thread row-work markers: P_marker[k] holds either the position of
      column k in the current row of P, or strong_f_marker, or -1 */
   HYPRE_Int *P_marker, *P_marker_offd;

   HYPRE_Int jj_counter,jj_counter_offd;
   HYPRE_Int *jj_count, *jj_count_offd;
   HYPRE_Int jj_begin_row,jj_begin_row_offd;
   HYPRE_Int jj_end_row,jj_end_row_offd;
   HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int strong_f_marker;
   HYPRE_Int *fine_to_coarse;
   HYPRE_Int *fine_to_coarse_offd;
   HYPRE_Int *coarse_counter;
   HYPRE_Int coarse_shift;
   HYPRE_Int total_global_cpts;
   HYPRE_Int num_cols_P_offd,my_first_cpt;
   HYPRE_Int i,i1,i2;
   HYPRE_Int j,jl,jj,jj1;
   HYPRE_Int k,kc;
   HYPRE_Int start;
   HYPRE_Int sgn;
   HYPRE_Int c_num;
   HYPRE_Real diagonal;
   HYPRE_Real sum;
   HYPRE_Real distribute;
   HYPRE_Real zero = 0.0;
   HYPRE_Real one = 1.0;
   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   HYPRE_Int num_threads;
   HYPRE_Int num_sends;
   HYPRE_Int index;
   HYPRE_Int ns, ne, size, rest;
   HYPRE_Int print_level = 0;
   HYPRE_Int *int_buf_data;

   /* global column range [col_1, col_n) owned by this process */
   HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int col_n = col_1 + local_numrows;
   HYPRE_Real wall_time;  /* for debugging instrumentation */

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   num_threads = hypre_NumThreads();

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* without a global partition only the last proc knows the total number
      of coarse points, so broadcast it from there */
   my_first_cpt = num_cpts_global[0];
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
   my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];
#endif

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/

   if (debug_flag < 0)
   {
      debug_flag = -debug_flag;
      print_level = 1;
   }

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
   if (num_functions > 1 && num_cols_A_offd)
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                                                                           num_sends));

   /* pack the local CF_marker values each neighbor needs and exchange, so
      every process knows the C/F type of its ghost columns */
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++]
            = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                               CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   if (num_functions > 1)
   {
      /* same exchange for the function types */
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++]
               = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }

      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                                  dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }

   /*----------------------------------------------------------------------
    * Get the ghost rows of A
    *---------------------------------------------------------------------*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   if (num_procs > 1)
   {
      A_ext      = hypre_ParCSRMatrixExtractBExt(A,A,1);
      A_ext_i    = hypre_CSRMatrixI(A_ext);
      A_ext_j    = hypre_CSRMatrixJ(A_ext);
      A_ext_data = hypre_CSRMatrixData(A_ext);
   }

   /* compress A_ext in place while translating global column numbers:
      columns in the locally-owned range [col_1,col_n) become local diag
      indices, columns found in col_map_offd are encoded as -kc-1 (offd
      index kc), and columns belonging to neither are dropped */
   index = 0;
   for (i=0; i < num_cols_A_offd; i++)
   {
      for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++)
      {
         k = A_ext_j[j];
         if (k >= col_1 && k < col_n)
         {
            A_ext_j[index] = k - col_1;
            A_ext_data[index++] = A_ext_data[j];
         }
         else
         {
            kc = hypre_BinarySearch(col_map_offd,k,num_cols_A_offd);
            if (kc > -1)
            {
               A_ext_j[index] = -kc-1;
               A_ext_data[index++] = A_ext_data[j];
            }
         }
      }
      A_ext_i[i] = index;
   }
   /* shift row pointers so A_ext_i[i] again marks the start of row i */
   for (i = num_cols_A_offd; i > 0; i--)
      A_ext_i[i] = A_ext_i[i-1];
   if (num_procs > 1) A_ext_i[0] = 0;

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 2 Get A_ext = %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    *  First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    *  Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/

   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads);

   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    *  Loop over fine grid.
    *-----------------------------------------------------------------------*/

/* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      /* split rows [0,n_fine) evenly among threads; the first `rest`
         threads each take one extra row */
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          *  If i is a C-point, interpolation is the identity. Also set up
          *  mapping vector.
          *--------------------------------------------------------------------*/

         if (CF_marker[i] >= 0)
         {
            jj_count[j]++;
            fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
         }

         /*--------------------------------------------------------------------
          *  If i is an F-point, interpolation is from the C-points that
          *  strongly influence i.
          *--------------------------------------------------------------------*/

         else
         {
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               if (CF_marker[i1] >= 0)
               {
                  jj_count[j]++;
               }
            }

            if (num_procs > 1)
            {
               if (col_offd_S_to_A)
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = col_offd_S_to_A[S_offd_j[jj]];
                     if (CF_marker_offd[i1] >= 0)
                     {
                        jj_count_offd[j]++;
                     }
                  }
               }
               else
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = S_offd_j[jj];
                     if (CF_marker_offd[i1] >= 0)
                     {
                        jj_count_offd[j]++;
                     }
                  }
               }
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    *  Allocate arrays.
    *-----------------------------------------------------------------------*/

   /* prefix-sum the per-thread counts so each slot holds the cumulative
      count through that thread; the last slot is the total */
   for (i=0; i < num_threads-1; i++)
   {
      coarse_counter[i+1] += coarse_counter[i];
      jj_count[i+1] += jj_count[i];
      jj_count_offd[i+1] += jj_count_offd[i];
   }
   i = num_threads-1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];

   P_diag_size = jj_counter;

   P_diag_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1);
   P_diag_j    = hypre_CTAlloc(HYPRE_Int, P_diag_size);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size);

   P_diag_i[n_fine] = jj_counter;

   P_offd_size = jj_counter_offd;

   P_offd_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1);
   P_offd_j    = hypre_CTAlloc(HYPRE_Int, P_offd_size);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size);

   /*-----------------------------------------------------------------------
    *  Initialize some stuff.
    *-----------------------------------------------------------------------*/

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    *  Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      /* shift each thread's local coarse numbering into the global
         numbering before communicating it */
      coarse_shift = 0;
      if (j > 0) coarse_shift = coarse_counter[j-1];
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
         fine_to_coarse[i] += my_first_cpt+coarse_shift;
   }

   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++]
            = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                               fine_to_coarse_offd);

   hypre_ParCSRCommHandleDestroy(comm_handle);

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   /* undo the global shift: fine_to_coarse is used locally from here on */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;

   /*-----------------------------------------------------------------------
    *  Loop over fine grid points.
    *-----------------------------------------------------------------------*/

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
   for (jl = 0; jl < num_threads; jl++)
   {
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (jl < rest)
      {
         ns = jl*size+jl;
         ne = (jl+1)*size+jl+1;
      }
      else
      {
         ns = jl*size+rest;
         ne = (jl+1)*size+rest;
      }
      /* each thread starts filling P where the previous threads end,
         using the cumulative counts from the first pass */
      jj_counter = 0;
      if (jl > 0) jj_counter = jj_count[jl-1];
      jj_counter_offd = 0;
      if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];

      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);
      if (num_cols_A_offd)
         P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
      else
         P_marker_offd = NULL;

      for (i = 0; i < n_fine; i++)
      {
         P_marker[i] = -1;
      }
      for (i = 0; i < num_cols_A_offd; i++)
      {
         P_marker_offd[i] = -1;
      }
      strong_f_marker = -2;

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          *  If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter]    = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }

         /*--------------------------------------------------------------------
          *  If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/
         else
         {
            /* Diagonal part of P */
            P_diag_i[i] = jj_counter;
            jj_begin_row = jj_counter;

            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];

               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/

               if (CF_marker[i1] >= 0)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter]    = fine_to_coarse[i1];
                  P_diag_data[jj_counter] = zero;
                  jj_counter++;
               }

               /*--------------------------------------------------------------
                * If neighbor i1 is an F-point, mark it as a strong F-point
                * whose connection needs to be distributed.
                *--------------------------------------------------------------*/

               else if (CF_marker[i1] != -3)
               {
                  P_marker[i1] = strong_f_marker;
               }
            }
            jj_end_row = jj_counter;

            /* Off-Diagonal part of P */
            P_offd_i[i] = jj_counter_offd;
            jj_begin_row_offd = jj_counter_offd;

            if (num_procs > 1)
            {
               if (col_offd_S_to_A)
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = col_offd_S_to_A[S_offd_j[jj]];

                     /*-----------------------------------------------------------
                      * If neighbor i1 is a C-point, set column number in P_offd_j
                      * and initialize interpolation weight to zero.
                      *-----------------------------------------------------------*/

                     if (CF_marker_offd[i1] >= 0)
                     {
                        P_marker_offd[i1] = jj_counter_offd;
                        /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
                        /* stores the A-offd column index; it is renumbered to a
                           P-offd column index in the compression step below */
                        P_offd_j[jj_counter_offd] = i1;
                        P_offd_data[jj_counter_offd] = zero;
                        jj_counter_offd++;
                     }

                     /*-----------------------------------------------------------
                      * If neighbor i1 is an F-point, mark it as a strong F-point
                      * whose connection needs to be distributed.
                      *-----------------------------------------------------------*/

                     else if (CF_marker_offd[i1] != -3)
                     {
                        P_marker_offd[i1] = strong_f_marker;
                     }
                  }
               }
               else
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = S_offd_j[jj];

                     /*-----------------------------------------------------------
                      * If neighbor i1 is a C-point, set column number in P_offd_j
                      * and initialize interpolation weight to zero.
                      *-----------------------------------------------------------*/

                     if (CF_marker_offd[i1] >= 0)
                     {
                        P_marker_offd[i1] = jj_counter_offd;
                        /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
                        /* stores the A-offd column index; it is renumbered to a
                           P-offd column index in the compression step below */
                        P_offd_j[jj_counter_offd] = i1;
                        P_offd_data[jj_counter_offd] = zero;
                        jj_counter_offd++;
                     }

                     /*-----------------------------------------------------------
                      * If neighbor i1 is an F-point, mark it as a strong F-point
                      * whose connection needs to be distributed.
                      *-----------------------------------------------------------*/

                     else if (CF_marker_offd[i1] != -3)
                     {
                        P_marker_offd[i1] = strong_f_marker;
                     }
                  }
               }
            }

            jj_end_row_offd = jj_counter_offd;

            diagonal = A_diag_data[A_diag_i[i]];

            /* Loop over ith row of A. First, the diagonal part of A */

            for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
            {
               i1 = A_diag_j[jj];

               /*--------------------------------------------------------------
                * Case 1: neighbor i1 is a C-point and strongly influences i,
                * accumulate a_{i,i1} into the interpolation weight.
                *--------------------------------------------------------------*/

               if (P_marker[i1] >= jj_begin_row)
               {
                  P_diag_data[P_marker[i1]] += A_diag_data[jj];
               }

               /*--------------------------------------------------------------
                * Case 2: neighbor i1 is an F-point and strongly influences i,
                * distribute a_{i,i1} to C-points that strongly influence i.
                * Note: currently no distribution to the diagonal in this case.
                * HERE, we only want to distribute to points of the SAME
                * function type.
                *--------------------------------------------------------------*/

               else if (P_marker[i1] == strong_f_marker)
               {
                  sum = zero;

                  /*-----------------------------------------------------------
                   * Loop over row of A for point i1 and calculate the sum
                   * of the connections to c-points that strongly influence i.
                   *-----------------------------------------------------------*/

                  /* only connections opposite in sign to a_{i1,i1} count */
                  sgn = 1;
                  if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
                  /* Diagonal block part of row i1 */
                  for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
                  {
                     i2 = A_diag_j[jj1];
                     if (num_functions == 1 || dof_func[i1] == dof_func[i2])
                     {
                        if (P_marker[i2] >= jj_begin_row &&
                            (sgn*A_diag_data[jj1]) < 0 )
                        {
                           sum += A_diag_data[jj1];
                        }
                     }
                  }

                  /* Off-Diagonal block part of row i1 */
                  if (num_procs > 1)
                  {
                     for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
                     {
                        i2 = A_offd_j[jj1];
                        if (num_functions == 1 || dof_func[i1] == dof_func[i2])
                        {
                           if (P_marker_offd[i2] >= jj_begin_row_offd
                               && (sgn*A_offd_data[jj1]) < 0)
                           {
                              sum += A_offd_data[jj1];
                           }
                        }
                     }
                  }

                  if (sum != 0)
                  {
                     distribute = A_diag_data[jj] / sum;

                     /*-----------------------------------------------------------
                      * Loop over row of A for point i1 and do the distribution.
                      *-----------------------------------------------------------*/

                     /* Diagonal block part of row i1 */
                     for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
                     {
                        i2 = A_diag_j[jj1];
                        if (num_functions == 1 || dof_func[i1] == dof_func[i2])
                        {
                           if (P_marker[i2] >= jj_begin_row
                               && (sgn*A_diag_data[jj1]) < 0)
                           {
                              P_diag_data[P_marker[i2]]
                                 += distribute * A_diag_data[jj1];
                           }
                        }
                     }

                     /* Off-Diagonal block part of row i1 */
                     if (num_procs > 1)
                     {
                        for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
                        {
                           i2 = A_offd_j[jj1];
                           if (num_functions == 1 || dof_func[i1] == dof_func[i2])
                           {
                              if (P_marker_offd[i2] >= jj_begin_row_offd
                                  && (sgn*A_offd_data[jj1]) < 0)
                              {
                                 P_offd_data[P_marker_offd[i2]]
                                    += distribute * A_offd_data[jj1];
                              }
                           }
                        }
                     }
                  }
                  else /* sum = 0 - only add to diag if the same function type */
                  {
                     if (num_functions == 1 || dof_func[i] == dof_func[i1])
                        diagonal += A_diag_data[jj];
                  }
               }

               /*--------------------------------------------------------------
                * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
                * into the diagonal. (only if the same function type)
                *--------------------------------------------------------------*/

               else if (CF_marker[i1] != -3)
               {
                  if (num_functions == 1 || dof_func[i] == dof_func[i1])
                     diagonal += A_diag_data[jj];
               }
            }

            /*----------------------------------------------------------------
             * Still looping over ith row of A. Next, loop over the
             * off-diagonal part of A
             *---------------------------------------------------------------*/

            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  i1 = A_offd_j[jj];

                  /*--------------------------------------------------------------
                   * Case 1: neighbor i1 is a C-point and strongly influences i,
                   * accumulate a_{i,i1} into the interpolation weight.
                   *--------------------------------------------------------------*/

                  if (P_marker_offd[i1] >= jj_begin_row_offd)
                  {
                     P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
                  }

                  /*------------------------------------------------------------
                   * Case 2: neighbor i1 is an F-point and strongly influences i,
                   * distribute a_{i,i1} to C-points that strongly influence i.
                   * Note: currently no distribution to the diagonal in this case.
                   * AGAIN, we only want to distribute to points of the SAME
                   * function type.
                   *-----------------------------------------------------------*/

                  else if (P_marker_offd[i1] == strong_f_marker)
                  {
                     sum = zero;

                     /*---------------------------------------------------------
                      * Loop over row of A_ext for point i1 and calculate the sum
                      * of the connections to c-points that strongly influence i.
                      *---------------------------------------------------------*/

                     /* find row number */
                     c_num = A_offd_j[jj];

                     sgn = 1;
                     if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;
                     for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
                     {
                        i2 = A_ext_j[jj1];
                        /* NOTE(review): i2 can be a negative offd encoding
                           (-k-1), so dof_func[i2] below would read out of
                           bounds when num_functions > 1 -- confirm upstream
                           whether this path is only used with num_functions==1 */
                        if (num_functions == 1 || dof_func[i1] == dof_func[i2])
                        {
                           if (i2 > -1)
                           {
                              /* in the diagonal block */
                              if (P_marker[i2] >= jj_begin_row
                                  && (sgn*A_ext_data[jj1]) < 0)
                              {
                                 sum += A_ext_data[jj1];
                              }
                           }
                           else
                           {
                              /* in the off_diagonal block */
                              if (P_marker_offd[-i2-1] >= jj_begin_row_offd
                                  && (sgn*A_ext_data[jj1]) < 0)
                              {
                                 sum += A_ext_data[jj1];
                              }
                           }
                        }
                     }

                     if (sum != 0)
                     {
                        distribute = A_offd_data[jj] / sum;

                        /*---------------------------------------------------------
                         * Loop over row of A_ext for point i1 and do
                         * the distribution.
                         *--------------------------------------------------------*/

                        /* Diagonal block part of row i1 */
                        for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
                        {
                           i2 = A_ext_j[jj1];
                           if (num_functions == 1 || dof_func[i1] == dof_func[i2])
                           {
                              if (i2 > -1) /* in the diagonal block */
                              {
                                 if (P_marker[i2] >= jj_begin_row
                                     && (sgn*A_ext_data[jj1]) < 0)
                                 {
                                    P_diag_data[P_marker[i2]]
                                       += distribute * A_ext_data[jj1];
                                 }
                              }
                              else
                              {
                                 /* in the off_diagonal block */
                                 if (P_marker_offd[-i2-1] >= jj_begin_row_offd
                                     && (sgn*A_ext_data[jj1]) < 0)
                                    P_offd_data[P_marker_offd[-i2-1]]
                                       += distribute * A_ext_data[jj1];
                              }
                           }
                        }
                     }
                     else /* sum = 0 */
                     {
                        if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                           diagonal += A_offd_data[jj];
                     }
                  }

                  /*-----------------------------------------------------------
                   * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
                   * into the diagonal.
                   *-----------------------------------------------------------*/

                  else if (CF_marker_offd[i1] != -3)
                  {
                     if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                        diagonal += A_offd_data[jj];
                  }
               }
            }

            /*-----------------------------------------------------------------
             * Set interpolation weight by dividing by the diagonal.
             *-----------------------------------------------------------------*/

            if (diagonal == 0.0)
            {
               if (print_level)
                  hypre_printf(" Warning! zero diagonal! Proc id %d row %d\n", my_id,i);
               for (jj = jj_begin_row; jj < jj_end_row; jj++)
               {
                  P_diag_data[jj] = 0.0;
               }
               for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
               {
                  P_offd_data[jj] = 0.0;
               }
            }
            else
            {
               for (jj = jj_begin_row; jj < jj_end_row; jj++)
               {
                  P_diag_data[jj] /= -diagonal;
               }
               for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
               {
                  P_offd_data[jj] /= -diagonal;
               }
            }
         }

         /* use a fresh marker value for the next row, so stale strong-F
            marks from this row cannot be misread in the next one */
         strong_f_marker--;

         P_offd_i[i+1] = jj_counter_offd;
      }
      hypre_TFree(P_marker);
      hypre_TFree(P_marker_offd);
   }

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      /* truncation reallocates the CSR arrays -- refresh the local views */
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* compress P_offd's column space: P_offd_j currently holds A-offd column
      indices; keep only the columns actually used and renumber them
      0..num_cols_P_offd-1 */
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < num_cols_A_offd; i++)
         P_marker[i] = 0;

      num_cols_P_offd = 0;
      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            num_cols_P_offd++;
            P_marker[index] = 1;
         }
      }

      col_map_offd_P = hypre_CTAlloc(HYPRE_Int,num_cols_P_offd);

      /* collect the surviving A-offd indices in ascending order */
      index = 0;
      for (i=0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index]==0) index++;
         col_map_offd_P[i] = index++;
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(col_map_offd_P,
                                          P_offd_j[i],
                                          num_cols_P_offd);
      hypre_TFree(P_marker);
   }

   /* restore the special F-point marker to a plain F-point marker */
   for (i=0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }

   hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse_offd);

   *P_ptr = P;

   hypre_TFree(CF_marker_offd);
   hypre_TFree(dof_func_offd);
   hypre_TFree(int_buf_data);
   hypre_TFree(fine_to_coarse);
   hypre_TFree(fine_to_coarse_offd);
   hypre_TFree(coarse_counter);
   hypre_TFree(jj_count);
   hypre_TFree(jj_count_offd);

   if (num_procs > 1) hypre_CSRMatrixDestroy(A_ext);

   return(0);
}
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGTruncandBuild
 *
 * Truncates the interpolation matrix P (dropping entries smaller than
 * trunc_factor times the row max and/or keeping at most max_elmts entries
 * per row), then compresses P's off-diagonal column space to the columns
 * still in use and rebuilds P's communication package.
 *
 * Returns 0.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGTruncandBuild( hypre_ParCSRMatrix *P,
                              HYPRE_Real trunc_factor,
                              HYPRE_Int max_elmts)
{
   hypre_CSRMatrix *P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_ParCSRCommPkg *commpkg_P = hypre_ParCSRMatrixCommPkg(P);
   HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(P);
   HYPRE_Int *P_offd_i = hypre_CSRMatrixI(P_offd);
   HYPRE_Int *P_offd_j = hypre_CSRMatrixJ(P_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(P_offd);
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(P_offd);

   HYPRE_Int *new_col_map_offd;   /* compressed offd column map */
   HYPRE_Int P_offd_size=0, new_num_cols_offd;
   HYPRE_Int *P_marker;           /* 0/1 flags: offd column still referenced */
   HYPRE_Int i;
   HYPRE_Int index;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */

   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      /* truncation may reallocate the CSR arrays -- refresh the views */
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_size = P_offd_i[n_fine];
   }

   new_num_cols_offd = 0;
   if (P_offd_size)
   {
      P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd);

/*#define HYPRE_SMP_PRIVATE i
#include "../utilities/hypre_smp_forloop.h"*/
      for (i=0; i < num_cols_offd; i++)
         P_marker[i] = 0;

      /* flag every offd column still referenced and count them */
      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            new_num_cols_offd++;
            P_marker[index] = 1;
         }
      }

      if (new_num_cols_offd)
         new_col_map_offd = hypre_CTAlloc(HYPRE_Int,new_num_cols_offd);

      /* temporarily store the surviving OLD offd indices (ascending) so
         P_offd_j can be renumbered by binary search just below */
      index = 0;
      for (i=0; i < new_num_cols_offd; i++)
      {
         while (P_marker[index]==0) index++;
         new_col_map_offd[i] = index++;
      }

/*#define HYPRE_SMP_PRIVATE i
#include "../utilities/hypre_smp_forloop.h"*/
      for (i=0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(new_col_map_offd,
                                          P_offd_j[i],
                                          new_num_cols_offd);
   }

   /* second pass over the markers: overwrite the temporary old indices with
      the corresponding GLOBAL column ids from the original column map
      (no-op when P_offd_size == 0, since new_num_cols_offd is 0 then) */
   index = 0;
   for(i = 0; i < new_num_cols_offd; i++)
   {
      while (P_marker[index] == 0) index++;

      new_col_map_offd[i] = col_map_offd[index];
      index++;
   }

   if (P_offd_size) hypre_TFree(P_marker);

   if (new_num_cols_offd)
   {
      /* replace the old column map with the compressed one */
      hypre_TFree(col_map_offd);
      hypre_ParCSRMatrixColMapOffd(P) = new_col_map_offd;
      hypre_CSRMatrixNumCols(P_offd) = new_num_cols_offd;
   }

   /* the old communication package is stale once columns are renumbered */
   if (commpkg_P != NULL) hypre_MatvecCommPkgDestroy(commpkg_P);
   hypre_MatvecCommPkgCreate(P);

   return(0);
}
/*--------------------------------------------------------------------------
 * hypre_CreateC
 *
 * Builds C with the same sparsity pattern and partitioning as A, where each
 * row's diagonal entry becomes 1 - w (the diagonal is assumed to be stored
 * first in each row of A's diag block) and every other entry a_ij becomes
 * -w * a_ij / a_ii.  When w == 0, a per-row weight is used instead of w:
 * the sum of absolute values of the entire row, so the diagonal becomes
 * 1 - a_ii/rowsum and off-diagonals become -a_ij/rowsum.
 *
 * C does not own the row/col starts arrays (they remain A's); A's
 * off-diagonal column map is copied into C.
 *--------------------------------------------------------------------------*/
hypre_ParCSRMatrix *hypre_CreateC( hypre_ParCSRMatrix *A,
                                   HYPRE_Real w)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);

   /* read-only views into A */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int       *row_starts = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_Int       *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
   HYPRE_Int        num_rows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int        num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Int        global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);

   /* the matrix being built, plus views into its storage */
   hypre_ParCSRMatrix *C;
   hypre_CSRMatrix    *C_diag;
   hypre_CSRMatrix    *C_offd;
   HYPRE_Real *C_diag_data;
   HYPRE_Int  *C_diag_i;
   HYPRE_Int  *C_diag_j;
   HYPRE_Real *C_offd_data;
   HYPRE_Int  *C_offd_i;
   HYPRE_Int  *C_offd_j;
   HYPRE_Int  *col_map_offd_C;

   HYPRE_Int  row, jj, diag_pos;
   HYPRE_Real scale;       /* multiplier applied to off-diagonal entries */
   HYPRE_Real row_weight;  /* |row| sum, used only when w == 0 */

   /* C mirrors A's pattern, global size, and partitioning */
   C = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_rows, row_starts,
                                row_starts, num_cols_offd, A_diag_i[num_rows], A_offd_i[num_rows]);
   hypre_ParCSRMatrixInitialize(C);

   C_diag = hypre_ParCSRMatrixDiag(C);
   C_offd = hypre_ParCSRMatrixOffd(C);
   C_diag_i = hypre_CSRMatrixI(C_diag);
   C_diag_j = hypre_CSRMatrixJ(C_diag);
   C_diag_data = hypre_CSRMatrixData(C_diag);
   C_offd_i = hypre_CSRMatrixI(C_offd);
   C_offd_j = hypre_CSRMatrixJ(C_offd);
   C_offd_data = hypre_CSRMatrixData(C_offd);
   col_map_offd_C = hypre_ParCSRMatrixColMapOffd(C);

   /* the row/col starts arrays stay owned by A */
   hypre_ParCSRMatrixOwnsRowStarts(C) = 0;
   hypre_ParCSRMatrixOwnsColStarts(C) = 0;

   for (row = 0; row < num_cols_offd; row++)
      col_map_offd_C[row] = col_map_offd_A[row];

   for (row = 0; row < num_rows; row++)
   {
      diag_pos = A_diag_i[row];   /* diagonal entry is stored first */

      scale = -w/A_diag_data[diag_pos];
      C_diag_data[diag_pos] = 1.0-w;
      C_diag_j[diag_pos] = A_diag_j[diag_pos];

      if (w == 0)
      {
         /* weight by the sum of absolute values of the whole row
            instead of the fixed w */
         row_weight = fabs(A_diag_data[diag_pos]);
         for (jj = diag_pos+1; jj < A_diag_i[row+1]; jj++)
            row_weight += fabs(A_diag_data[jj]);
         for (jj = A_offd_i[row]; jj < A_offd_i[row+1]; jj++)
            row_weight += fabs(A_offd_data[jj]);
         scale = -1/row_weight;
         C_diag_data[diag_pos] = 1.0-A_diag_data[diag_pos]/row_weight;
      }

      C_diag_i[row] = diag_pos;
      C_offd_i[row] = A_offd_i[row];

      /* scale and copy the remaining diag entries of this row */
      for (jj = diag_pos+1; jj < A_diag_i[row+1]; jj++)
      {
         C_diag_data[jj] = A_diag_data[jj]*scale;
         C_diag_j[jj] = A_diag_j[jj];
      }
      /* scale and copy the offd entries of this row */
      for (jj = A_offd_i[row]; jj < A_offd_i[row+1]; jj++)
      {
         C_offd_data[jj] = A_offd_data[jj]*scale;
         C_offd_j[jj] = A_offd_j[jj];
      }
   }
   C_diag_i[num_rows] = A_diag_i[num_rows];
   C_offd_i[num_rows] = A_offd_i[num_rows];

   return C;
}
|
BsplineFunctor.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: John R. Gergely, University of Illinois at Urbana-Champaign
// Ken Esler, kpesler@gmail.com, University of Illinois at Urbana-Champaign
// Miguel Morales, moralessilva2@llnl.gov, Lawrence Livermore National Laboratory
// Raymond Clay III, j.k.rofling@gmail.com, Lawrence Livermore National Laboratory
// Jeremy McMinnis, jmcminis@gmail.com, University of Illinois at Urbana-Champaign
// Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
// Jaron T. Krogel, krogeljt@ornl.gov, Oak Ridge National Laboratory
// Mark A. Berrill, berrillma@ornl.gov, Oak Ridge National Laboratory
// Amrita Mathuriya, amrita.mathuriya@intel.com, Intel Corp.
//
// File created by: Ken Esler, kpesler@gmail.com, University of Illinois at Urbana-Champaign
//////////////////////////////////////////////////////////////////////////////////////
#ifndef QMCPLUSPLUS_BSPLINE_FUNCTOR_H
#define QMCPLUSPLUS_BSPLINE_FUNCTOR_H
#include "Numerics/OptimizableFunctorBase.h"
#include "Utilities/ProgressReportEngine.h"
#include "OhmmsData/AttributeSet.h"
#include "Numerics/LinearFit.h"
#include "simd/allocator.hpp"
#include <cstdio>
namespace qmcplusplus
{
template<class T>
struct BsplineFunctor : public OptimizableFunctorBase
{
  typedef real_type value_type;
  /// number of variational parameters; the spline holds NumParams + 4 coefficients
  int NumParams;
  int Dummy;
  /// cubic B-spline basis matrices (4x4, row-major) for the value and its
  /// 1st/2nd/3rd derivatives, applied to the monomial vector [t^3, t^2, t, 1]
  const real_type A[16], dA[16], d2A[16], d3A[16];
  /// spline coefficients; first/last entries encode boundary (cusp) conditions
  aligned_vector<real_type> SplineCoefs;
  //static const real_type A[16], dA[16], d2A[16];
  /// knot spacing and its inverse (DeltaR = cutoff_radius / (numKnots - 1))
  real_type DeltaR, DeltaRInv;
  /// du/dr at the origin imposed on the spline (electron-nucleus/electron-electron cusp)
  real_type CuspValue;
  /// cached value/first/second derivative filled by evaluateAll
  real_type Y, dY, d2Y;
  // Stores the derivatives w.r.t. SplineCoefs
  // of the u, du/dr, and d2u/dr2
  std::vector<TinyVector<real_type, 3>> SplineDerivs;
  /// variational parameters (interior spline coefficients)
  std::vector<real_type> Parameters;
  std::vector<std::string> ParameterNames;
  std::string elementType, pairType;
  std::string fileName;
  int ResetCount;
  /// true when the parameters are excluded from optimization
  bool notOpt;
  /// true for periodic boundary conditions (cutoff may default to Wigner-Seitz radius)
  bool periodic;
  ///constructor
  BsplineFunctor(real_type cusp = 0.0)
      : NumParams(0),
        A{-1.0 / 6.0,
          3.0 / 6.0,
          -3.0 / 6.0,
          1.0 / 6.0,
          3.0 / 6.0,
          -6.0 / 6.0,
          0.0 / 6.0,
          4.0 / 6.0,
          -3.0 / 6.0,
          3.0 / 6.0,
          3.0 / 6.0,
          1.0 / 6.0,
          1.0 / 6.0,
          0.0 / 6.0,
          0.0 / 6.0,
          0.0 / 6.0},
        dA{0.0, -0.5, 1.0, -0.5, 0.0, 1.5, -2.0, 0.0, 0.0, -1.5, 1.0, 0.5, 0.0, 0.5, 0.0, 0.0},
        d2A{0.0, 0.0, -1.0, 1.0, 0.0, 0.0, 3.0, -2.0, 0.0, 0.0, -3.0, 1.0, 0.0, 0.0, 1.0, 0.0},
        d3A{0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, -3.0, 0.0, 0.0, 0.0, 1.0},
        CuspValue(cusp),
        ResetCount(0),
        notOpt(false),
        periodic(true)
  {
    cutoff_radius = 0.0;
  }
  /// polymorphic copy used when duplicating Jastrow components
  OptimizableFunctorBase* makeClone() const { return new BsplineFunctor(*this); }
  void setCusp(real_type c) { CuspValue = c; }
  void setPeriodic(bool p) { periodic = p; }
  /** allocate storage for n variational parameters and derive the knot grid
   * @param n number of parameters; coefficients = n + 4, knots = n + 2
   */
  void resize(int n)
  {
    NumParams    = n;
    int numCoefs = NumParams + 4;
    int numKnots = numCoefs - 2;
    DeltaR       = cutoff_radius / (real_type)(numKnots - 1);
    DeltaRInv    = 1.0 / DeltaR;
    Parameters.resize(n);
    SplineCoefs.resize(numCoefs);
    SplineDerivs.resize(numCoefs);
  }
  /** rebuild SplineCoefs from Parameters, re-imposing the cusp condition */
  void reset()
  {
    int numCoefs = NumParams + 4;
    int numKnots = numCoefs - 2;
    DeltaR       = cutoff_radius / (real_type)(numKnots - 1);
    DeltaRInv    = 1.0 / DeltaR;
    for (int i = 0; i < SplineCoefs.size(); i++)
      SplineCoefs[i] = 0.0;
    // Ensure that cusp conditions is satisfied at the origin
    SplineCoefs[1] = Parameters[0];
    SplineCoefs[2] = Parameters[1];
    // coefficient 0 chosen so that du/dr(0) = CuspValue (finite-difference across the origin)
    SplineCoefs[0] = Parameters[1] - 2.0 * DeltaR * CuspValue;
    for (int i = 2; i < Parameters.size(); i++)
      SplineCoefs[i + 1] = Parameters[i];
  }
  /** compute value, gradient and laplacian for [iStart, iEnd) pairs
   * @param iat dummy
   * @param iStart starting particle index
   * @param iEnd ending particle index
   * @param _distArray distance arrUay
   * @param _valArray u(r_j) for j=[iStart,iEnd)
   * @param _gradArray du(r_j)/dr /r_j for j=[iStart,iEnd)
   * @param _lapArray d2u(r_j)/dr2 for j=[iStart,iEnd)
   * @param distArrayCompressed temp storage to filter r_j < cutoff_radius
   * @param distIndices temp storage for the compressed index
   */
  void evaluateVGL(const int iat,
                   const int iStart,
                   const int iEnd,
                   const T* _distArray,
                   T* restrict _valArray,
                   T* restrict _gradArray,
                   T* restrict _laplArray,
                   T* restrict distArrayCompressed,
                   int* restrict distIndices) const;
  /** evaluate sum of the pair potentials for [iStart,iEnd)
   * @param iat dummy
   * @param iStart starting particle index
   * @param iEnd ending particle index
   * @param _distArray distance arrUay
   * @param distArrayCompressed temp storage to filter r_j < cutoff_radius
   * @return \f$\sum u(r_j)\f$ for r_j < cutoff_radius
   */
  T evaluateV(const int iat,
              const int iStart,
              const int iEnd,
              const T* restrict _distArray,
              T* restrict distArrayCompressed) const;
  /** evaluate u(r); returns 0 beyond the cutoff */
  inline real_type evaluate(real_type r)
  {
    if (r >= cutoff_radius)
      return 0.0;
    // map r onto the knot grid: integer part selects the segment, fraction is t
    r *= DeltaRInv;
    real_type ipart, t;
    t     = std::modf(r, &ipart);
    int i = (int)ipart;
    real_type tp[4];
    tp[0] = t * t * t;
    tp[1] = t * t;
    tp[2] = t;
    tp[3] = 1.0;
    return (SplineCoefs[i + 0] * (A[0] * tp[0] + A[1] * tp[1] + A[2] * tp[2] + A[3] * tp[3]) +
            SplineCoefs[i + 1] * (A[4] * tp[0] + A[5] * tp[1] + A[6] * tp[2] + A[7] * tp[3]) +
            SplineCoefs[i + 2] * (A[8] * tp[0] + A[9] * tp[1] + A[10] * tp[2] + A[11] * tp[3]) +
            SplineCoefs[i + 3] * (A[12] * tp[0] + A[13] * tp[1] + A[14] * tp[2] + A[15] * tp[3]));
  }
  /// evaluate u(r) and cache Y/dY/d2Y; rinv is unused here
  inline real_type evaluate(real_type r, real_type rinv) { return Y = evaluate(r, dY, d2Y); }
  /// evaluate and cache value/first/second derivative in Y, dY, d2Y
  inline void evaluateAll(real_type r, real_type rinv) { Y = evaluate(r, dY, d2Y); }
  /** evaluate u(r), du/dr and d2u/dr2; all are 0 beyond the cutoff */
  inline real_type evaluate(real_type r, real_type& dudr, real_type& d2udr2)
  {
    if (r >= cutoff_radius)
    {
      dudr = d2udr2 = 0.0;
      return 0.0;
    }
    // real_type eps = 1.0e-5;
    // real_type dudr_FD = (evaluate(r+eps)-evaluate(r-eps))/(2.0*eps);
    // real_type d2udr2_FD = (evaluate(r+eps)+evaluate(r-eps)-2.0*evaluate(r))/(eps*eps);
    r *= DeltaRInv;
    real_type ipart, t;
    t     = std::modf(r, &ipart);
    int i = (int)ipart;
    real_type tp[4];
    tp[0] = t * t * t;
    tp[1] = t * t;
    tp[2] = t;
    tp[3] = 1.0;
    // chain rule: each d/dr pulls in one factor of DeltaRInv
    d2udr2 = DeltaRInv * DeltaRInv *
        (SplineCoefs[i + 0] * (d2A[0] * tp[0] + d2A[1] * tp[1] + d2A[2] * tp[2] + d2A[3] * tp[3]) +
         SplineCoefs[i + 1] * (d2A[4] * tp[0] + d2A[5] * tp[1] + d2A[6] * tp[2] + d2A[7] * tp[3]) +
         SplineCoefs[i + 2] * (d2A[8] * tp[0] + d2A[9] * tp[1] + d2A[10] * tp[2] + d2A[11] * tp[3]) +
         SplineCoefs[i + 3] * (d2A[12] * tp[0] + d2A[13] * tp[1] + d2A[14] * tp[2] + d2A[15] * tp[3]));
    dudr = DeltaRInv *
        (SplineCoefs[i + 0] * (dA[0] * tp[0] + dA[1] * tp[1] + dA[2] * tp[2] + dA[3] * tp[3]) +
         SplineCoefs[i + 1] * (dA[4] * tp[0] + dA[5] * tp[1] + dA[6] * tp[2] + dA[7] * tp[3]) +
         SplineCoefs[i + 2] * (dA[8] * tp[0] + dA[9] * tp[1] + dA[10] * tp[2] + dA[11] * tp[3]) +
         SplineCoefs[i + 3] * (dA[12] * tp[0] + dA[13] * tp[1] + dA[14] * tp[2] + dA[15] * tp[3]));
    // if (std::abs(dudr_FD-dudr) > 1.0e-8)
    //  std::cerr << "Error in BsplineFunction: dudr = " << dudr
    //  << " dudr_FD = " << dudr_FD << std::endl;
    // if (std::abs(d2udr2_FD-d2udr2) > 1.0e-4)
    //  std::cerr << "Error in BsplineFunction: r = " << r << " d2udr2 = " << dudr
    //  << " d2udr2_FD = " << d2udr2_FD << " rcut = " << cutoff_radius << std::endl;
    return (SplineCoefs[i + 0] * (A[0] * tp[0] + A[1] * tp[1] + A[2] * tp[2] + A[3] * tp[3]) +
            SplineCoefs[i + 1] * (A[4] * tp[0] + A[5] * tp[1] + A[6] * tp[2] + A[7] * tp[3]) +
            SplineCoefs[i + 2] * (A[8] * tp[0] + A[9] * tp[1] + A[10] * tp[2] + A[11] * tp[3]) +
            SplineCoefs[i + 3] * (A[12] * tp[0] + A[13] * tp[1] + A[14] * tp[2] + A[15] * tp[3]));
  }
  /** evaluate u(r) and its first three radial derivatives; all 0 beyond the cutoff */
  inline real_type evaluate(real_type r, real_type& dudr, real_type& d2udr2, real_type& d3udr3)
  {
    if (r >= cutoff_radius)
    {
      dudr = d2udr2 = d3udr3 = 0.0;
      return 0.0;
    }
    // real_type eps = 1.0e-5;
    // real_type dudr_FD = (evaluate(r+eps)-evaluate(r-eps))/(2.0*eps);
    // real_type d2udr2_FD = (evaluate(r+eps)+evaluate(r-eps)-2.0*evaluate(r))/(eps*eps);
    // real_type d3udr3_FD = (-1.0*evaluate(r+1.0*eps)
    //  +2.0*evaluate(r+0.5*eps)
    //  -2.0*evaluate(r-0.5*eps)
    //  +1.0*evaluate(r-1.0*eps))/(eps*eps*eps);
    r *= DeltaRInv;
    real_type ipart, t;
    t     = std::modf(r, &ipart);
    int i = (int)ipart;
    real_type tp[4];
    tp[0] = t * t * t;
    tp[1] = t * t;
    tp[2] = t;
    tp[3] = 1.0;
    d3udr3 = DeltaRInv * DeltaRInv * DeltaRInv *
        (SplineCoefs[i + 0] * (d3A[0] * tp[0] + d3A[1] * tp[1] + d3A[2] * tp[2] + d3A[3] * tp[3]) +
         SplineCoefs[i + 1] * (d3A[4] * tp[0] + d3A[5] * tp[1] + d3A[6] * tp[2] + d3A[7] * tp[3]) +
         SplineCoefs[i + 2] * (d3A[8] * tp[0] + d3A[9] * tp[1] + d3A[10] * tp[2] + d3A[11] * tp[3]) +
         SplineCoefs[i + 3] * (d3A[12] * tp[0] + d3A[13] * tp[1] + d3A[14] * tp[2] + d3A[15] * tp[3]));
    d2udr2 = DeltaRInv * DeltaRInv *
        (SplineCoefs[i + 0] * (d2A[0] * tp[0] + d2A[1] * tp[1] + d2A[2] * tp[2] + d2A[3] * tp[3]) +
         SplineCoefs[i + 1] * (d2A[4] * tp[0] + d2A[5] * tp[1] + d2A[6] * tp[2] + d2A[7] * tp[3]) +
         SplineCoefs[i + 2] * (d2A[8] * tp[0] + d2A[9] * tp[1] + d2A[10] * tp[2] + d2A[11] * tp[3]) +
         SplineCoefs[i + 3] * (d2A[12] * tp[0] + d2A[13] * tp[1] + d2A[14] * tp[2] + d2A[15] * tp[3]));
    dudr = DeltaRInv *
        (SplineCoefs[i + 0] * (dA[0] * tp[0] + dA[1] * tp[1] + dA[2] * tp[2] + dA[3] * tp[3]) +
         SplineCoefs[i + 1] * (dA[4] * tp[0] + dA[5] * tp[1] + dA[6] * tp[2] + dA[7] * tp[3]) +
         SplineCoefs[i + 2] * (dA[8] * tp[0] + dA[9] * tp[1] + dA[10] * tp[2] + dA[11] * tp[3]) +
         SplineCoefs[i + 3] * (dA[12] * tp[0] + dA[13] * tp[1] + dA[14] * tp[2] + dA[15] * tp[3]));
    // if (std::abs(dudr_FD-dudr) > 1.0e-8)
    //  std::cerr << "Error in BsplineFunction: dudr = " << dudr
    //  << " dudr_FD = " << dudr_FD << std::endl;
    // if (std::abs(d2udr2_FD-d2udr2) > 1.0e-4)
    //  std::cerr << "Error in BsplineFunction: r = " << r << " d2udr2 = " << dudr
    //  << " d2udr2_FD = " << d2udr2_FD << " rcut = " << cutoff_radius << std::endl;
    // if (std::abs(d3udr3_FD-d3udr3) > 1.0e-4)
    //  std::cerr << "Error in BsplineFunction: r = " << r << " d3udr3 = " << dudr
    //  << " d3udr3_FD = " << d3udr3_FD << " rcut = " << cutoff_radius << std::endl;
    return (SplineCoefs[i + 0] * (A[0] * tp[0] + A[1] * tp[1] + A[2] * tp[2] + A[3] * tp[3]) +
            SplineCoefs[i + 1] * (A[4] * tp[0] + A[5] * tp[1] + A[6] * tp[2] + A[7] * tp[3]) +
            SplineCoefs[i + 2] * (A[8] * tp[0] + A[9] * tp[1] + A[10] * tp[2] + A[11] * tp[3]) +
            SplineCoefs[i + 3] * (A[12] * tp[0] + A[13] * tp[1] + A[14] * tp[2] + A[15] * tp[3]));
  }
  /** derivatives of u, du/dr, d2u/dr2 w.r.t. each variational parameter
   * @param r distance (no derivatives written when r >= cutoff_radius)
   * @param derivs out: per-parameter (u, du/dr, d2u/dr2) triplets
   * @return true when r is inside the cutoff and derivs was filled
   */
  inline bool evaluateDerivatives(real_type r, std::vector<TinyVector<real_type, 3>>& derivs)
  {
    if (r >= cutoff_radius)
      return false;
    r *= DeltaRInv;
    real_type ipart, t;
    t     = std::modf(r, &ipart);
    int i = (int)ipart;
    real_type tp[4];
    tp[0] = t * t * t;
    tp[1] = t * t;
    tp[2] = t;
    tp[3] = 1.0;
    // slot 0 feeds the cusp condition (coef 0 mirrors Parameters[1]); cleared first
    SplineDerivs[0] = TinyVector<real_type, 3>(0.0);
    // d/dp_i u(r)
    SplineDerivs[i + 0][0] = A[0] * tp[0] + A[1] * tp[1] + A[2] * tp[2] + A[3] * tp[3];
    SplineDerivs[i + 1][0] = A[4] * tp[0] + A[5] * tp[1] + A[6] * tp[2] + A[7] * tp[3];
    SplineDerivs[i + 2][0] = A[8] * tp[0] + A[9] * tp[1] + A[10] * tp[2] + A[11] * tp[3];
    SplineDerivs[i + 3][0] = A[12] * tp[0] + A[13] * tp[1] + A[14] * tp[2] + A[15] * tp[3];
    // d/dp_i du/dr
    SplineDerivs[i + 0][1] = DeltaRInv * (dA[1] * tp[1] + dA[2] * tp[2] + dA[3] * tp[3]);
    SplineDerivs[i + 1][1] = DeltaRInv * (dA[5] * tp[1] + dA[6] * tp[2] + dA[7] * tp[3]);
    SplineDerivs[i + 2][1] = DeltaRInv * (dA[9] * tp[1] + dA[10] * tp[2] + dA[11] * tp[3]);
    SplineDerivs[i + 3][1] = DeltaRInv * (dA[13] * tp[1] + dA[14] * tp[2] + dA[15] * tp[3]);
    // d/dp_i d2u/dr2
    SplineDerivs[i + 0][2] = DeltaRInv * DeltaRInv * (d2A[2] * tp[2] + d2A[3] * tp[3]);
    SplineDerivs[i + 1][2] = DeltaRInv * DeltaRInv * (d2A[6] * tp[2] + d2A[7] * tp[3]);
    SplineDerivs[i + 2][2] = DeltaRInv * DeltaRInv * (d2A[10] * tp[2] + d2A[11] * tp[3]);
    SplineDerivs[i + 3][2] = DeltaRInv * DeltaRInv * (d2A[14] * tp[2] + d2A[15] * tp[3]);
    // map spline-coefficient derivatives back to parameter derivatives
    // (coefficient k corresponds to parameter k-1; coefficient 0 folds into parameter 1)
    int imin = std::max(i, 1);
    int imax = std::min(i + 4, NumParams + 1);
    for (int n = imin; n < imax; ++n)
      derivs[n - 1] = SplineDerivs[n];
    derivs[1] += SplineDerivs[0];
    //real_type v[4],dv[4],d2v[4];
    //v[0] = A[ 0]*tp[0] + A[ 1]*tp[1] + A[ 2]*tp[2] + A[ 3]*tp[3];
    //v[1] = A[ 4]*tp[0] + A[ 5]*tp[1] + A[ 6]*tp[2] + A[ 7]*tp[3];
    //v[2] = A[ 8]*tp[0] + A[ 9]*tp[1] + A[10]*tp[2] + A[11]*tp[3];
    //v[3] = A[12]*tp[0] + A[13]*tp[1] + A[14]*tp[2] + A[15]*tp[3];
    //// d/dp_i du/dr
    //dv[0] = DeltaRInv * (dA[ 1]*tp[1] + dA[ 2]*tp[2] + dA[ 3]*tp[3]);
    //dv[1] = DeltaRInv * (dA[ 5]*tp[1] + dA[ 6]*tp[2] + dA[ 7]*tp[3]);
    //dv[2] = DeltaRInv * (dA[ 9]*tp[1] + dA[10]*tp[2] + dA[11]*tp[3]);
    //dv[3] = DeltaRInv * (dA[13]*tp[1] + dA[14]*tp[2] + dA[15]*tp[3]);
    //// d/dp_i d2u/dr2
    //d2v[0] = DeltaRInv * DeltaRInv * (d2A[ 2]*tp[2] + d2A[ 3]*tp[3]);
    //d2v[1] = DeltaRInv * DeltaRInv * (d2A[ 6]*tp[2] + d2A[ 7]*tp[3]);
    //d2v[2] = DeltaRInv * DeltaRInv * (d2A[10]*tp[2] + d2A[11]*tp[3]);
    //d2v[3] = DeltaRInv * DeltaRInv * (d2A[14]*tp[2] + d2A[15]*tp[3]);
    //int imin=std::max(i,1);
    //int imax=std::min(i+4,NumParams+1)-1;
    //int n=imin-1, j=imin-i;
    //while(n<imax && j<4)
    //{
    //  derivs[n] = TinyVector<real_type,3>(v[j],dv[j],d2v[j]);
    //  n++; j++;
    //}
    //if(i==0) derivs[1]+= TinyVector<real_type,3>(v[0],dv[0],d2v[0]);
    return true;
  }
  /** derivatives of u(r) only w.r.t. each variational parameter
   * @return true when r is inside the cutoff and derivs was filled
   */
  inline bool evaluateDerivatives(real_type r, std::vector<real_type>& derivs)
  {
    if (r >= cutoff_radius)
      return false;
    real_type tp[4], v[4], ipart, t;
    t     = std::modf(r * DeltaRInv, &ipart);
    tp[0] = t * t * t;
    tp[1] = t * t;
    tp[2] = t;
    tp[3] = 1.0;
    v[0] = A[0] * tp[0] + A[1] * tp[1] + A[2] * tp[2] + A[3] * tp[3];
    v[1] = A[4] * tp[0] + A[5] * tp[1] + A[6] * tp[2] + A[7] * tp[3];
    v[2] = A[8] * tp[0] + A[9] * tp[1] + A[10] * tp[2] + A[11] * tp[3];
    v[3] = A[12] * tp[0] + A[13] * tp[1] + A[14] * tp[2] + A[15] * tp[3];
    // translate coefficient indices into parameter indices; coefficient 0
    // contributes to parameter 1 through the cusp condition
    int i    = (int)ipart;
    int imin = std::max(i, 1);
    int imax = std::min(i + 4, NumParams + 1) - 1;
    int n = imin - 1, j = imin - i;
    while (n < imax && j < 4)
    {
      derivs[n] = v[j];
      n++;
      j++;
    }
    if (i == 0)
      derivs[1] += v[0];
    return true;
  }
  /// OptimizableFunctorBase interface: pair function value
  inline real_type f(real_type r)
  {
    if (r >= cutoff_radius)
      return 0.0;
    return evaluate(r);
  }
  /// OptimizableFunctorBase interface: first radial derivative
  inline real_type df(real_type r)
  {
    if (r >= cutoff_radius)
      return 0.0;
    real_type du, d2u;
    evaluate(r, du, d2u);
    return du;
  }
  /** parse a <correlation> XML node: size/cutoff attributes plus a
   * "coefficients" child; refits the spline when the parameter count changes.
   * @return true if any parameter is nonzero after parsing
   */
  bool put(xmlNodePtr cur)
  {
    ReportEngine PRE("BsplineFunctor", "put(xmlNodePtr)");
    //CuspValue = -1.0e10;
    NumParams = 0;
    //cutoff_radius = 0.0;
    OhmmsAttributeSet rAttrib;
    real_type radius = -1.0;
    rAttrib.add(NumParams, "size");
    rAttrib.add(radius, "rcut");
    rAttrib.add(radius, "cutoff");
    rAttrib.put(cur);
    // no explicit cutoff: periodic cells fall back to the Wigner-Seitz radius
    // already stored in cutoff_radius; open BC must specify one
    if (radius < 0.0)
      if (periodic)
      {
        app_log() << "  Jastrow cutoff unspecified.  Setting to Wigner-Seitz radius = " << cutoff_radius << std::endl;
        app_log() << std::endl;
      }
      else
      {
        APP_ABORT("  Jastrow cutoff unspecified.  Cutoff must be given when using open boundary conditions");
      }
    else if (periodic && radius > cutoff_radius)
    {
      if (radius - cutoff_radius > 1e-4)
      {
        APP_ABORT("  The Jastrow cutoff specified should not be larger than Wigner-Seitz radius.");
      }
      else
      {
        app_log() << "  The Jastrow cutoff specified is slightly larger than the Wigner-Seitz radius.";
        app_log() << "  Setting to Wigner-Seitz radius = " << cutoff_radius << ".\n";
      }
    }
    else
      cutoff_radius = radius;
    if (NumParams == 0)
    {
      PRE.error("You must specify a positive number of parameters for the Bspline jastrow function.", true);
    }
    app_summary() << "     Number of parameters: " << NumParams << std::endl;
    app_summary() << "     Cusp: " << CuspValue << std::endl;
    app_summary() << "     Cutoff radius: " << cutoff_radius << std::endl;
    resize(NumParams);
    // Now read coefficents
    xmlNodePtr xmlCoefs = cur->xmlChildrenNode;
    while (xmlCoefs != NULL)
    {
      std::string cname((const char*)xmlCoefs->name);
      if (cname == "coefficients")
      {
        std::string type("0"), id("0");
        std::string optimize("yes");
        OhmmsAttributeSet cAttrib;
        cAttrib.add(id, "id");
        cAttrib.add(type, "type");
        cAttrib.add(optimize, "optimize");
        cAttrib.put(xmlCoefs);
        if (type != "Array")
        {
          PRE.error("Unknown correlation type " + type + " in BsplineFunctor." + "Resetting to \"Array\"");
          xmlNewProp(xmlCoefs, (const xmlChar*)"type", (const xmlChar*)"Array");
        }
        std::vector<real_type> params;
        putContent(params, xmlCoefs);
        if (params.size() == NumParams)
          Parameters = params;
        else
        {
          app_log() << "Changing number of Bspline parameters from " << params.size() << " to " << NumParams
                    << ".  Performing fit:\n";
          // Fit function to new number of parameters
          const int numPoints = 500;
          BsplineFunctor<T> tmp_func(CuspValue);
          tmp_func.cutoff_radius = cutoff_radius;
          tmp_func.resize(params.size());
          tmp_func.Parameters = params;
          tmp_func.reset();
          std::vector<real_type> y(numPoints);
          Matrix<real_type> basis(numPoints, NumParams);
          std::vector<TinyVector<real_type, 3>> derivs(NumParams);
          // sample the old spline, build the parameter-derivative basis, and
          // least-squares fit the new parameter set to reproduce it
          for (int i = 0; i < numPoints; i++)
          {
            real_type r = (real_type)i / (real_type)numPoints * cutoff_radius;
            y[i]        = tmp_func.evaluate(r);
            evaluateDerivatives(r, derivs);
            for (int j = 0; j < NumParams; j++)
              basis(i, j) = derivs[j][0];
          }
          resize(NumParams);
          LinearFit(y, basis, Parameters);
          app_log() << "New parameters are:\n";
          for (int i = 0; i < Parameters.size(); i++)
            app_log() << "   " << Parameters[i] << std::endl;
        }
        if (optimize == "yes")
        {
          notOpt = false;
        }
        else
        {
          notOpt = true;
        }
        // register each parameter with the optimizer as "<id>_<i>"
        for (int i = 0; i < NumParams; i++)
        {
          std::stringstream sstr;
          sstr << id << "_" << i;
          myVars.insert(sstr.str(), (value_type)Parameters[i], !notOpt, optimize::LOGLINEAR_P);
        }
        int left_pad_space = 5;
        app_log() << std::endl;
        myVars.print(app_log(), left_pad_space, true);
      }
      xmlCoefs = xmlCoefs->next;
    }
    reset();
    real_type zeros = 0;
    for (int i = 0; i < NumParams; i++)
      zeros += Parameters[i] * Parameters[i];
    return zeros > 1.0e-12; //true if Parameters are not zero
  }
  /** initialize the functor from tabulated (x, y) data via a least-squares fit
   * @param numPoints number of variational parameters to fit
   * @param x sample radii (all must be <= rcut)
   * @param y target values at the sample radii
   * @param cusp cusp condition imposed at the origin
   * @param rcut cutoff radius
   * @param id name stem used for the optimizer variables
   * @param optimize "yes" to expose the parameters to the optimizer
   */
  void initialize(int numPoints,
                  std::vector<real_type>& x,
                  std::vector<real_type>& y,
                  real_type cusp,
                  real_type rcut,
                  std::string& id,
                  std::string& optimize)
  {
    ReportEngine PRE("BsplineFunctor", "initialize");
    NumParams     = numPoints;
    cutoff_radius = rcut;
    CuspValue     = cusp;
    if (NumParams == 0)
    {
      PRE.error("You must specify a positive number of parameters for the Bspline jastrow function.", true);
    }
    app_log() << "Initializing BsplineFunctor from array. \n";
    app_log() << " size = " << NumParams << " parameters " << std::endl;
    app_log() << " cusp = " << CuspValue << std::endl;
    app_log() << " rcut = " << cutoff_radius << std::endl;
    resize(NumParams);
    int npts = x.size();
    Matrix<real_type> basis(npts, NumParams);
    std::vector<TinyVector<real_type, 3>> derivs(NumParams);
    for (int i = 0; i < npts; i++)
    {
      real_type r = x[i];
      if (r > cutoff_radius)
      {
        PRE.error("Error in BsplineFunctor::initialize: r > cutoff_radius.", true);
      }
      evaluateDerivatives(r, derivs);
      for (int j = 0; j < NumParams; j++)
        basis(i, j) = derivs[j][0];
    }
    resize(NumParams);
    LinearFit(y, basis, Parameters);
    app_log() << "New parameters are:\n";
    for (int i = 0; i < Parameters.size(); i++)
      app_log() << "   " << Parameters[i] << std::endl;
#if !defined(QMC_BUILD_SANDBOX_ONLY)
    if (optimize == "yes")
    {
      // Setup parameter names
      for (int i = 0; i < NumParams; i++)
      {
        std::stringstream sstr;
        sstr << id << "_" << i;
        myVars.insert(sstr.str(), (value_type)Parameters[i], true, optimize::LOGLINEAR_P);
      }
      myVars.print(app_log());
    }
    else
#endif
    {
      notOpt = true;
      app_log() << "Parameters of BsplineFunctor id:" << id << " are not being optimized.\n";
    }
    reset();
  }
  /// print the optimizable variables (no-op when not optimized)
  void reportStatus(std::ostream& os)
  {
    if (notOpt)
      return;
    myVars.print(os);
  }
  /// refresh optimizer indices from the active variable set
  void checkOutVariables(const opt_variables_type& active)
  {
    if (notOpt)
      return;
    myVars.getIndex(active);
  }
  /// register this functor's variables with the optimizer
  void checkInVariables(opt_variables_type& active)
  {
    if (notOpt)
      return;
    active.insertFrom(myVars);
  }
  /// pull updated values from the optimizer and rebuild the spline
  void resetParameters(const opt_variables_type& active)
  {
    if (notOpt)
      return;
    for (int i = 0; i < Parameters.size(); ++i)
    {
      int loc = myVars.where(i);
      if (loc >= 0)
      {
        Parameters[i] = std::real(myVars[i] = active[loc]);
      }
    }
    // if (ResetCount++ == 100)
    // {
    //   ResetCount = 0;
    //   if(ReportLevel) print();
    // }
    reset();
  }
  // check if this object has active optimizable parameters
  bool isOptimizable()
  {
    if (notOpt)
      return false;
    for (int i = 0; i < Parameters.size(); ++i)
    {
      int loc = myVars.where(i);
      if (loc >= 0)
        return true;
    }
    return false;
  }
};
template<typename T>
inline T BsplineFunctor<T>::evaluateV(const int iat,
                                      const int iStart,
                                      const int iEnd,
                                      const T* restrict _distArray,
                                      T* restrict distArrayCompressed) const
{
  const real_type* restrict distArray = _distArray + iStart;
  ASSUME_ALIGNED(distArrayCompressed);
  int iCount       = 0;
  const int iLimit = iEnd - iStart;
  // pass 1: compress distances inside the cutoff into a contiguous buffer so
  // the spline loop below vectorizes without per-lane branching
#pragma vector always
  for (int jat = 0; jat < iLimit; jat++)
  {
    real_type r = distArray[jat];
    // pick the distances smaller than the cutoff and avoid the reference atom
    if (r < cutoff_radius && iStart + jat != iat)
      distArrayCompressed[iCount++] = distArray[jat];
  }
  // pass 2: accumulate sum_j u(r_j) over the compressed distances
  real_type d = 0.0;
#pragma omp simd reduction(+ : d)
  for (int jat = 0; jat < iCount; jat++)
  {
    real_type r = distArrayCompressed[jat];
    r *= DeltaRInv;
    // split into spline segment index i and fractional coordinate t (manual
    // modf so the loop stays SIMD-friendly)
    int i        = (int)r;
    real_type t  = r - real_type(i);
    real_type tp0 = t * t * t;
    real_type tp1 = t * t;
    real_type tp2 = t;
    real_type d1 = SplineCoefs[i + 0] * (A[0] * tp0 + A[1] * tp1 + A[2] * tp2 + A[3]);
    real_type d2 = SplineCoefs[i + 1] * (A[4] * tp0 + A[5] * tp1 + A[6] * tp2 + A[7]);
    real_type d3 = SplineCoefs[i + 2] * (A[8] * tp0 + A[9] * tp1 + A[10] * tp2 + A[11]);
    real_type d4 = SplineCoefs[i + 3] * (A[12] * tp0 + A[13] * tp1 + A[14] * tp2 + A[15]);
    d += (d1 + d2 + d3 + d4);
  }
  return d;
}
template<typename T>
inline void BsplineFunctor<T>::evaluateVGL(const int iat,
                                           const int iStart,
                                           const int iEnd,
                                           const T* _distArray,
                                           T* restrict _valArray,
                                           T* restrict _gradArray,
                                           T* restrict _laplArray,
                                           T* restrict distArrayCompressed,
                                           int* restrict distIndices) const
{
  real_type dSquareDeltaRinv = DeltaRInv * DeltaRInv;
  constexpr real_type cZero(0);
  constexpr real_type cOne(1);
  constexpr real_type cMOne(-1);
  //    START_MARK_FIRST();
  ASSUME_ALIGNED(distIndices);
  ASSUME_ALIGNED(distArrayCompressed);
  int iCount                 = 0;
  int iLimit                 = iEnd - iStart;
  const real_type* distArray = _distArray + iStart;
  real_type* valArray        = _valArray + iStart;
  real_type* gradArray       = _gradArray + iStart;
  real_type* laplArray       = _laplArray + iStart;
  // pass 1: compress in-cutoff distances AND remember their original slots so
  // results can be scattered back after the vectorized spline loop
#pragma vector always
  for (int jat = 0; jat < iLimit; jat++)
  {
    real_type r = distArray[jat];
    if (r < cutoff_radius && iStart + jat != iat)
    {
      distIndices[iCount]         = jat;
      distArrayCompressed[iCount] = r;
      iCount++;
    }
  }
  // pass 2: evaluate u, (du/dr)/r and d2u/dr2 per compressed entry; only
  // entries that survived pass 1 are written, the rest keep their old values
#pragma omp simd
  for (int j = 0; j < iCount; j++)
  {
    real_type r    = distArrayCompressed[j];
    int iScatter   = distIndices[j];
    real_type rinv = cOne / r;
    r *= DeltaRInv;
    int iGather   = (int)r;
    real_type t   = r - real_type(iGather);
    real_type tp0 = t * t * t;
    real_type tp1 = t * t;
    real_type tp2 = t;
    real_type sCoef0 = SplineCoefs[iGather + 0];
    real_type sCoef1 = SplineCoefs[iGather + 1];
    real_type sCoef2 = SplineCoefs[iGather + 2];
    real_type sCoef3 = SplineCoefs[iGather + 3];
    laplArray[iScatter] = dSquareDeltaRinv *
        (sCoef0 * (d2A[2] * tp2 + d2A[3]) + sCoef1 * (d2A[6] * tp2 + d2A[7]) + sCoef2 * (d2A[10] * tp2 + d2A[11]) +
         sCoef3 * (d2A[14] * tp2 + d2A[15]));
    // gradient is stored as (du/dr)/r, hence the extra rinv factor
    gradArray[iScatter] = DeltaRInv * rinv *
        (sCoef0 * (dA[1] * tp1 + dA[2] * tp2 + dA[3]) + sCoef1 * (dA[5] * tp1 + dA[6] * tp2 + dA[7]) +
         sCoef2 * (dA[9] * tp1 + dA[10] * tp2 + dA[11]) + sCoef3 * (dA[13] * tp1 + dA[14] * tp2 + dA[15]));
    valArray[iScatter] = (sCoef0 * (A[0] * tp0 + A[1] * tp1 + A[2] * tp2 + A[3]) +
                          sCoef1 * (A[4] * tp0 + A[5] * tp1 + A[6] * tp2 + A[7]) +
                          sCoef2 * (A[8] * tp0 + A[9] * tp1 + A[10] * tp2 + A[11]) +
                          sCoef3 * (A[12] * tp0 + A[13] * tp1 + A[14] * tp2 + A[15]));
  }
}
} // namespace qmcplusplus
#endif
|
ZBinDumper_MPI.h | /*
* ZBinDumper_MPI.h
* Cubism
*
* Created by Panos Hadjidoukas on 3/20/14.
* Copyright 2014 CSE Lab, ETH Zurich. All rights reserved.
*
*/
#pragma once
#include <iostream>
#include <cstdio>
#include <vector>
#include <string>
#include <cassert>
#include <sstream>
#include "BlockInfo.h"
#include "LosslessCompression.h"
CUBISM_NAMESPACE_BEGIN
#define MAX_MPI_PROCS (16*1024) // header: 0.25MB* 8
typedef struct _header
{
long offset[8]; // 1 for single compression, NCHANNELS otherwise
long size[8];
} header;
// The following requirements for the data TStreamer are required:
// TStreamer::NCHANNELS : Number of data elements (1=Scalar, 3=Vector, 9=Tensor)
// TStreamer::operate : Data access methods for read and write
// TStreamer::getAttributeName : Attribute name of the date ("Scalar", "Vector", "Tensor")
// Write one compressed .zbin file per rank: a fixed-size header region of
// MAX_MPI_PROCS `header` slots (offset/size per channel, indexed by rank)
// followed by the per-channel compressed payloads laid out back-to-back.
template<typename TStreamer, typename TGrid>
void DumpZBin_MPI(
    const TGrid &grid,
    const int iCounter,
    const typename TGrid::Real t,
    const std::string &f_name,
    const std::string &dump_path = ".",
    const bool bDummy = false)
{
    typedef typename TGrid::BlockType B;
    int rank, nranks;
    // f_name is the base filename without file type extension
    std::ostringstream filename;
    filename << dump_path << "/" << f_name;
    MPI_Status status;
    MPI_File file_id;
    MPI_Comm comm = grid.getCartComm();
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &nranks);
    int coords[3];
    grid.peindex(coords);
    // resident (per-rank) grid extents in cells
    const unsigned int NX = grid.getResidentBlocksPerDimension(0)*B::sizeX;
    const unsigned int NY = grid.getResidentBlocksPerDimension(1)*B::sizeY;
    const unsigned int NZ = grid.getResidentBlocksPerDimension(2)*B::sizeZ;
    static const unsigned int NCHANNELS = TStreamer::NCHANNELS;
    if (rank==0)
    {
        //Real memsize = (NX * NY * NZ * NCHANNELS * sizeof(Real))/(1024.*1024.*1024.);
        Real memsize = (NX * NY * NZ * sizeof(Real))/(1024.*1024.*1024.);
        std::cout << "Allocating " << memsize << " GB of BIN data per rank (" << memsize*nranks << " GB in total)" << std::endl;
    }
    // one channel is staged (and compressed) at a time, hence no NCHANNELS factor
    // Real * array_all = new Real[NX * NY * NZ * NCHANNELS];
    Real * array_all = new Real[NX * NY * NZ];
    std::vector<BlockInfo> vInfo_local = grid.getResidentBlocksInfo();
    static const unsigned int sX = 0;
    static const unsigned int sY = 0;
    static const unsigned int sZ = 0;
    static const unsigned int eX = B::sizeX;
    static const unsigned int eY = B::sizeY;
    static const unsigned int eZ = B::sizeZ;
    // NOTE(review): opened with MPI_COMM_SELF; all ranks write into the same
    // file at disjoint offsets computed below -- confirm this matches the
    // filesystem's expectations for independent I/O.
    int rc = MPI_File_open( MPI_COMM_SELF, const_cast<char*>( (filename.str()+".zbin").c_str() ), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &file_id );
    if (rc) {
        printf("Unable to create ZBIN file\n");
        exit(1);
    }
    long previous_offset = 0;
    header tag; // moved here
    for (unsigned int ichannel = 0; ichannel < NCHANNELS; ichannel++)
    {
        // gather this channel from all resident blocks into the staging array
#pragma omp parallel for
        for(unsigned int i=0; i<vInfo_local.size(); i++)
        {
            BlockInfo& info = vInfo_local[i];
            const unsigned int idx[3] = {info.index[0], info.index[1], info.index[2]};
            B & b = *(B*)info.ptrBlock;
            for(unsigned int ix=sX; ix<eX; ix++)
            {
                const unsigned int gx = idx[0]*B::sizeX + ix;
                for(unsigned int iy=sY; iy<eY; iy++)
                {
                    const unsigned int gy = idx[1]*B::sizeY + iy;
                    for(unsigned int iz=sZ; iz<eZ; iz++)
                    {
                        const unsigned int gz = idx[2]*B::sizeZ + iz;
                        assert((gz + NZ * (gy + NY * gx)) < NX * NY * NZ);
                        Real * const ptr = array_all + (gz + NZ * (gy + NY * gx));
                        Real output;
                        TStreamer::operate(b, ix, iy, iz, &output, ichannel); // point -> output,
                        ptr[0] = output;
                    }
                }
            }
        }
        // long local_count = NX * NY * NZ * NCHANNELS;
        long local_count = NX * NY * NZ * 1;
        long local_bytes =  local_count * sizeof(Real);
        long offset;    // global offset
        // NOTE(review): `max` is unsigned int -- truncates if a rank stages
        // more than 4 GB; confirm against ZZcompress's expected argument type.
        unsigned int max = local_bytes;
        // int layout[4] = {NCHANNELS, NX, NY, NZ};
        int layout[4] = {NX, NY, NZ, 1};
        long compressed_bytes = ZZcompress<typename TGrid::Real>((unsigned char *)array_all, local_bytes, layout, &max); // "in place"
#if DBG
        printf("Writing %ld bytes of Compressed data (cr = %.2f)\n", compressed_bytes, local_bytes*1.0/compressed_bytes);
#endif
        // exclusive prefix sum of compressed sizes -> this rank's offset within the channel
        MPI_Exscan( &compressed_bytes, &offset, 1, MPI_LONG, MPI_SUM, comm);
        if (rank == 0) offset = 0; // MPI_Exscan leaves rank 0's recv buffer undefined
#if DBG
        printf("rank %d, offset = %ld, size = %ld\n", rank, offset, compressed_bytes); fflush(0);
#endif
        // header tag;
        tag.offset[ichannel] = offset + previous_offset;
        tag.size[ichannel]   = compressed_bytes;
#if DBG
        printf("rank %d, offset = %ld, size = %ld\n", rank, tag.offset[ichannel], tag.size[ichannel]); fflush(0);
#endif
        // the last rank's end-of-data marks where the next channel starts
        previous_offset = (tag.offset[ichannel] + tag.size[ichannel]);
        MPI_Bcast(&previous_offset, 1, MPI_LONG, nranks-1, comm);
        long base = MAX_MPI_PROCS*sizeof(tag); // full Header
        MPI_File_write_at(file_id, base + tag.offset[ichannel], (char *)array_all, tag.size[ichannel], MPI_CHAR, &status);
    }   /* ichannel */
    // finally store this rank's header slot (8 offsets + 8 sizes = 2*8 longs)
    MPI_File_write_at(file_id, rank*sizeof(tag), &tag, 2*8, MPI_LONG, &status);
    MPI_File_close(&file_id);
    delete [] array_all;
}
// Read back a .zbin file produced by DumpZBin_MPI: fetch this rank's header
// slot, then per channel read + decompress the payload and scatter it into
// the resident blocks via TStreamer.
template<typename TStreamer, typename TGrid>
void ReadZBin_MPI(TGrid &grid, const std::string& f_name, const std::string& read_path=".")
{
    typedef typename TGrid::BlockType B;
    int rank, nranks;
    // f_name is the base filename without file type extension
    std::ostringstream filename;
    filename << read_path << "/" << f_name;
    MPI_Status status;
    MPI_File file_id;
    MPI_Comm comm = grid.getCartComm();
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &nranks);
    int coords[3];
    grid.peindex(coords);
    const int NX = grid.getResidentBlocksPerDimension(0)*B::sizeX;
    const int NY = grid.getResidentBlocksPerDimension(1)*B::sizeY;
    const int NZ = grid.getResidentBlocksPerDimension(2)*B::sizeZ;
    static const int NCHANNELS = TStreamer::NCHANNELS;
    // NOTE(review): allocated with the NCHANNELS factor although only one
    // channel is decompressed at a time (the writer drops this factor) --
    // harmless over-allocation, but confirm which is intended.
    Real * array_all = new Real[NX * NY * NZ * NCHANNELS];
    std::vector<BlockInfo> vInfo_local = grid.getResidentBlocksInfo();
    static const int sX = 0;
    static const int sY = 0;
    static const int sZ = 0;
    const int eX = B::sizeX;
    const int eY = B::sizeY;
    const int eZ = B::sizeZ;
    int rc = MPI_File_open( MPI_COMM_SELF, const_cast<char*>( (filename.str()+".zbin").c_str() ), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_id );
    if (rc) {
        printf("Unable to read ZBIN file\n");
        exit(1);
    }
    // long local_count = NX * NY * NZ * NCHANNELS;
    long local_count = NX * NY * NZ * 1;
    long local_bytes = local_count * sizeof(Real);
    long offset; // only referenced in DBG printouts below (never assigned here)
    // this rank's header slot: per-channel offsets and compressed sizes
    header tag;
    MPI_File_read_at(file_id, rank*sizeof(tag), &tag, 2*8, MPI_LONG, &status);
#if DBG
    printf("HEADER(%d):\n", rank);
    for (int i = 0; i < NCHANNELS; i++) {
        printf("channel %d: %ld %ld\n", i, tag.offset[i], tag.size[i]);
    }
#endif
    for (unsigned int ichannel = 0; ichannel < NCHANNELS; ichannel++)
    {
        //MPI_File_read_at(file_id, (rank*2+0)*sizeof(long), &offset          , 1, MPI_LONG, &status);
        //MPI_File_read_at(file_id, (rank*2+1)*sizeof(long), &compressed_bytes, 1, MPI_LONG, &status);
#if DBG
        printf("rank %d, offset = %ld, compr. size = %ld\n", rank, tag.offset[ichannel], tag.size[ichannel]); fflush(0);
#endif
        long compressed_bytes = tag.size[ichannel];
#if DBG
        printf("Reading %ld bytes of Compressed data (cr = %.2f)\n", compressed_bytes, local_bytes*1.0/compressed_bytes);
#endif
        // slack for the decompressor's scratch needs; mirrors the writer's layout
        unsigned char *tmp = (unsigned char *) malloc(compressed_bytes+4096);
        long base = MAX_MPI_PROCS*sizeof(tag); // Header
        MPI_File_read_at(file_id, base + tag.offset[ichannel], (char *)tmp, compressed_bytes, MPI_CHAR, &status);
        // int layout[4] = {NCHANNELS, NX, NY, NZ};
        int layout[4] = {NX, NY, NZ, 1};
        size_t decompressed_bytes = ZZdecompress<typename TGrid::Real>(tmp, compressed_bytes, layout, (unsigned char *)array_all, local_bytes);
        free(tmp);
#if DBG
        printf("rank %d, offset = %ld, size = %ld (%ld)\n", rank, offset, decompressed_bytes, local_bytes); fflush(0);
#endif
        // scatter the decompressed channel back into the resident blocks
#pragma omp parallel for
        for(int i=0; i<vInfo_local.size(); i++)
        {
            BlockInfo& info = vInfo_local[i];
            const int idx[3] = {info.index[0], info.index[1], info.index[2]};
            B & b = *(B*)info.ptrBlock;
            for(int ix=sX; ix<eX; ix++)
                for(int iy=sY; iy<eY; iy++)
                    for(int iz=sZ; iz<eZ; iz++)
                    {
                        const int gx = idx[0]*B::sizeX + ix;
                        const int gy = idx[1]*B::sizeY + iy;
                        const int gz = idx[2]*B::sizeZ + iz;
                        //Real * const ptr_input = array_all + NCHANNELS*(gz + NZ * (gy + NY * gx));
                        Real * const ptr_input = array_all + (gz + NZ * (gy + NY * gx));
                        TStreamer::operate(b, *ptr_input, ix, iy, iz, ichannel); // output -> point
                    }
        }
    }   /* ichannel */
    MPI_File_close(&file_id);
    delete [] array_all;
}
CUBISM_NAMESPACE_END
|
mat_mul_p4a_10000.c | /*
* file for mat_mul.c
*/
#include "./mat_mul.h"
#include "./size.h"
void mat_mul(int *a, int *b, int *c);
/* Auto-generated (Par4All "p4a") 10000x10000 integer matrix-multiply kernel.
 * c, a, b are flat row-major 10000x10000 arrays.
 *
 * NOTE(review): the inner t loop adds the same product a[i][k]*b[j][k] 100
 * times, so c = 100 * (A * B^T) rather than A * B.  b is also indexed
 * transposed (b[j*10000+k]).  Both look like deliberate benchmark-generator
 * artifacts (flop amplification / cache-friendly access) -- confirm against
 * the generator before "fixing". */
void mat_mul(int *a, int *b, int *c)
{
   int i, j, k, t;
   /* parallelize over rows; j, k, t are per-thread loop counters */
#pragma omp parallel for private(j, t, k)
   for(i = 0; i <= 9999; i += 1)
      for(j = 0; j <= 9999; j += 1) {
         c[i*10000+j] = 0;
         for(k = 0; k <= 9999; k += 1)
            /* repeats the identical accumulation 100 times (see note above) */
            for(t = 0; t <= 99; t += 1)
               c[i*10000+j] += a[i*10000+k]*b[j*10000+k];
      }
   return;
}
|
GB_unop__identity_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__(none))
// op(A') function: GB (_unop_tran__identity_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Entire function is compiled out (#if 0): for the identity operator with
   matching types the apply is presumably done by a plain memcpy elsewhere --
   this stub is kept by the code generator. TODO confirm against GB_apply. */
#if 0
GrB_Info GB (_unop_apply__(none))
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
/* dense/sparse case: every entry 0..anz-1 is present; copy it through */
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
/* skip positions not present in the bitmap */
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): transpose while applying the identity operator.
   The actual transpose loop body lives in the shared template
   GB_unop_transpose.c, which expands the GB_* macros defined above. */
GrB_Info GB (_unop_tran__identity_fc32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
problem0.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <stdint.h>
#include "../timer.h"
//------------------------------------------------------------------------------------------------------------------------------
/*
 * initialize_problem: fill __u_exact and __f on one multigrid level so that
 * u_exact is a manufactured solution of  a*alpha*u - b*div(beta*grad u) = f
 * with cell-centered coordinates x,y,z = hLevel*(global_index + 0.5).
 * The active (#if 1) branch uses a degree-6 polynomial chosen to be periodic
 * and smooth through u''''; the disabled branches are alternative solutions.
 * Coefficients alpha/beta are constant (A=B=1) in this version.
 */
void initialize_problem(domain_type *domain, int level, double hLevel, double a, double b){
double NPi = 2.0*M_PI;
/* NOTE(review): Bmin/Bmax/c1..c4 parameterize a variable-coefficient beta
   that is not used in this constant-coefficient build -- currently dead. */
double Bmin = 1.0;
double Bmax = 10.0;
double c2 = (Bmax-Bmin)/2;
double c1 = (Bmax+Bmin)/2;
double c3=10.0; // how sharply (B)eta transitions
double c4 = -5.0/0.25;
int box;
for(box=0;box<domain->subdomains_per_rank;box++){
memset(domain->subdomains[box].levels[level].grids[__u_exact],0,domain->subdomains[box].levels[level].volume*sizeof(double));
memset(domain->subdomains[box].levels[level].grids[__f ],0,domain->subdomains[box].levels[level].volume*sizeof(double));
int i,j,k;
/* collapse(2) parallelizes k and j together; each (i,j,k) writes a unique
   ijk cell, so the loop nest is race-free */
#pragma omp parallel for private(k,j,i) collapse(2)
for(k=0;k<domain->subdomains[box].levels[level].dim.k;k++){
for(j=0;j<domain->subdomains[box].levels[level].dim.j;j++){
for(i=0;i<domain->subdomains[box].levels[level].dim.i;i++){
double x = hLevel*((double)(i+domain->subdomains[box].levels[level].low.i)+0.5);
double y = hLevel*((double)(j+domain->subdomains[box].levels[level].low.j)+0.5);
double z = hLevel*((double)(k+domain->subdomains[box].levels[level].low.k)+0.5);
/* linear index into the ghost-padded box: pencil = row stride, plane = slab stride */
int ijk = (i+domain->subdomains[box].levels[level].ghosts)+
domain->subdomains[box].levels[level].pencil*(j+domain->subdomains[box].levels[level].ghosts)+
domain->subdomains[box].levels[level].plane *(k+domain->subdomains[box].levels[level].ghosts);
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// constant coefficient
double A = 1.0;
double B = 1.0;
double Bx = 0.0;
double By = 0.0;
double Bz = 0.0;
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#if 1
// should be continuous in u, u', u'', u''', and u'''' to guarantee high order and periodic boundaries
// u = ax^6 + bx^5 + cx^4 + dx^3 + ex^2 + fx + g
// ux = 6ax^5 + 5bx^4 + 4cx^3 + 3dx^2 + 2ex + f
// uxx = 30ax^4 + 20bx^3 + 12cx^2 + 6dx + 2e
// a = 42.0
// b = -126.0
// c = 105.0
// d = 0.0
// e = -21.0
// f = 0.0
// g = 1.0
double X = 42.0*pow(x,6) - 126.0*pow(x,5) + 105.0*pow(x,4) - 21.0*pow(x,2) + 1.0;
double Y = 42.0*pow(y,6) - 126.0*pow(y,5) + 105.0*pow(y,4) - 21.0*pow(y,2) + 1.0;
double Z = 42.0*pow(z,6) - 126.0*pow(z,5) + 105.0*pow(z,4) - 21.0*pow(z,2) + 1.0;
double Xx = 252.0*pow(x,5) - 630.0*pow(x,4) + 420.0*pow(x,3) - 42.0*x;
double Yy = 252.0*pow(y,5) - 630.0*pow(y,4) + 420.0*pow(y,3) - 42.0*y;
double Zz = 252.0*pow(z,5) - 630.0*pow(z,4) + 420.0*pow(z,3) - 42.0*z;
double Xxx = 1260.0*pow(x,4) - 2520.0*pow(x,3) + 1260.0*pow(x,2) - 42.0;
double Yyy = 1260.0*pow(y,4) - 2520.0*pow(y,3) + 1260.0*pow(y,2) - 42.0;
double Zzz = 1260.0*pow(z,4) - 2520.0*pow(z,3) + 1260.0*pow(z,2) - 42.0;
/* separable solution u(x,y,z) = X(x)*Y(y)*Z(z) and its derivatives */
double u = X *Y *Z ;
double ux = Xx *Y *Z ;
double uy = X *Yy *Z ;
double uz = X *Y *Zz ;
double uxx = Xxx*Y *Z ;
double uyy = X *Yyy*Z ;
double uzz = X *Y *Zzz;
#else
#if 0
// should be continuous in u, u', and u''
// v(w) = w^4 - 2w^3 + w^2
// u(x,y,z) = v(x)v(y)v(z)
double X = 1.0*pow(x,4) - 2.0*pow(x,3) + 1.0*pow(x,2) - 1.0/30.0;
double Y = 1.0*pow(y,4) - 2.0*pow(y,3) + 1.0*pow(y,2) - 1.0/30.0;
double Z = 1.0*pow(z,4) - 2.0*pow(z,3) + 1.0*pow(z,2) - 1.0/30.0;
double Xx = 4.0*pow(x,3) - 6.0*pow(x,2) + 2.0*x;
double Yy = 4.0*pow(y,3) - 6.0*pow(y,2) + 2.0*y;
double Zz = 4.0*pow(z,3) - 6.0*pow(z,2) + 2.0*z;
double Xxx = 12.0*pow(x,2) - 12.0*x + 2.0;
double Yyy = 12.0*pow(y,2) - 12.0*y + 2.0;
double Zzz = 12.0*pow(z,2) - 12.0*z + 2.0;
double u = X*Y*Z;
double ux = Xx*Y*Z;
double uy = X*Yy*Z;
double uz = X*Y*Zz;
double uxx = Xxx*Y*Z;
double uyy = X*Yyy*Z;
double uzz = X*Y*Zzz;
#else
double u = sin(NPi*x)*sin(NPi*y)*sin(NPi*z);
double ux = NPi*cos(NPi*x)*sin(NPi*y)*sin(NPi*z);
double uy = NPi*sin(NPi*x)*cos(NPi*y)*sin(NPi*z);
double uz = NPi*sin(NPi*x)*sin(NPi*y)*cos(NPi*z);
double uxx = -NPi*NPi*sin(NPi*x)*sin(NPi*y)*sin(NPi*z);
double uyy = -NPi*NPi*sin(NPi*x)*sin(NPi*y)*sin(NPi*z);
double uzz = -NPi*NPi*sin(NPi*x)*sin(NPi*y)*sin(NPi*z);
#endif
#endif
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
/* manufactured right-hand side: f = a*A*u - b*(grad(B).grad(u) + B*laplacian(u)) */
double f = a*A*u - b*( (Bx*ux + By*uy + Bz*uz) + B*(uxx + uyy + uzz) );
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
domain->subdomains[box].levels[level].grids[__alpha ][ijk] = A;
domain->subdomains[box].levels[level].grids[__beta ][ijk] = B;
domain->subdomains[box].levels[level].grids[__u_exact][ijk] = u;
domain->subdomains[box].levels[level].grids[__f ][ijk] = f;
}}}
}
/* Remove the mean of f (and shift u accordingly) so the problem is solvable
   with periodic boundaries when a != 0. */
double average_value_of_f = mean(domain,level,__f);
if(domain->rank==0){printf("\n average value of f = %20.12e\n",average_value_of_f);fflush(stdout);}
if(a!=0){
shift_grid(domain,level,__f ,__f ,-average_value_of_f);
shift_grid(domain,level,__u_exact,__u_exact,-average_value_of_f/a);
}
// what if a==0 and average_value_of_f != 0 ???
}
//------------------------------------------------------------------------------------------------------------------------------
|
smul_glv4.c | #ifdef DEBUG_MODE
#include <stdio.h>
#include "kernel.h"
#else
#include "_core.h"
#endif
#include "multiprecision.h"
#include "multiprecision_stack.h"
#include "finite128.h"
/* phi: apply the (presumed GLV) endomorphism to divisor D1, writing D3.
   Each coordinate is multiplied by a precomputed curve constant from cn
   (u1z/u0z for the u-polynomial, v1z/v0z for the v-polynomial); the
   projective Z coordinate is copied unchanged.  D3 and D1 may alias only
   if fp_mul_2e128mc_x8664 tolerates in-place operands -- TODO confirm. */
static inline void phi(DIV_hec_fp_2e128mc D3, const DIV_hec_fp_2e128mc D1, const CNS_hec_fp_2e128mc_glv4 cn){
fp_mul_2e128mc_x8664(D3->P, cn->prm, NULL, D1->P, cn->u1z);
fp_mul_2e128mc_x8664(D3->R, cn->prm, NULL, D1->R, cn->u0z);
fp_mul_2e128mc_x8664(D3->S, cn->prm, NULL, D1->S, cn->v1z);
fp_mul_2e128mc_x8664(D3->T, cn->prm, NULL, D1->T, cn->v0z);
fp_cpy_2e128mc_x8664(D3->Z, D1->Z);
}
/*
 * hec_fp_smul_2e128mc_fkt_glv4: scalar multiplication D1 = [kn]D2 on a
 * genus-2 hyperelliptic Jacobian over GF(2^128-c) using a 4-dimensional
 * GLV decomposition:
 *   1) round yi = ahi*k / N to nearest (the |r| > N/2 adjustment below),
 *   2) derive four mini-scalars k0..k3 from the yi and lattice basis A1..A4,
 *   3) precompute a 16-entry table of all sums of {d0, phi(d0), phi^2(d0),
 *      phi^3(d0)},
 *   4) run a joint MSB-first double-and-add over the mini-scalar bits.
 */
void hec_fp_smul_2e128mc_fkt_glv4(DIV_hec_fp_2e128mc D1, const uni kn, DIV_hec_fp_2e128mc D2, CNS_hec_fp_2e128mc_glv4 cn)
{
uni_t y1s[4*FP_LEN], y2s[4*FP_LEN], y3s[4*FP_LEN], y4s[4*FP_LEN], rs[4*FP_LEN], ts[4*FP_LEN], k0s[4*FP_LEN], k1s[4*FP_LEN], k2s[4*FP_LEN], k3s[4*FP_LEN], Ts[4*FP_LEN];
MI_t y1, y2, y3, y4, ah1, ah2, ah3, ah4, N, r, k, t, A1, A2, A3, A4, k0, k1, k2, k3, T, Nt;
DIV_hec_fp_2e128mc_t d0, d1, d2, d3, tbl[16];
int j, ei, b, bt;
/*uni_t i; fp_cnt_bits(&i, kn, FP_LEN*2); printf("%d ", i);*/
/* Bind the big-integer views onto the precomputed constants (ahi, Ai, N,
   N/2) and the input scalar; signs are fixed by the lattice basis. */
ah1->v->n = (uni)cn->ah1; ah1->v->l = 3*FP_LEN/2; ah1->s = NEGATIVE;
ah2->v->n = (uni)cn->ah2; ah2->v->l = 3*FP_LEN/2; ah2->s = NEGATIVE;
ah3->v->n = (uni)cn->ah3; ah3->v->l = 3*FP_LEN/2; ah3->s = POSITIVE;
ah4->v->n = (uni)cn->ah4; ah4->v->l = 3*FP_LEN/2; ah4->s = NEGATIVE;
A1->v->n = (uni)cn->A1; A1->v->l = FP_LEN/2; A1->s = POSITIVE;
A2->v->n = (uni)cn->A2; A2->v->l = FP_LEN/2; A2->s = POSITIVE;
A3->v->n = (uni)cn->A3; A3->v->l = FP_LEN/2; A3->s = POSITIVE;
A4->v->n = (uni)cn->A4; A4->v->l = FP_LEN/2; A4->s = POSITIVE;
N->v->n = (uni)cn->N; N->v->l = 2*FP_LEN; N->s = POSITIVE;
Nt->v->n = (uni)cn->Nhalf; Nt->v->l = 2*FP_LEN; Nt->s = POSITIVE;
k->v->n = kn; k->v->l = 2*FP_LEN; k->s = POSITIVE;
y1->v->n = y1s; y2->v->n = y2s; y3->v->n = y3s; y4->v->n = y4s;
k0->v->n = k0s; k1->v->n = k1s; k2->v->n = k2s; k3->v->n = k3s;
t->v->n = ts; r->v->n = rs; T->v->n = Ts;
/* y1 = round(ah1*k / N): truncated quotient, then bump by 1 toward the
   nearest integer when the remainder exceeds N/2. */
mi_mul_stack(t, ah1, k); mi_div_q_r_stack(y1, r, t, N);
if(mi_compare_abs_stack(r, Nt) == GREATER){
if(t->s == POSITIVE){
mi_add_1_stack(y1, y1, 1);
}
else{
mi_sub_1_stack(y1, y1, 1);
}
}
/* y2 = round(ah2*k / N) */
mi_mul_stack(t, ah2, k); mi_div_q_r_stack(y2, r, t, N);
if(mi_compare_abs_stack(r, Nt) == GREATER){
if(t->s == POSITIVE){
mi_add_1_stack(y2, y2, 1);
}
else{
mi_sub_1_stack(y2, y2, 1);
}
}
/* y3 = round(ah3*k / N) */
mi_mul_stack(t, ah3, k); mi_div_q_r_stack(y3, r, t, N);
if(mi_compare_abs_stack(r, Nt) == GREATER){
if(t->s == POSITIVE){
mi_add_1_stack(y3, y3, 1);
}
else{
mi_sub_1_stack(y3, y3, 1);
}
}
/* y4 = round(ah4*k / N) */
mi_mul_stack(t, ah4, k); mi_div_q_r_stack(y4, r, t, N);
if(mi_compare_abs_stack(r, Nt) == GREATER){
if(t->s == POSITIVE){
mi_add_1_stack(y4, y4, 1);
}
else{
mi_sub_1_stack(y4, y4, 1);
}
}
/* Mini-scalar k0 = k - (A1*y1 - A2*y4 - A3*y3 - A4*y2) */
mi_mul_stack(k0, A1, y1);
mi_mul_stack(T, A2, y4);
mi_sub_stack(k0, k0, T);
mi_mul_stack(T, A3, y3);
mi_sub_stack(k0, k0, T);
mi_mul_stack(T, A4, y2);
mi_sub_stack(k0, k0, T);
mi_sub_stack(k0, k, k0);
/* Mini-scalar k1 = A4*y3 - (A1*y2 + A2*y1 - A3*y4) */
mi_mul_stack(k1, A1, y2);
mi_mul_stack(T, A2, y1);
mi_add_stack(k1, k1, T);
mi_mul_stack(T, A3, y4);
mi_sub_stack(k1, k1, T);
mi_mul_stack(T, A4, y3);
mi_sub_stack(k1, T, k1);
/* Mini-scalar k2 = A4*y4 - (A1*y3 + A2*y2 + A3*y1) */
mi_mul_stack(k2, A1, y3);
mi_mul_stack(T, A2, y2);
mi_add_stack(k2, k2, T);
mi_mul_stack(T, A3, y1);
mi_add_stack(k2, k2, T);
mi_mul_stack(T, A4, y4);
mi_sub_stack(k2, T, k2);
/* Mini-scalar k3 = A1*y4 + A2*y3 + A3*y2 + A4*y1 */
mi_mul_stack(k3, A1, y4);
mi_mul_stack(T, A2, y3);
mi_add_stack(k3, k3, T);
mi_mul_stack(T, A3, y2);
mi_add_stack(k3, k3, T);
mi_mul_stack(T, A4, y1);
mi_add_stack(k3, k3, T);
/* di = phi^i(D2); negate each base when its mini-scalar is negative so the
   main loop can use |ki|. */
hec_fp_cpy_2e128mc_g2i(d0, D2);
phi(d1, d0, cn);
phi(d2, d1, cn);
phi(d3, d2, cn);
if(k0->s == NEGATIVE){
hec_fp_neg_2e128mc_g2i(d0, d0, (CNS_hec_fp_2e128mc)cn);
}
if(k1->s == NEGATIVE){
hec_fp_neg_2e128mc_g2i(d1, d1, (CNS_hec_fp_2e128mc)cn);
}
if(k2->s == NEGATIVE){
hec_fp_neg_2e128mc_g2i(d2, d2, (CNS_hec_fp_2e128mc)cn);
}
/* NOTE(review): this tests POSITIVE while k0..k2 test NEGATIVE; the
   original comment claims the condition is effectively "if negative"
   (sign convention of ah4/A4?) -- verify against the derivation. */
if(k3->s == POSITIVE){ /* Actually if negative. */
hec_fp_neg_2e128mc_g2i(d3, d3, (CNS_hec_fp_2e128mc)cn);
}
/* tbl[b] = sum of the di whose bit is set in b (b = bit3..bit0 -> d3..d0) */
fp_set_1_2e128mc_x8664(tbl[0]->Z, 0); //Marker for the point at infinity.
hec_fp_cpy_2e128mc_g2i(tbl[1], d0);
hec_fp_cpy_2e128mc_g2i(tbl[2], d1);
hec_fp_aadd_2e128mc_g2i(tbl[3], tbl[2], tbl[1], (CNS_hec_fp_2e128mc)cn);
hec_fp_cpy_2e128mc_g2i(tbl[4], d2);
hec_fp_aadd_2e128mc_g2i(tbl[5], tbl[4], tbl[1], (CNS_hec_fp_2e128mc)cn);
hec_fp_aadd_2e128mc_g2i(tbl[6], tbl[4], tbl[2], (CNS_hec_fp_2e128mc)cn);
hec_fp_add_2e128mc_g2i(tbl[7], tbl[4], tbl[3], (CNS_hec_fp_2e128mc)cn);
hec_fp_cpy_2e128mc_g2i(tbl[8], d3);
hec_fp_aadd_2e128mc_g2i(tbl[9], tbl[8], tbl[1], (CNS_hec_fp_2e128mc)cn);
hec_fp_aadd_2e128mc_g2i(tbl[10], tbl[8], tbl[2], (CNS_hec_fp_2e128mc)cn);
hec_fp_add_2e128mc_g2i(tbl[11], tbl[8], tbl[3], (CNS_hec_fp_2e128mc)cn);
hec_fp_aadd_2e128mc_g2i(tbl[12], tbl[8], tbl[4], (CNS_hec_fp_2e128mc)cn);
hec_fp_add_2e128mc_g2i(tbl[13], tbl[8], tbl[5], (CNS_hec_fp_2e128mc)cn);
hec_fp_add_2e128mc_g2i(tbl[14], tbl[8], tbl[6], (CNS_hec_fp_2e128mc)cn);
hec_fp_add_2e128mc_g2i(tbl[15], tbl[8], tbl[7], (CNS_hec_fp_2e128mc)cn);
fp_set_1_2e128mc_x8664(D1->Z, 0); //Marker for the point at infinity.
/*TODO: The following lines solves a minor problem caused by 1-bit-longer-than-expected mini-scalars at low-level. This can be made prettier. */
fp_cnt_bits(&bt, k0->v->n, k0->v->l);
fp_cnt_bits(&b, k1->v->n, k1->v->l);
if(b > bt){ bt = b; }
fp_cnt_bits(&b, k2->v->n, k2->v->l);
if(b > bt){ bt = b; }
fp_cnt_bits(&b, k3->v->n, k3->v->l);
if(b > bt){ bt = b; }
k0->v->n[k0->v->l] = 0;
k1->v->n[k1->v->l] = 0;
k2->v->n[k2->v->l] = 0;
k3->v->n[k3->v->l] = 0;
/* Seed the accumulator from the top bit column, then double-and-add. */
j = bt;
mam_ith_bit(ei, k3->v->n, j); b = ei;
mam_ith_bit(ei, k2->v->n, j); b = (b << 1) + ei;
mam_ith_bit(ei, k1->v->n, j); b = (b << 1) + ei;
mam_ith_bit(ei, k0->v->n, j); b = (b << 1) + ei;
hec_fp_cpy_2e128mc_g2i(D1, tbl[b]);
//#pragma omp parallel for num_threads(2)
/* NOTE(review): the loop condition is j > 0, so bit column 0 is never
   processed -- confirm this is compensated elsewhere (e.g. by the
   1-bit-longer mini-scalar handling above) and not an off-by-one. */
for(j = bt-1; j > 0; j--){
hec_fp_dbl_2e128mc_g2i_a2is0_a3is0(D1, D1, (CNS_hec_fp_2e128mc)cn);
mam_ith_bit(ei, k3->v->n, j); b = ei;
mam_ith_bit(ei, k2->v->n, j); b = (b << 1) + ei;
mam_ith_bit(ei, k1->v->n, j); b = (b << 1) + ei;
mam_ith_bit(ei, k0->v->n, j); b = (b << 1) + ei;
hec_fp_add_2e128mc_g2i(D1, D1, tbl[b], (CNS_hec_fp_2e128mc)cn);
}
}
|
GB_unaryop__minv_bool_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_bool_int8
// op(A') function: GB_tran__minv_bool_int8
// C type: bool
// A type: int8_t
// cast: ;
// unaryop: cij = true
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = true ;
// casting
#define GB_CASTING(z, x) \
; ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_BOOL || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx = minv(Ax) with bool output: per the generated GB_OP above, the
   boolean multiplicative inverse is the constant true, so every output
   entry is set to true and Ax is never read (GB_GETA is a no-op). */
GrB_Info GB_unop__minv_bool_int8
(
bool *restrict Cx,
const int8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = minv(A'): transpose with the operator applied; the loop body is the
   shared template GB_unaryop_transpose.c expanding the macros above. */
GrB_Info GB_tran__minv_bool_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_barrier.c | <ompts:test>
<ompts:testdescription>Test which checks the omp barrier directive. The test creates several threads and sends one of them sleeping before setting a flag. After the barrier the other ones do some littel work depending on the flag.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp barrier</ompts:directive>
<ompts:testcode>
#include <stdio.h>
#include <unistd.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"
/* OpenMP testsuite check for "#pragma omp barrier": thread 1 sleeps and
   then sets result2; thread 2 reads result2 after the barrier.  With a
   working barrier thread 2 must observe result2 == 3.  Needs at least
   3 threads in the team for both rank 1 and rank 2 to exist. */
int <ompts:testcode:functionname>omp_barrier</ompts:testcode:functionname> (FILE * logFile)
{
<ompts:orphan:vars>
int result1;
int result2;
</ompts:orphan:vars>
result1 = 0;
result2 = 0;
#pragma omp parallel
{
<ompts:orphan>
int rank;
rank = omp_get_thread_num ();
if (rank ==1) {
/* delay the write so that, without a barrier, rank 2 races ahead
   and reads the stale 0 */
my_sleep(SLEEPTIME_LONG);
result2 = 3;
}
<ompts:check>#pragma omp barrier</ompts:check>
if (rank == 2) {
result1 = result2;
}
</ompts:orphan>
}
printf("result1=%d\n",result1);
/* nonzero (pass) iff the barrier ordered the write before the read */
return (result1 == 3);
}
</ompts:testcode>
</ompts:test>
|
thread_pool.h | /**
* Non-metric Space Library
*
* Main developers: Bilegsaikhan Naidan, Leonid Boytsov, Yury Malkov, Ben Frederickson, David Novak
*
* For the complete list of contributors and further details see:
* https://github.com/searchivarius/NonMetricSpaceLib
*
* Copyright (c) 2013-2018
*
* This code is released under the
* Apache License Version 2.0 http://www.apache.org/licenses/.
*
*/
#include <atomic>
#include <thread>
#include <queue>
#include <mutex>
namespace similarity {
// See sample usage below
/**
 * Atomically dequeue the front element of `queue` into `obj`.
 *
 * The mutex is held for the whole empty-check + front + pop sequence, so
 * concurrent workers can safely share one queue (see sample usage below).
 *
 * @return true if an element was dequeued into obj, false if the queue
 *         was empty (obj is left untouched in that case).
 */
template <class T>
bool GetNextQueueObj(std::mutex &mtx, std::queue<T>& queue, T& obj) {
  std::unique_lock<std::mutex> guard(mtx);
  if (!queue.empty()) {
    obj = queue.front();
    queue.pop();
    return true;
  }
  return false;
}
/*
Sample usage of helper function GetNextQueueObj:
queue<MSWNode*> toPatchQueue; // the job queue
for (MSWNode* node : toPatchNodes) toPatchQueue.push(node);
mutex mtx;
vector<thread> threads;
for (int i = 0; i < indexThreadQty_; ++i) {
threads.push_back(thread(
[&]() {
MSWNode* node = nullptr;
// get the next job from the queue
while(GetNextQueueObj(mtx, toPatchQueue, node)) {
node->removeGivenFriends(delNodesBitset);
}
}
));
}
// Don't forget to join!
for (auto& thread : threads) thread.join();
*/
/*
* replacement for the openmp '#pragma omp parallel for' directive
* only handles a subset of functionality (no reductions etc)
* Process ids from start (inclusive) to end (EXCLUSIVE)
*/
/*
 * replacement for the openmp '#pragma omp parallel for' directive
 * only handles a subset of functionality (no reductions etc)
 * Process ids from start (inclusive) to end (EXCLUSIVE)
 *
 * fn is invoked as fn(id, threadId).  If any invocation throws, remaining
 * work is abandoned and the first recorded exception is rethrown on the
 * caller's thread after all workers have joined.
 */
template <class Function>
inline void ParallelFor(size_t start, size_t end, size_t numThreads, Function fn) {
  // numThreads is unsigned, so the original `<= 0` test only ever matched 0.
  if (numThreads == 0) {
    numThreads = std::thread::hardware_concurrency();
    // hardware_concurrency() may legitimately return 0 ("unknown").  The
    // original code then spawned zero workers and silently skipped ALL the
    // work; fall back to single-threaded execution instead.
    if (numThreads == 0) {
      numThreads = 1;
    }
  }
  if (numThreads == 1) {
    for (size_t id = start; id < end; id++) {
      fn(id, 0);
    }
  } else {
    std::vector<std::thread> threads;
    std::atomic<size_t> current(start);
    // keep track of exceptions in threads
    // https://stackoverflow.com/a/32428427/1713196
    std::exception_ptr lastException = nullptr;
    std::mutex lastExceptMutex;
    for (size_t threadId = 0; threadId < numThreads; ++threadId) {
      threads.push_back(std::thread([&, threadId] {
        while (true) {
          // claim the next id; fetch_add returns the pre-increment value
          size_t id = current.fetch_add(1);
          if ((id >= end)) {
            break;
          }
          try {
            fn(id, threadId);
          } catch (...) {
            std::unique_lock<std::mutex> lastExcepLock(lastExceptMutex);
            lastException = std::current_exception();
            /*
             * This will work even when current is the largest value that
             * size_t can fit, because fetch_add returns the previous value
             * before the increment (what will result in overflow
             * and produce 0 instead of current + 1).
             */
            current = end;
            break;
          }
        }
      }));
    }
    for (auto & thread : threads) {
      thread.join();
    }
    if (lastException) {
      std::rethrow_exception(lastException);
    }
  }
}
};
|
GB_unaryop__lnot_bool_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_bool_uint8
// op(A') function: GB_tran__lnot_bool_uint8
// C type: bool
// A type: uint8_t
// cast: bool cij = (bool) aij
// unaryop: cij = !aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !x ;
// casting
#define GB_CASTING(z, aij) \
bool z = (bool) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_BOOL || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx = lnot(Ax): per the generated macros above, each uint8 entry is cast
   to bool and logically negated, Cx[p] = !((bool) Ax[p]).  Cx and Ax may
   alias because each iteration reads and writes only position p. */
GrB_Info GB_unop__lnot_bool_uint8
(
bool *Cx, // Cx and Ax may be aliased
uint8_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = lnot(A'): transpose with the operator applied; the loop body is the
   shared template GB_unaryop_transpose.c expanding the macros above. */
GrB_Info GB_tran__lnot_bool_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
hybrid_report_mask.c | /* hybid,id report_mask collects and prints the masks from each rank.
1.) Get mask for each rank (and integer array -- no hex).
Root gathers masks and prints them
2.) Get node name from each rank.
Determine and use maximum length of node name
3.) Call print_mask.
*/
#include <stdio.h>
#include <mpi.h>
#include <omp.h>
#include <sched.h>
#include <unistd.h>
#include <stdlib.h>
#include <ctype.h>
// basic routes
void print_mask(int hd_prnt, char* name, int multi_node, int rank, int thrd, int ncpus, int nranks, int nthrds, int *proc_mask);
/*
 * hybrid_report_mask: gather and print CPU affinity masks for a hybrid
 * MPI+OpenMP program.  Must be called TWICE: first from a pure-MPI region
 * (collects per-rank masks and node names on rank 0), then from inside an
 * OpenMP parallel region (collects per-thread masks and prints everything
 * via print_mask, rank 0 receiving each other rank's packed masks).
 *
 * NOTE(review): declared int but has no return statement; callers must not
 * use the return value.
 */
int hybrid_report_mask(void){
int thrd, nthrds;
int rank, nranks;
static int multi_node = 0;
int ncpus, nel_set;
static int ** omp_proc_mask;
static int * omp_mask_pac;
char *dummy;
char proc_name[MPI_MAX_PROCESSOR_NAME];
static char * all_names;
int name_len;
static int max_name_len;
// General
int i,j,ierr;
int id, rid,tid;
int in_mpi, in_omp;
// Mask storage
int ** proc_mask;
/* static so the OpenMP phase can check the MPI phase ran first */
static int * all_masks=0;
MPI_Initialized(&in_mpi);
in_omp = omp_in_parallel();
/* ---- Phase 1: pure MPI (outside any parallel region) ---- */
if(in_mpi != 0 && in_omp == 0){
// Get number of cpus (this gives no. of cpu_ids in /proc/cpuinfo)
// Get rank number & no of ranks via MPI
ncpus = (int) sysconf(_SC_NPROCESSORS_ONLN);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &nranks);
// Create a 2-D array for mask
// proc_mask[rank][ncpus] -- for simplicity, size is [ncpus][ncpus]
// Thinking ahead for hybrid code.
// zero out proc_mask[ncpus][ncpus]
// I could have made proc_mask a single array (proc_mask[ncpus]); but didn't
// This is a hold-over from the openmp version that holds everything for all threads.
// For MPI I made a continguous collection array (all_masks).
proc_mask = malloc(sizeof(int*)*ncpus);
for(i=0;i<ncpus;i++) proc_mask[i] = malloc(sizeof(int)*ncpus);
for(i=0;i<ncpus;i++) for(j=0;j<ncpus;j++) proc_mask[i][j] =0;
all_masks = (int *) malloc(sizeof(int)*ncpus*ncpus);
// get map for this processor
ierr=boundto(&nel_set,proc_mask[rank]);
// Gather information to rank 0
MPI_Gather( proc_mask[rank], ncpus, MPI_INT,
all_masks, ncpus, MPI_INT,
0, MPI_COMM_WORLD);
// Get a list of nodes from all ranks.
MPI_Get_processor_name(proc_name,&name_len);
MPI_Allreduce(&name_len, &max_name_len, 1,MPI_INT, MPI_MAX, MPI_COMM_WORLD);
/* NOTE(review): sizeof(int*) here over-allocates; sizeof(char) was
   presumably intended (harmless, but confirm). */
all_names = malloc(sizeof(int*)*nranks*(max_name_len+1));
MPI_Gather( proc_name, max_name_len+1 , MPI_CHAR,
all_names, max_name_len+1, MPI_CHAR,
0, MPI_COMM_WORLD);
// If multiple nodes, make muti_node not equal to 0.
if(rank == 0)
for(id=0;id<nranks;id++){
if( strcmp(&all_names[id*(max_name_len+1)],&all_names[0]) ) multi_node++;
}
} // End of Pure MPI part
/* ---- Phase 2: inside an OpenMP parallel region ---- */
if(in_mpi != 0 && in_omp != 0){
if(all_masks == 0) {
printf("ERROR: ***** You must call hybrid_report_mask() in a Pure MPI region first. ***** \n");
exit(1);
}
thrd = omp_get_thread_num();
nthrds = omp_get_num_threads();
ncpus = (int) sysconf(_SC_NPROCESSORS_ONLN);
#pragma omp single
{
omp_proc_mask = malloc(sizeof(int*)*nthrds);
for(i=0;i<nthrds;i++) omp_proc_mask[i] = malloc(sizeof(int)*ncpus );
for(i=0;i<nthrds;i++) for(j=0;j<ncpus;j++) omp_proc_mask[i][j] =0;
}
#pragma omp critical
ierr = boundto(&nel_set,omp_proc_mask[thrd]);
#pragma omp barrier
MPI_Comm_size(MPI_COMM_WORLD, &nranks);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
#pragma omp master
{
/* NOTE(review): packing buffer is sized nranks*ncpus but indexed as
   [tid*ncpus] for tid < nthrds -- overflows if nthrds > nranks; verify. */
omp_mask_pac = (int *) malloc(sizeof(int)*nranks*ncpus); // need packing space for mpi send/recv
if(rank == 0){
/* NOTE(review): `dummy` is passed uninitialized (header print presumably
   ignores the name), and nthrds/nranks appear in the opposite order from
   the print_mask prototype (…, nranks, nthrds, …) -- confirm both. */
print_mask(1, dummy, multi_node, 0, 0, ncpus, nthrds,nranks, omp_proc_mask[0]); //print header
fflush(stdout);
for(tid=0;tid<nthrds;tid++){
/* NOTE(review): indexes all_names by THREAD id; rank 0's node name is
   at index 0 -- tid*(max_name_len+1) reads other ranks' names. Verify. */
print_mask(0, &all_names[tid*(max_name_len+1)], multi_node, 0,tid, ncpus, nthrds,nranks, omp_proc_mask[tid]);
}
fflush(stdout);
for(rid=1;rid<nranks;rid++){
// Receive other rank's packed mask arrays
MPI_Recv(omp_mask_pac, nthrds*ncpus, MPI_INT, rid, 99, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
for(tid=0;tid<nthrds;tid++){
print_mask(0, &all_names[rid*(max_name_len+1)], multi_node, rid,tid, ncpus, nthrds,nranks, &omp_mask_pac[tid*ncpus]);
}
fflush(stdout);
} // rank loop
} // end root printing
else{ //all other ranks
// All non-root ranks send to root.
/* NOTE(review): every non-root rank loops rid=1..nranks-1 and sends
   nranks-1 identical messages, but root posts exactly one receive per
   rank -- the extra sends are never matched. Confirm intended. */
for(rid=1;rid<nranks;rid++){
// Pack up the ranks' mask arrays (Uh, should have made one array from beginning!)
for( tid=0;tid<nthrds;tid++){
for( id=0; id<ncpus; id++) omp_mask_pac[(tid*ncpus)+id] = omp_proc_mask[tid][id];
}
// Send to root
MPI_Send(omp_mask_pac, nthrds*ncpus, MPI_INT, 0, 99, MPI_COMM_WORLD);
} //all other ranks
} // end non-root printing
MPI_Barrier(MPI_COMM_WORLD);
} // end of Master
#pragma omp barrier
} // end of OpenMP part
}
|
gemm.c | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include "gemm.h"
#include <time.h>
#include <windows.h>
//#include <polybench.h>
/**
* This version is stamped on May 10, 2016
*
* Contact:
* Louis-Noel Pouchet <pouchet.ohio-state.edu>
* Tomofumi Yuki <tomofumi.yuki.fr>
*
* Web address: http://polybench.sourceforge.net
*/
/* Allocate n * elt_size bytes (PolyBench data allocator).
 * Resolves the original "FIXME: detect overflow": returns NULL instead of
 * a silently wrapped (undersized) allocation when n * elt_size overflows,
 * when the byte count does not fit in size_t, or when elt_size is not
 * positive.  Callers receiving non-NULL get at least the requested size. */
void * polybench_alloc_data(unsigned long long n, int elt_size) {
  if (elt_size <= 0) return NULL;
  unsigned long long bytes = n * (unsigned long long) elt_size;
  /* multiplication overflow check: recover elt_size by division */
  if (n != 0 && bytes / n != (unsigned long long) elt_size) return NULL;
  /* the byte count must also fit in this platform's size_t */
  if (bytes > (unsigned long long) (size_t) -1) return NULL;
  return malloc((size_t) bytes);
}
/*gemm.c: this file is part of PolyBench/C*/
/*Include polybench common header.*/
/*Include benchmark-specific header.*/
/*Array initialization.*/
static void init_array(int ni, int nj, int nk, double * alpha, double * beta, double C[1000][1100], double A[1000][1200], double B[1200][1100]) {
int i, j;
*alpha = 1.5;
*beta = 1.2;
//#pragma omp parallel for default(shared) private(i, j) firstprivate(ni, nj)
for(i = 0; i < ni; i++) {
//#pragma omp parallel for default(shared) private(j) firstprivate(nj, i, ni)
for(j = 0; j < nj; j++) C[i][j] = (double) ((i * j + 1) % ni) / ni;
}
//#pragma omp parallel for default(shared) private(i, j) firstprivate(ni, nk)
for(i = 0; i < ni; i++) {
//#pragma omp parallel for default(shared) private(j) firstprivate(nk, i)
for(j = 0; j < nk; j++) A[i][j] = (double) (i * (j + 1) % nk) / nk;
}
//#pragma omp parallel for default(shared) private(i, j) firstprivate(nk, nj)
for(i = 0; i < nk; i++) {
//#pragma omp parallel for default(shared) private(j) firstprivate(nj, i)
for(j = 0; j < nj; j++) B[i][j] = (double) (i * (j + 2) % nj) / nj;
}
}
/*DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output.*/
/* Dump the ni x nj live-out region of C to stderr in the PolyBench
 * "==BEGIN/END DUMP_ARRAYS==" format (used to check output correctness
 * and defeat dead-code elimination).  __acrt_iob_func(2) is the Windows
 * CRT handle for stderr. */
static void print_array(int ni, int nj, double C[1000][1100]) {
int i, j;
fprintf((__acrt_iob_func(2)), "==BEGIN DUMP_ARRAYS==\n");
fprintf((__acrt_iob_func(2)), "begin dump: %s", "C");
/*************** Clava msgError **************
Variables Access as passed arguments Can not be traced inside of function calls :
fprintf#67{fprintf(stderr, "\n")}
fprintf#69{fprintf(stderr, "%0.2lf ", C[i][j])}
****************************************/
for(i = 0; i < ni; i++) {
/*************** Clava msgError **************
Variables Access as passed arguments Can not be traced inside of function calls :
fprintf#67{fprintf(stderr, "\n")}
fprintf#69{fprintf(stderr, "%0.2lf ", C[i][j])}
****************************************/
for(j = 0; j < nj; j++) {
/* newline every 20 values for readability */
if((i * ni + j) % 20 == 0) fprintf((__acrt_iob_func(2)), "\n");
fprintf((__acrt_iob_func(2)), "%0.2lf ", C[i][j]);
}
}
fprintf((__acrt_iob_func(2)), "\nend dump: %s\n", "C");
fprintf((__acrt_iob_func(2)), "==END DUMP_ARRAYS==\n");
}
/*Main computational kernel. The whole function will be timed,
including the call and return.*/
static void kernel_gemm(int ni, int nj, int nk, double alpha, double beta, double C[1000][1100], double A[1000][1200], double B[1200][1100]) {
int i, j, k;
#pragma omp parallel for default(shared) private(i, j, k) firstprivate(ni, nj, beta, nk, alpha)
for(i = 0; i < ni; i++) {
#pragma omp parallel for default(shared) private(j) firstprivate(nj, i, beta)
for(j = 0; j < nj; j++) C[i][j] *= beta;
#pragma omp parallel for default(shared) private(k, j) firstprivate(nk, nj, alpha, i)
for(k = 0; k < nk; k++) {
#pragma omp parallel for default(shared) private(j) firstprivate(nj, alpha, i, k)
for(j = 0; j < nj; j++) C[i][j] += alpha * A[i][k] * B[k][j];
}
}
}
/* Driver: allocate the three gemm operands, initialize them, time the
 * kernel with the Windows QueryPerformanceCounter API (hence windows.h),
 * and print the elapsed milliseconds.  The argc > 42 guard keeps the
 * print_array call (and thus the arrays) live without ever running it. */
int main(int argc, char ** argv) {
/*Retrieve problem size.*/
int ni = 1000;
int nj = 1100;
int nk = 1200;
/*Variable declaration/allocation.*/
double alpha;
double beta;
double (*C)[1000][1100];
C = (double (*)[1000][1100]) polybench_alloc_data((1000 + 0) * (1100 + 0), sizeof(double));
;
double (*A)[1000][1200];
A = (double (*)[1000][1200]) polybench_alloc_data((1000 + 0) * (1200 + 0), sizeof(double));
;
double (*B)[1200][1100];
B = (double (*)[1200][1100]) polybench_alloc_data((1200 + 0) * (1100 + 0), sizeof(double));
;
/*Initialize array(s).*/
init_array(ni, nj, nk, &alpha, &beta, *C, *A, *B);
/*Start timer.*/
;
/*Run kernel.*/
printf("Started gemm\n");
/* high-resolution wall-clock timing via QPC: (end-start)/frequency -> ms */
LARGE_INTEGER clava_timing_start_0, clava_timing_end_0, clava_timing_frequency_0;
QueryPerformanceFrequency(&clava_timing_frequency_0);
QueryPerformanceCounter(&clava_timing_start_0);
kernel_gemm(ni, nj, nk, alpha, beta, *C, *A, *B);
QueryPerformanceCounter(&clava_timing_end_0);
double clava_timing_duration_0 = ((clava_timing_end_0.QuadPart-clava_timing_start_0.QuadPart) / (double)clava_timing_frequency_0.QuadPart) * (1000);
printf("Exec time (ms):%f\n", clava_timing_duration_0);
/*Stop and print timer.*/
;
;
/*Prevent dead-code elimination. All live-out data must be printed
by the function call in argument.*/
if(argc > 42 && !strcmp(argv[0], "")) print_array(ni, nj, *C);
free((void *) C);
/*Be clean.*/
;
free((void *) A);
;
free((void *) B);
;
return 0;
}
|
papiw_util.h | #ifndef PAPIWUTIL
#define PAPIWUTIL
#ifndef NOPAPIW
#include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <iostream>
#include <algorithm>
#include <papi.h>
#include <omp.h>
#include <pthread.h>
/**
* PapiWrapper abstract class
*
* This interface defines the public interface and provides default functionality
* and further utility functions
*/
class PapiWrapper
{
public:
    virtual ~PapiWrapper() {}

    /* Pure interface implemented by the sequential/parallel specializations. */
    virtual void AddEvent(const int eventCode) = 0;
    virtual void Start() = 0;
    virtual void Stop() = 0;
    virtual long long GetResult(const int eventCode) = 0;
    virtual void Print() = 0;
    virtual void Reset() = 0;

    /**
     * Default
     *
     * Initializes the PAPI library, runs subclass-specific setup via
     * localInit(), then registers every event code passed in.
     *
     * @tparam PapiCodes a variadic list of PAPI eventcodes
     * @warning Exits with an error if called in a parallel region
     */
    template <typename... PapiCodes>
    void Init(PapiCodes const... eventcodes)
    {
        /* Initialize the PAPI library */
        retval = PAPI_library_init(PAPI_VER_CURRENT);
        if (retval != PAPI_VER_CURRENT)
            handle_error("Init", "PAPI library init error!\n", retval);
        /* Some more initialization inside the specialization classes*/
        localInit();
        /* Prepare Events */
        static_assert(std::conjunction<std::is_integral<PapiCodes>...>(),
                      "All parameters to Init must be of integral type");
        int args[]{eventcodes...}; //unpack
        for (auto eventcode : args)
            AddEvent(eventcode);
    }

protected:
    int retval; // last PAPI return code, checked by the error helpers

    /* Hook for subclass-specific initialization; default is a no-op. */
    virtual void localInit() {}

    /* Exit with an error message (process terminates with exit(1)) */
    void handle_error(const char *location, const char *msg, const int retval = PAPI_OK)
    {
        if (retval == PAPI_OK)
            fprintf(stderr, "PAPI ERROR in %s: %s\n", location, msg);
        else
            fprintf(stderr, "PAPI ERROR (Code %d) in %s: %s\n", retval, location, msg);
        exit(1);
    }

    /* Print a warning message (non-fatal counterpart of handle_error) */
    void issue_waring(const char *location, const char *msg, const int retval = PAPI_OK)
    {
        if (retval == PAPI_OK)
            fprintf(stderr, "PAPI WARNING in %s: %s\n", location, msg);
        else
            fprintf(stderr, "PAPI WARNING (Code %d) in %s: %s\n", retval, location, msg);
    }

    /* Print results: first one line per event with its description, then
       a machine-parsable header row ("@%% ") and value row ("@%@ "). */
    void print(const std::vector<int> &events, const long long *values)
    {
        for (auto eventCode : events)
            std::cout << getDescription(eventCode) << ": " << GetResult(eventCode) << std::endl;
        /* Print Headers: first whitespace-free token of each description,
           truncated to 20 characters */
        std::cout << "@%% ";
        for (auto eventCode : events)
        {
            auto description = getDescription(eventCode);
            for (int j = 0; description[j] != '\0' && description[j] != ' ' && j < 20; j++)
                std::cout << description[j];
            std::cout << " ";
        }
        std::cout << std::endl;
        /* Print results */
        int count = events.size();
        std::cout << "@%@ ";
        for (int i = 0; i < count; i++)
            std::cout << values[i] << " ";
        std::cout << std::endl;
    }

    /* Get Description Text of event (static strings; no allocation) */
    const char *getDescription(const int eventCode)
    {
        switch (eventCode)
        {
        case PAPI_L1_DCM:
            return "PAPI_L1_DCM (Level 1 data cache misses)";
        case PAPI_L1_ICM:
            return "PAPI_L1_ICM (Level 1 instruction cache misses)";
        case PAPI_L2_DCM:
            return "PAPI_L2_DCM (Level 2 data cache misses)";
        case PAPI_L2_ICM:
            return "PAPI_L2_ICM (Level 2 instruction cache misses)";
        case PAPI_L3_DCM:
            return "PAPI_L3_DCM (Level 3 data cache misses)";
        case PAPI_L3_ICM:
            return "PAPI_L3_ICM (Level 3 instruction cache misses)";
        case PAPI_L1_TCM:
            return "PAPI_L1_TCM (Level 1 total cache misses)";
        case PAPI_L2_TCM:
            return "PAPI_L2_TCM (Level 2 total cache misses)";
        case PAPI_L3_TCM:
            return "PAPI_L3_TCM (Level 3 total cache misses)";
        case PAPI_CA_SNP:
            return "PAPI_CA_SNP (Snoops)";
        case PAPI_CA_SHR:
            return "PAPI_CA_SHR (Request for shared cache line (SMP))";
        case PAPI_CA_CLN:
            return "PAPI_CA_CLN (Request for clean cache line (SMP))";
        case PAPI_CA_INV:
            return "PAPI_CA_INV (Request for cache line Invalidation (SMP))";
        case PAPI_CA_ITV:
            return "PAPI_CA_ITV (Request for cache line Intervention (SMP))";
        case PAPI_L3_LDM:
            return "PAPI_L3_LDM (Level 3 load misses)";
        case PAPI_L3_STM:
            return "PAPI_L3_STM (Level 3 store misses)";
        case PAPI_BRU_IDL:
            return "PAPI_BRU_IDL (Cycles branch units are idle)";
        case PAPI_FXU_IDL:
            return "PAPI_FXU_IDL (Cycles integer units are idle)";
        case PAPI_FPU_IDL:
            return "PAPI_FPU_IDL (Cycles floating point units are idle)";
        case PAPI_LSU_IDL:
            return "PAPI_LSU_IDL (Cycles load/store units are idle)";
        case PAPI_TLB_DM:
            return "PAPI_TLB_DM (Data translation lookaside buffer misses)";
        case PAPI_TLB_IM:
            return "PAPI_TLB_IM (Instr translation lookaside buffer misses)";
        case PAPI_TLB_TL:
            return "PAPI_TLB_TL (Total translation lookaside buffer misses)";
        case PAPI_L1_LDM:
            return "PAPI_L1_LDM (Level 1 load misses)";
        case PAPI_L1_STM:
            return "PAPI_L1_STM (Level 1 store misses)";
        case PAPI_L2_LDM:
            return "PAPI_L2_LDM (Level 2 load misses)";
        case PAPI_L2_STM:
            return "PAPI_L2_STM (Level 2 store misses)";
        case PAPI_BTAC_M:
            return "PAPI_BTAC_M (BTAC miss)";
        case PAPI_PRF_DM:
            return "PAPI_PRF_DM (Prefetch data instruction caused a miss)";
        case PAPI_L3_DCH:
            return "PAPI_L3_DCH (Level 3 Data Cache Hit)";
        case PAPI_TLB_SD:
            return "PAPI_TLB_SD (Xlation lookaside buffer shootdowns (SMP))";
        case PAPI_CSR_FAL:
            return "PAPI_CSR_FAL (Failed store conditional instructions)";
        case PAPI_CSR_SUC:
            return "PAPI_CSR_SUC (Successful store conditional instructions)";
        case PAPI_CSR_TOT:
            return "PAPI_CSR_TOT (Total store conditional instructions)";
        case PAPI_MEM_SCY:
            return "PAPI_MEM_SCY (Cycles Stalled Waiting for Memory Access)";
        case PAPI_MEM_RCY:
            return "PAPI_MEM_RCY (Cycles Stalled Waiting for Memory Read)";
        case PAPI_MEM_WCY:
            return "PAPI_MEM_WCY (Cycles Stalled Waiting for Memory Write)";
        case PAPI_STL_ICY:
            return "PAPI_STL_ICY (Cycles with No Instruction Issue)";
        case PAPI_FUL_ICY:
            return "PAPI_FUL_ICY (Cycles with Maximum Instruction Issue)";
        case PAPI_STL_CCY:
            return "PAPI_STL_CCY (Cycles with No Instruction Completion)";
        case PAPI_FUL_CCY:
            return "PAPI_FUL_CCY (Cycles with Maximum Instruction Completion)";
        case PAPI_HW_INT:
            return "PAPI_HW_INT (Hardware interrupts)";
        case PAPI_BR_UCN:
            return "PAPI_BR_UCN (Unconditional branch instructions executed)";
        case PAPI_BR_CN:
            return "PAPI_BR_CN (Conditional branch instructions executed)";
        case PAPI_BR_TKN:
            return "PAPI_BR_TKN (Conditional branch instructions taken)";
        case PAPI_BR_NTK:
            return "PAPI_BR_NTK (Conditional branch instructions not taken)";
        case PAPI_BR_MSP:
            return "PAPI_BR_MSP (Conditional branch instructions mispred)";
        case PAPI_BR_PRC:
            return "PAPI_BR_PRC (Conditional branch instructions corr. pred)";
        case PAPI_FMA_INS:
            return "PAPI_FMA_INS (FMA instructions completed)";
        case PAPI_TOT_IIS:
            return "PAPI_TOT_IIS (Total instructions issued)";
        case PAPI_TOT_INS:
            return "PAPI_TOT_INS (Total instructions executed)";
        case PAPI_INT_INS:
            return "PAPI_INT_INS (Integer instructions executed)";
        case PAPI_FP_INS:
            return "PAPI_FP_INS (Floating point instructions executed)";
        case PAPI_LD_INS:
            return "PAPI_LD_INS (Load instructions executed)";
        case PAPI_SR_INS:
            return "PAPI_SR_INS (Store instructions executed)";
        case PAPI_BR_INS:
            return "PAPI_BR_INS (Total branch instructions executed)";
        case PAPI_VEC_INS:
            return "PAPI_VEC_INS (Vector/SIMD instructions executed (could include integer))";
        case PAPI_RES_STL:
            return "PAPI_RES_STL (Cycles processor is stalled on resource)";
        case PAPI_FP_STAL:
            return "PAPI_FP_STAL (Cycles any FP units are stalled)";
        case PAPI_TOT_CYC:
            return "PAPI_TOT_CYC (Total cycles executed)";
        case PAPI_LST_INS:
            return "PAPI_LST_INS (Total load/store inst. executed)";
        case PAPI_SYC_INS:
            return "PAPI_SYC_INS (Sync. inst. executed)";
        case PAPI_L1_DCH:
            return "PAPI_L1_DCH (L1 D Cache Hit)";
        case PAPI_L2_DCH:
            return "PAPI_L2_DCH (L2 D Cache Hit)";
        case PAPI_L1_DCA:
            return "PAPI_L1_DCA (L1 D Cache Access)";
        case PAPI_L2_DCA:
            return "PAPI_L2_DCA (L2 D Cache Access)";
        case PAPI_L3_DCA:
            return "PAPI_L3_DCA (L3 D Cache Access)";
        case PAPI_L1_DCR:
            return "PAPI_L1_DCR (L1 D Cache Read)";
        case PAPI_L2_DCR:
            return "PAPI_L2_DCR (L2 D Cache Read)";
        case PAPI_L3_DCR:
            return "PAPI_L3_DCR (L3 D Cache Read)";
        case PAPI_L1_DCW:
            return "PAPI_L1_DCW (L1 D Cache Write)";
        case PAPI_L2_DCW:
            return "PAPI_L2_DCW (L2 D Cache Write)";
        case PAPI_L3_DCW:
            return "PAPI_L3_DCW (L3 D Cache Write)";
        case PAPI_L1_ICH:
            return "PAPI_L1_ICH (L1 instruction cache hits)";
        case PAPI_L2_ICH:
            return "PAPI_L2_ICH (L2 instruction cache hits)";
        case PAPI_L3_ICH:
            return "PAPI_L3_ICH (L3 instruction cache hits)";
        case PAPI_L1_ICA:
            return "PAPI_L1_ICA (L1 instruction cache accesses)";
        case PAPI_L2_ICA:
            return "PAPI_L2_ICA (L2 instruction cache accesses)";
        case PAPI_L3_ICA:
            return "PAPI_L3_ICA (L3 instruction cache accesses)";
        case PAPI_L1_ICR:
            return "PAPI_L1_ICR (L1 instruction cache reads)";
        case PAPI_L2_ICR:
            return "PAPI_L2_ICR (L2 instruction cache reads)";
        case PAPI_L3_ICR:
            return "PAPI_L3_ICR (L3 instruction cache reads)";
        case PAPI_L1_ICW:
            return "PAPI_L1_ICW (L1 instruction cache writes)";
        case PAPI_L2_ICW:
            return "PAPI_L2_ICW (L2 instruction cache writes)";
        case PAPI_L3_ICW:
            return "PAPI_L3_ICW (L3 instruction cache writes)";
        case PAPI_L1_TCH:
            return "PAPI_L1_TCH (L1 total cache hits)";
        case PAPI_L2_TCH:
            return "PAPI_L2_TCH (L2 total cache hits)";
        case PAPI_L3_TCH:
            return "PAPI_L3_TCH (L3 total cache hits)";
        case PAPI_L1_TCA:
            return "PAPI_L1_TCA (L1 total cache accesses)";
        case PAPI_L2_TCA:
            return "PAPI_L2_TCA (L2 total cache accesses)";
        case PAPI_L3_TCA:
            return "PAPI_L3_TCA (L3 total cache accesses)";
        case PAPI_L1_TCR:
            return "PAPI_L1_TCR (L1 total cache reads)";
        case PAPI_L2_TCR:
            return "PAPI_L2_TCR (L2 total cache reads)";
        case PAPI_L3_TCR:
            return "PAPI_L3_TCR (L3 total cache reads)";
        case PAPI_L1_TCW:
            return "PAPI_L1_TCW (L1 total cache writes)";
        case PAPI_L2_TCW:
            return "PAPI_L2_TCW (L2 total cache writes)";
        case PAPI_L3_TCW:
            return "PAPI_L3_TCW (L3 total cache writes)";
        case PAPI_FML_INS:
            return "PAPI_FML_INS (FM ins)";
        case PAPI_FAD_INS:
            return "PAPI_FAD_INS (FA ins)";
        case PAPI_FDV_INS:
            return "PAPI_FDV_INS (FD ins)";
        case PAPI_FSQ_INS:
            return "PAPI_FSQ_INS (FSq ins)";
        case PAPI_FNV_INS:
            return "PAPI_FNV_INS (Finv ins)";
        case PAPI_FP_OPS:
            return "PAPI_FP_OPS (Floating point operations executed)";
        case PAPI_SP_OPS:
            return "PAPI_SP_OPS (Floating point operations executed: optimized to count scaled single precision vector operations)";
        case PAPI_DP_OPS:
            return "PAPI_DP_OPS (Floating point operations executed: optimized to count scaled double precision vector operations)";
        case PAPI_VEC_SP:
            return "PAPI_VEC_SP (Single precision vector/SIMD instructions)";
        case PAPI_VEC_DP:
            return "PAPI_VEC_DP (Double precision vector/SIMD instructions)";
        case PAPI_REF_CYC:
            return "PAPI_REF_CYC (Reference clock cycles)";
        default:
            return "UNKNOWN CODE";
        }
    }
};
/**
* PapiWrapper class for Sequential use
*
 * It is discouraged to use this class directly; prefer the utility functions
* inside the PAPIW namespace.
*/
class PapiWrapperSingle : public PapiWrapper
{
private:
static int const papiMaxAllowedCounters = 20;
int eventSet = PAPI_NULL;
bool running = false;
long long buffer[papiMaxAllowedCounters];
long long values[papiMaxAllowedCounters];
std::vector<int> events;
public:
PapiWrapperSingle() : ThreadID(0) {}
PapiWrapperSingle(const unsigned long threadID) : ThreadID(threadID) {}
~PapiWrapperSingle() {}
const unsigned long ThreadID;
/* Add an event to be counted */
void AddEvent(const int eventCode) override
{
if (running)
handle_error("AddEvent", "You can't add events while Papi is running\n");
if (events.size() >= papiMaxAllowedCounters)
handle_error("AddEvent", "Event count limit exceeded. Check papiMaxAllowedCounters\n");
if (eventSet == PAPI_NULL)
{
retval = PAPI_create_eventset(&eventSet);
if (retval != PAPI_OK)
handle_error("AddEvent", "Could not create event set", retval);
}
retval = PAPI_add_event(eventSet, eventCode);
if (retval != PAPI_OK)
issue_waring("AddEvent. Could not add", getDescription(eventCode), retval);
else
events.push_back(eventCode);
}
/* Start the counter */
void Start() override
{
if (running)
handle_error("Start", "You can not start an already running PAPI instance");
retval = PAPI_start(eventSet);
if (retval != PAPI_OK)
handle_error("Start", "Could not start PAPI counters", retval);
running = true;
}
/* Stop the counter */
void Stop() override
{
if (!running)
handle_error("Stop", "You can not stop an already stopped Papi instance");
retval = PAPI_stop(eventSet, buffer);
if (retval != PAPI_OK)
handle_error("Stop", "Could not stop PAPI counters", retval);
int count = events.size();
for (int i = 0; i < count; i++)
values[i] += buffer[i];
running = false;
}
/* Get the result of a specific event */
long long GetResult(const int eventCode) override
{
if (running)
handle_error("GetResult", "You can't get results while Papi is running\n");
auto indexInResult = std::find(events.begin(), events.end(), eventCode);
if (indexInResult == events.end())
handle_error("GetResult", "The event is not supported or has not been added to the set");
return values[indexInResult - events.begin()];
}
/* Reset the intermediate counter values */
void Reset()
{
if (running)
handle_error("Reset", "You can't reset while Papi is running\n");
localInit();
}
/* Getter Method for running state */
bool IsRunning()
{
return running;
}
/* Print the results */
const long long *GetValues()
{
return values;
}
/* Print the results */
void Print() override
{
if (running)
handle_error("Print", "You can not print while Papi is running. Stop the counters first!");
std::cout << "PAPIW Single PapiWrapper instance report:" << std::endl;
print(events, values);
}
protected:
/* Initialize the values array */
void localInit() override
{
for (int i = 0; i < papiMaxAllowedCounters; i++)
values[i] = 0;
}
};
#ifdef _OPENMP
/**
* PapiWrapper class for Parallel use
*
 * It is discouraged to use this class directly; prefer the utility functions
* inside the PAPIW namespace.
*/
class PapiWrapperParallel : public PapiWrapper
{
private:
inline static PapiWrapperSingle *localPapi;
#pragma omp threadprivate(localPapi)
std::vector<int> events;
std::vector<long long> values;
int numRunningThreads = 0; //0 is none running
bool startedFromParallelRegion = false;
public:
PapiWrapperParallel() {}
~PapiWrapperParallel()
{
std::cout << "Destructing Local Papis" << std::endl;
checkNotInParallelRegion("DESTRUCTOR");
#pragma omp parallel
{
delete localPapi;
localPapi = nullptr;
}
}
/* Register events to be counted */
void AddEvent(const int eventCode) override
{
checkNotInParallelRegion("ADD_EVENT");
checkNoneRunning("ADD_EVENT");
events.push_back(eventCode);
values.push_back(0);
}
/* Start the counters */
void Start() override
{
if (isInParallelRegion())
{
#pragma omp single
checkNoneRunning("START");
startedFromParallelRegion = true;
start();
}
else
{
checkNoneRunning("START");
startedFromParallelRegion = false;
#pragma omp parallel
{
start();
}
}
}
/* Stop the counters */
void Stop() override
{
checkNumberOfThreads("STOP");
if (isInParallelRegion())
{
if (!startedFromParallelRegion)
issue_waring("Stop", "The Papi Counters have not been started in a parallel Region. You should not stop them in a parallel region, however, the results should be fine.");
stop();
#pragma omp barrier
numRunningThreads = 0;
}
else
{
if (startedFromParallelRegion)
issue_waring("Stop", "The Papi Counters have been started in a parallel Region. You should stop them in the same parallel region or move Start/Stop completely out of the parallel region.");
#pragma omp parallel
{
stop();
}
numRunningThreads = 0;
}
}
/* Get the result of a specific event */
long long GetResult(const int eventCode) override
{
checkNoneRunning("GET_RESULT");
auto indexInResult = std::find(events.begin(), events.end(), eventCode);
if (indexInResult == events.end())
handle_error("GetResult", "The event is not supported or has not been added to the set");
return values[indexInResult - events.begin()];
}
/* Print the values */
void Print() override
{
checkNoneRunning("PRINT");
#pragma omp single
{
std::cout << "PAPIW Parallel PapiWrapper instance report:" << std::endl;
print(events, values.data());
}
}
/* Reset the values */
void Reset()
{
checkNoneRunning("RESET");
#pragma omp single
std::fill(values.begin(), values.end(), 0);
}
protected:
/* Initialize the instance */
void localInit() override
{
checkNotInParallelRegion("INIT");
retval = PAPI_thread_init(pthread_self);
if (retval != PAPI_OK)
handle_error("localInit in PapiWrapperParallel", "Could not initialize OMP Support", retval);
else
std::cout << "Papi Parallel support enabled" << std::endl;
}
/* Helper function to start the counters */
void start()
{
#pragma omp single
numRunningThreads = omp_get_num_threads();
retval = PAPI_register_thread();
if (retval != PAPI_OK)
handle_error("Start", "Couldn't register thread", retval);
localPapi = new PapiWrapperSingle(pthread_self());
for (auto eventCode : events)
localPapi->AddEvent(eventCode);
localPapi->Start();
}
/* Helper function to stop the counters and accumulate the values to total */
void stop()
{
localPapi->Stop();
/*Check that same thread is used since starting the counters*/
if (PAPI_thread_id() != localPapi->ThreadID)
handle_error("Stop", "Invalid State: The Thread Ids differs from initialization!\nApparently, new threads were use without reassigning the Papi counters. Please Start and Stop more often to avoid this error.");
int eventCount = events.size();
for (int i = 0; i < eventCount; i++)
{
auto localVal = localPapi->GetResult(events[i]);
#pragma omp atomic
values[i] += localVal;
}
delete localPapi;
localPapi = nullptr;
retval = PAPI_unregister_thread();
if (retval != PAPI_OK)
handle_error("Stop", "Couldn't unregister thread", retval);
}
/* Returns the current OMP team size */
int GetNumThreads()
{
if (isInParallelRegion())
return omp_get_num_threads();
int count;
#pragma omp parallel
{
#pragma omp master
count = omp_get_num_threads();
}
return count;
}
/* Returns true if it is called in a parallel region or false otherwise */
bool isInParallelRegion()
{
return omp_get_level() != 0;
}
/* Check that the current thread team size is not larger then it was last registered or exit with an error otherwise */
void checkNumberOfThreads(const char *actionMsg)
{
/* State check */
if (GetNumThreads() != numRunningThreads)
handle_error(actionMsg, "The OMP teamsize is different than indicated in Start!");
}
/* Check that this method is not called from a parallel context or exit with an error otherwise */
void checkNotInParallelRegion(const char *actionMsg)
{
if (isInParallelRegion())
handle_error(actionMsg, "You may not perform this operation from a parallel region");
}
/* Check that no Papi Counter is running or exit with an error otherwise */
void checkNoneRunning(const char *actionMsg)
{
if (numRunningThreads)
handle_error(actionMsg, "You can not perform this action while Papi is running. Stop the counters first!");
}
};
#endif
#endif
#endif |
mssql12_fmt_plug.c | /* Modified in August, 2012 by Dhiru Kholia (dhiru at openwall.com) for MS SQL 2012
*
* This software is Copyright (c) 2010 bartavelle, <bartavelle at bandecon.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification, are permitted.
*
* Modified by Mathieu Perrin (mathieu at tpfh.org) 09/06
* Microsoft MS-SQL05 password cracker
*
* UTF-8 support by magnum 2011, same terms as above
*
* Creating MS SQL 2012 hashes:
*
* sqlcmd -L
* sqlcmd -S <server> -U sa -P <password>
* 1> select pwdencrypt("openwall")
* 2> go
*
* Dumping hashes from MS SQL server 2012:
*
* sqlcmd -S <server> -U sa -P <password>
* 1> select * from sys.sql_logins
* 2> go
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_mssql12;
#elif FMT_REGISTERS_H
john_register_one(&fmt_mssql12);
#else
#include <string.h>
#include "arch.h"
//#undef _OPENMP
//#undef SIMD_COEF_32
//#undef SIMD_COEF_64
//#undef SIMD_PARA_SHA512
/*
* Only effective for SIMD.
* Undef to disable reversing steps for benchmarking.
*/
#define REVERSE_STEPS
#include "misc.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "unicode.h"
#include "sha2.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#include "memdbg.h"
#ifdef _OPENMP
#include <omp.h>
#ifdef SIMD_COEF_64
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 1024 // tuned K8-dual HT
#endif
#endif
#endif
#define FORMAT_LABEL "mssql12"
#define FORMAT_NAME "MS SQL 2012/2014"
#define ALGORITHM_NAME "SHA512 " SHA512_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH ((111 - SALT_SIZE) / 2)
#define CIPHERTEXT_LENGTH 54 + 44 * 2
#define BINARY_SIZE 8
#define DIGEST_SIZE 64
#define BINARY_ALIGN 8
#define SALT_SIZE 4
#define SALT_ALIGN 4
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#ifndef SHA_BUF_SIZ
#define SHA_BUF_SIZ 16
#endif
/* Known hash/plaintext pairs used by the self-test. Format:
 * "0x0200" + 8 hex chars of salt + 128 hex chars of SHA-512 digest. */
static struct fmt_tests tests[] = {
	{"0x0200F733058A07892C5CACE899768F89965F6BD1DED7955FE89E1C9A10E27849B0B213B5CE92CC9347ECCB34C3EFADAF2FD99BFFECD8D9150DD6AACB5D409A9D2652A4E0AF16", "Password1!"},
	{"0x0200AB3E1F9028A739EEF62ABF672427276A32D5EDD349E638E7F2CD81DAA247CFE20EE4E3B0A30B2D0AE3C3FA010E61752F1BF45E045041F1B988C083C7F118527E3E5F0562", "openwall"},
	/* hashes from https://hashcat.net/forum */
	{"0x02006BF4AB05873FF0C8A4AFD1DC5912CBFDEF62E0520A3353B04E1184F05C873C9C76BBADDEAAC1E9948C7B6ABFFD62BFEFD7139F17F6AFE10BE0FEE7A178644623067C2423", "carlos"},
	{"0x0200935819BA20F1C7289CFF2F8FF9F0E40DA5E6D04986F988CFE6603DA0D2BC0160776614763198967D603FBD8C103151A15E70D18E7B494C7F13F16804A7A4EB206084E632", "test"},
	{"0x0200570AC969EF7C6CCB3312E8BEDE1D635EB852C06496957F0FA845B20FCD1C7C457474A5B948B68C47C2CB704D08978871F532C9EB11199BB5F56A06AC915C3799DB8A64C1", "test1"},
	{"0x0200A56045DBCD848E297FA8D06E7579D62B7129928CA0BC5D232A7320972EF5A5455C01411B8D3A7FF3D18A55058A12FAEE5DA410AFE6CE61FF5C39E5FF57CD3EDD57DB1C3B", "test2"},
	{"0x020059799F1B6D897BE2C5A76D3FFDC52B308190E82FA01F2FA51129B4863A7EE21B3FF6FE9F7850976045237805F338DD36DC9345B429F47A402614C6F2F2B02C56DF14C4F4", "Paul"},
	{"0x0200881E2999DD8E3583695F405696257B99559953705A34D774C15AC1D42699BB77BC56DB5F657751335C1B350890E643790553B60329CAE7A2E7D3C04CF8856C4DB0058723", "DBAmaster"},
	{"0x0200D648446E70180A6DFB6DF14DB38623EBFE490FE445751900FD5DC45A2B5D20D7AFFE8C6FFC2890BAE1AF34430A21F2F1E4DE50E25757FDB4789716D8D85C6985A00BC454", "database"},
	{"0x02008AC3B9DC7B67EF9D3C1D25D8007A4B957D5BD61D71E5E9DA08D9F8F012EDDAD168E1CADD93D4627433FBFEE8BCF6CBB42D5B9A31886FC5FF7F970B164F4B5815E03D6DE7", "jhl9mqe5"},
	{"0x020094C4D05A082DB1362B1A972C5D5F1C04C527090A7427E93C13AFEC705A011D8980E994FA647C7D44E25A427246218E25674571DB1710E49C713FB17129549C29E303086A", "coldfusion"},
	{"0x0200B9BD5C85918D9BEE84417957618FBA1CB80B71E81550FAE09AD027B4089017CD6461D8EC9509873C2D5096CDBE8F16E4EFA9035C35F9F4917CE58DB99DC6836CEA7483A7", "sql2005"},
	{NULL}
};
/* Salt set by set_salt(); appended to each UTF-16 password before hashing. */
static unsigned char cursalt[SALT_SIZE];
#ifdef SIMD_COEF_64
/* SIMD path: flat 128-byte SHA-512 input blocks, one per candidate. */
static ARCH_WORD_64 (*saved_key)[SHA_BUF_SIZ];
static ARCH_WORD_64 (*crypt_out);
static int max_keys;
static int new_keys; /* set when keys/salt changed and buffers need padding fix-up */
#else
/* Scalar path: UTF-16 password followed by room for the salt. */
static char (*saved_key)[(PLAINTEXT_LENGTH + 1) * 2 + SALT_SIZE];
static ARCH_WORD_64 (*crypt_out)[DIGEST_SIZE / 8];
static int *saved_len; /* per-candidate key length in bytes (UTF-16) */
#endif
/* Check whether a ciphertext string has the exact mssql12 shape:
 * "0x0200" followed by uppercase-hex salt+digest of the right length.
 * Lowercase hex digits are rejected (same as the original: the 'a'-'f'
 * range was deliberately not accepted). */
static int valid(char *ciphertext, struct fmt_main *self)
{
	int pos;

	if (strlen(ciphertext) != CIPHERTEXT_LENGTH)
		return 0;
	if (strncmp(ciphertext, "0x0200", 6) != 0)
		return 0;
	for (pos = 6; pos < CIPHERTEXT_LENGTH; pos++) {
		char c = ciphertext[pos];
		if (c >= '0' && c <= '9')
			continue;
		if (c >= 'A' && c <= 'F')
			continue;
		return 0;
	}
	return 1;
}
/* Install the 4-byte binary salt for subsequent crypt_all() calls.
 * In the SIMD path this also forces the salt/padding fix-up of the
 * flat input buffers on the next crypt_all(). */
static void set_salt(void *salt)
{
	memcpy(cursalt, salt, SALT_SIZE);
#ifdef SIMD_COEF_64
	new_keys = 1;
#endif
}
/* Extract the 4-byte binary salt from hex chars 6..13 of the ciphertext.
 * Returns a pointer to a lazily allocated static buffer (JtR convention:
 * callers copy the result via set_salt). */
static void *get_salt(char *ciphertext)
{
	static unsigned char *out2;
	int l;
	if (!out2) out2 = mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD);
	for (l = 0;l<SALT_SIZE;l++)
	{
		/* two hex nibbles -> one byte */
		out2[l] = atoi16[ARCH_INDEX(ciphertext[l*2+6])]*16
		        + atoi16[ARCH_INDEX(ciphertext[l*2+7])];
	}
	return out2;
}
static void set_key_enc(char *_key, int index);

/* Allocate the per-candidate key/hash buffers, scale key counts for OpenMP,
 * and pick the set_key implementation based on the target encoding. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	/* Scale keys-per-crypt with the thread count (OMP_SCALE-tuned). */
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_64
	/* SIMD: flat input blocks and digests must be SIMD-aligned. */
	saved_key = mem_calloc_align(self->params.max_keys_per_crypt,
	                             sizeof(*saved_key),
	                             MEM_ALIGN_SIMD);
	crypt_out = mem_calloc_align(self->params.max_keys_per_crypt,
	                             8 * sizeof(ARCH_WORD_64),
	                             MEM_ALIGN_SIMD);
	max_keys = self->params.max_keys_per_crypt;
#else
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_len));
#endif
	if (options.target_enc == UTF_8)
		/* UTF-8 may use up to 3 bytes per UTF-16 code unit */
		self->params.plaintext_length = MIN(125, PLAINTEXT_LENGTH * 3);
	if (options.target_enc != ISO_8859_1 &&
	    options.target_enc != ASCII)
		/* non-latin1 encodings need full conversion in set_key */
		self->methods.set_key = set_key_enc;
}
/* Release the buffers allocated in init(). */
static void done(void)
{
#ifndef SIMD_COEF_64
	MEM_FREE(saved_len);
#endif
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}
#ifdef SIMD_COEF_64
/* Zero all flat SHA-512 input blocks (SIMD path only) so stale bytes from
 * longer previous keys cannot leak into shorter new keys. */
static void clear_keys(void)
{
	memset(saved_key, 0, sizeof(*saved_key) * max_keys);
}
#endif
/* Fast-path key setter for ASCII/ISO-8859-1 input: widen each byte to a
 * UTF-16 code unit in place. */
static void set_key(char *_key, int index)
{
#ifndef SIMD_COEF_64
	/* ASCII or ISO-8859-1 to UCS-2 */
	UTF8 *s = (UTF8*)_key;
	UTF16 *d = (UTF16*)saved_key[index];
	for (saved_len[index] = 0; s[saved_len[index]]; saved_len[index]++)
#if ARCH_LITTLE_ENDIAN
		d[saved_len[index]] = s[saved_len[index]];
#else
		d[saved_len[index]] = s[saved_len[index]] << 8;
#endif
	d[saved_len[index]] = 0;
	/* length is kept in bytes (2 per UTF-16 unit) */
	saved_len[index] <<= 1;
#else
	ARCH_WORD_64 *keybuffer = saved_key[index];
	unsigned short *w16 = (unsigned short*)keybuffer;
	UTF8 *key = (UTF8*)_key;
	int len = 0;
	/* widen bytes to 16-bit units directly into the flat SHA-512 block */
	while ((*w16++ = *key++))
		len++;
	/* word 15 of the block holds the SHA-512 message bit length
	 * (key bytes + salt bytes, times 8) */
	keybuffer[15] = ((len << 1) + SALT_SIZE) << 3;
	new_keys = 1;
#endif
}
/* Full key setter for arbitrary target encodings: convert to UTF-16 with
 * enc_to_utf16(); on conversion error fall back to the length of the
 * successfully converted prefix. */
static void set_key_enc(char *_key, int index)
{
#ifndef SIMD_COEF_64
	/* Any encoding -> UTF-16 */
	saved_len[index] = enc_to_utf16((UTF16*)saved_key[index],
	                                PLAINTEXT_LENGTH,
	                                (unsigned char*)_key, strlen(_key));
	if (saved_len[index] < 0)
		saved_len[index] = strlen16((UTF16*)saved_key[index]);
	saved_len[index] <<= 1;
#else
	ARCH_WORD_64 *keybuffer = saved_key[index];
	UTF16 *w16 = (UTF16*)keybuffer;
	UTF8 *key = (UTF8*)_key;
	int len;
	len = enc_to_utf16(w16, PLAINTEXT_LENGTH, key, strlen(_key));
	if (len < 0)
		len = strlen16(w16);
	/* word 15 holds the message bit length (see set_key) */
	keybuffer[15] = ((len << 1) + SALT_SIZE) << 3;
	new_keys = 1;
#endif
}
/* Return candidate 'index' converted back from UTF-16 to the target
 * encoding (for display/logging). */
static char *get_key(int index)
{
#ifndef SIMD_COEF_64
	((UTF16*)saved_key[index])[saved_len[index]>>1] = 0;
	return (char*)utf16_to_enc((UTF16*)saved_key[index]);
#else
	ARCH_WORD_64 *keybuffer = saved_key[index];
	UTF16 *w16 = (UTF16*)keybuffer;
	static UTF16 out[PLAINTEXT_LENGTH + 1];
	unsigned int i, len;
	/* recover the key length from the stored message bit length */
	len = ((keybuffer[15] >> 3) - SALT_SIZE) >> 1;
	for(i = 0; i < len; i++)
		out[i] = w16[i];
	out[i] = 0;
	return (char*)utf16_to_enc(out);
#endif
}
/* Decode the 64-byte digest from the ciphertext hex (starting at offset 14,
 * after "0x0200" + salt). In the SIMD path the bytes are byte-swapped to
 * big-endian 64-bit words and the final SHA-512 rounds are reversed to
 * match the partially-reversed crypt_out values. */
static void *get_binary(char *ciphertext)
{
	static ARCH_WORD_64 out[SHA_BUF_SIZ];
	char *realcipher = (char*)out;
	int i;
	for (i = 0;i<DIGEST_SIZE;i++)
		realcipher[i] = atoi16[ARCH_INDEX(ciphertext[i*2+14])]*16 +
		                atoi16[ARCH_INDEX(ciphertext[i*2+15])];
#ifdef SIMD_COEF_64
	alter_endianity_to_BE64 (realcipher, DIGEST_SIZE/8);
#ifdef REVERSE_STEPS
	sha512_reverse(out);
#endif
#endif
	return (void *)realcipher;
}
#define BASE_IDX (((unsigned int)index&(SIMD_COEF_64-1))+(unsigned int)index/SIMD_COEF_64*8*SIMD_COEF_64)
#ifndef REVERSE_STEPS
#undef SSEi_REVERSE_STEPS
#define SSEi_REVERSE_STEPS 0
#endif
/* Hash all queued candidates: SHA512(UTF-16LE(password) . salt).
 * SIMD path works on flat pre-built blocks; scalar path uses the
 * SHA512_* incremental API per candidate. Returns the candidate count. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
#if defined(_OPENMP) || PLAINTEXT_LENGTH > 1
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
#ifdef SIMD_COEF_64
		/* When keys or salt changed, append the current salt and the
		 * 0x80 SHA-512 padding byte to each flat input block. */
		if (new_keys) {
			int i;
			for (i = 0; i < MAX_KEYS_PER_CRYPT; i++) {
				ARCH_WORD_64 *keybuffer = saved_key[index + i];
				unsigned char *wucp = (unsigned char*)keybuffer;
				/* key length in bytes, recovered from the bit length word */
				int j, len = (keybuffer[15] >> 3) - SALT_SIZE;
				if (len >= 0)
					for (j = 0; j < SALT_SIZE; j++)
						wucp[len + j] = cursalt[j];
				wucp[len + 4] = 0x80;
			}
		}
		SIMDSHA512body(&saved_key[index], &crypt_out[BASE_IDX], NULL, SSEi_REVERSE_STEPS | SSEi_FLAT_IN);
#else
		SHA512_CTX ctx;
		memcpy(saved_key[index]+saved_len[index], cursalt, SALT_SIZE);
		SHA512_Init(&ctx );
		SHA512_Update(&ctx, saved_key[index], saved_len[index]+SALT_SIZE );
		SHA512_Final((unsigned char *)crypt_out[index], &ctx);
#endif
	}
#ifdef SIMD_COEF_64
	new_keys = 0;
#endif
	return count;
}
/* Index of candidate 'index' inside the interleaved SIMD digest layout. */
#define HASH_IDX (((unsigned int)index&(SIMD_COEF_64-1))+(unsigned int)index/SIMD_COEF_64*8*SIMD_COEF_64)
#ifdef SIMD_COEF_64
/* Partial-hash accessors over the first 64-bit digest word (SIMD layout). */
static int get_hash_0 (int index) { return crypt_out[HASH_IDX] & PH_MASK_0; }
static int get_hash_1 (int index) { return crypt_out[HASH_IDX] & PH_MASK_1; }
static int get_hash_2 (int index) { return crypt_out[HASH_IDX] & PH_MASK_2; }
static int get_hash_3 (int index) { return crypt_out[HASH_IDX] & PH_MASK_3; }
static int get_hash_4 (int index) { return crypt_out[HASH_IDX] & PH_MASK_4; }
static int get_hash_5 (int index) { return crypt_out[HASH_IDX] & PH_MASK_5; }
static int get_hash_6 (int index) { return crypt_out[HASH_IDX] & PH_MASK_6; }
#else
/* Partial-hash accessors over the first digest word (scalar layout). */
static int get_hash_0(int index) { return (crypt_out[index])[0] & PH_MASK_0; }
static int get_hash_1(int index) { return (crypt_out[index])[0] & PH_MASK_1; }
static int get_hash_2(int index) { return (crypt_out[index])[0] & PH_MASK_2; }
static int get_hash_3(int index) { return (crypt_out[index])[0] & PH_MASK_3; }
static int get_hash_4(int index) { return (crypt_out[index])[0] & PH_MASK_4; }
static int get_hash_5(int index) { return (crypt_out[index])[0] & PH_MASK_5; }
static int get_hash_6(int index) { return (crypt_out[index])[0] & PH_MASK_6; }
#endif
/* Matching partial-hash functions over a loaded binary digest. */
static int binary_hash_0(void *binary) { return ((ARCH_WORD_64*)binary)[0] & PH_MASK_0; }
static int binary_hash_1(void *binary) { return ((ARCH_WORD_64*)binary)[0] & PH_MASK_1; }
static int binary_hash_2(void *binary) { return ((ARCH_WORD_64*)binary)[0] & PH_MASK_2; }
static int binary_hash_3(void *binary) { return ((ARCH_WORD_64*)binary)[0] & PH_MASK_3; }
static int binary_hash_4(void *binary) { return ((ARCH_WORD_64*)binary)[0] & PH_MASK_4; }
static int binary_hash_5(void *binary) { return ((ARCH_WORD_64*)binary)[0] & PH_MASK_5; }
static int binary_hash_6(void *binary) { return ((ARCH_WORD_64*)binary)[0] & PH_MASK_6; }
/* Quick scan: does any computed digest's first word match the target? */
static int cmp_all(void *binary, int count)
{
	unsigned int index;
	for (index = 0; index < count; index++)
#ifdef SIMD_COEF_64
		if (((ARCH_WORD_64*)binary)[0] == crypt_out[HASH_IDX])
			return 1;
#else
		if ( ((ARCH_WORD_64*)binary)[0] == crypt_out[index][0] )
			return 1;
#endif
	return 0;
}
/* Compare one candidate against the target on the stored partial digest
 * (full verification happens in cmp_exact). */
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_64
	return (((ARCH_WORD_64*)binary)[0] == crypt_out[HASH_IDX]);
#else
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
#endif
}
/* Full verification. In the SIMD path the stored digests are partial
 * (reversed last rounds), so recompute the complete SHA-512 of this one
 * candidate with the scalar API and compare all 64 bytes. */
static int cmp_exact(char *source, int index)
{
	ARCH_WORD_64 *binary = get_binary(source);
#if SIMD_COEF_64
	char *key = get_key(index);
	UTF16 wkey[PLAINTEXT_LENGTH];
	SHA512_CTX ctx;
	ARCH_WORD_64 crypt_out[DIGEST_SIZE / sizeof(ARCH_WORD_64)];
	int len;
	len = enc_to_utf16(wkey, PLAINTEXT_LENGTH, (UTF8*)key, strlen(key));
	if (len < 0)
		len = strlen16(wkey);
	len *= 2; /* UTF-16 units -> bytes */
	SHA512_Init(&ctx);
	SHA512_Update(&ctx, wkey, len);
	SHA512_Update(&ctx, cursalt, SALT_SIZE);
	SHA512_Final((unsigned char*)crypt_out, &ctx);
	/* apply the same transforms get_binary() applied to the target */
	alter_endianity_to_BE64(crypt_out, DIGEST_SIZE/8);
#ifdef REVERSE_STEPS
	sha512_reverse(crypt_out);
#endif
	return !memcmp(binary, crypt_out, DIGEST_SIZE);
#else
	return !memcmp(binary, crypt_out[index], DIGEST_SIZE);
#endif
}
/* Bucket a salt into the salt hash table. */
static int salt_hash(void *salt)
{
	// The >> 8 gave much better distribution on a huge set I analysed
	// although that was mssql05
	return (*((ARCH_WORD_32 *)salt) >> 8) & (SALT_HASH_SIZE - 1);
}
/* Format descriptor registered with John the Ripper: static parameters
 * followed by the method table wired to the functions above. */
struct fmt_main fmt_mssql12 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_OMP,
		{ NULL },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			binary_hash_0,
			binary_hash_1,
			binary_hash_2,
			binary_hash_3,
			binary_hash_4,
			binary_hash_5,
			binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
#ifdef SIMD_COEF_64
		clear_keys,
#else
		fmt_default_clear_keys,
#endif
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
libperf.c | /**
* Copyright (C) Mellanox Technologies Ltd. 2001-2019. ALL RIGHTS RESERVED.
* Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED.
* Copyright (C) The University of Tennessee and The University
* of Tennessee Research Foundation. 2015-2016. ALL RIGHTS RESERVED.
* Copyright (C) ARM Ltd. 2017. ALL RIGHTS RESERVED.
* See file LICENSE for terms.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <ucs/debug/log.h>
#include <ucs/arch/bitops.h>
#include <ucs/sys/module.h>
#include <ucs/sys/string.h>
#include <string.h>
#include <tools/perf/lib/libperf_int.h>
#include <unistd.h>
#include <ucs/sys/sock.h>
#if _OPENMP
#include <omp.h>
#endif /* _OPENMP */
/*
 * Set the 32- or 64-bit atomic capability bit matching the message size.
 * NOTE: expands to an early 'return' from the *calling* function when the
 * size is neither 4 nor 8 bytes.  _msg is a name table indexed by _op.
 */
#define ATOMIC_OP_CONFIG(_size, _op32, _op64, _op, _msg, _params, _status) \
    _status = __get_atomic_flag((_size), (_op32), (_op64), (_op)); \
    if (_status != UCS_OK) { \
        ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support atomic %s for " \
                  "message size %zu bytes", UCT_PERF_TEST_PARAMS_ARG(_params), \
                  (_msg)[_op], (_size)); \
        return _status; \
    }

/*
 * Verify all required atomic capability bits are present in _attr.
 * NOTE: expands to an early 'return UCS_ERR_UNSUPPORTED' from the calling
 * function when a bit is missing; the first missing capability is named
 * (via _msg) only when the test runs in verbose mode.
 */
#define ATOMIC_OP_CHECK(_size, _attr, _required, _params, _msg) \
    if (!ucs_test_all_flags(_attr, _required)) { \
        if ((_params)->flags & UCX_PERF_TEST_FLAG_VERBOSE) { \
            ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support required " \
                      #_size"-bit atomic: %s", UCT_PERF_TEST_PARAMS_ARG(_params), \
                      (_msg)[ucs_ffs64(~(_attr) & (_required))]); \
        } \
        return UCS_ERR_UNSUPPORTED; \
    }
/*
 * Fixed-size header exchanged between peers over the RTE during test setup;
 * the variable-length address/rkey blob follows it in the exchange buffer.
 */
typedef struct {
    union {
        struct {
            size_t dev_addr_len;     /* length of the UCT device address */
            size_t iface_addr_len;   /* length of the UCT interface address */
            size_t ep_addr_len;      /* length of the UCT endpoint address */
        } uct;
        struct {
            size_t worker_addr_len;  /* length of the UCP worker address */
            size_t total_wireup_len; /* total length of the wireup blob */
        } ucp;
    };
    size_t rkey_size;          /* size of the packed rkey that follows */
    unsigned long recv_buffer; /* peer's receive buffer address (uintptr_t) */
} ucx_perf_ep_info_t;
/* Allocator vtables indexed by memory type; entries are registered by
 * memory-type-specific modules (only the matching entry is used per run). */
const ucx_perf_allocator_t* ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_LAST];

/* Human-readable names for UCT iface capability flags, indexed by bit number
 * (used to report which required capability is missing). */
static const char *perf_iface_ops[] = {
    [ucs_ilog2(UCT_IFACE_FLAG_AM_SHORT)]         = "am short",
    [ucs_ilog2(UCT_IFACE_FLAG_AM_BCOPY)]         = "am bcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_AM_ZCOPY)]         = "am zcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_PUT_SHORT)]        = "put short",
    [ucs_ilog2(UCT_IFACE_FLAG_PUT_BCOPY)]        = "put bcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_PUT_ZCOPY)]        = "put zcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_GET_SHORT)]        = "get short",
    [ucs_ilog2(UCT_IFACE_FLAG_GET_BCOPY)]        = "get bcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_GET_ZCOPY)]        = "get zcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_ERRHANDLE_PEER_FAILURE)] = "peer failure handler",
    [ucs_ilog2(UCT_IFACE_FLAG_CONNECT_TO_IFACE)] = "connect to iface",
    [ucs_ilog2(UCT_IFACE_FLAG_CONNECT_TO_EP)]    = "connect to ep",
    [ucs_ilog2(UCT_IFACE_FLAG_AM_DUP)]           = "full reliability",
    [ucs_ilog2(UCT_IFACE_FLAG_CB_SYNC)]          = "sync callback",
    [ucs_ilog2(UCT_IFACE_FLAG_CB_ASYNC)]         = "async callback",
    [ucs_ilog2(UCT_IFACE_FLAG_PENDING)]          = "pending",
    [ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_SHORT)]  = "tag eager short",
    [ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_BCOPY)]  = "tag eager bcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_ZCOPY)]  = "tag eager zcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_TAG_RNDV_ZCOPY)]   = "tag rndv zcopy"
};

/* Names of plain atomic operations, indexed by UCT_ATOMIC_OP_* */
static const char *perf_atomic_op[] = {
    [UCT_ATOMIC_OP_ADD]   = "add",
    [UCT_ATOMIC_OP_AND]   = "and",
    [UCT_ATOMIC_OP_OR]    = "or" ,
    [UCT_ATOMIC_OP_XOR]   = "xor"
};

/* Names of fetching atomic operations, indexed by UCT_ATOMIC_OP_* */
static const char *perf_atomic_fop[] = {
    [UCT_ATOMIC_OP_ADD]   = "fetch-add",
    [UCT_ATOMIC_OP_AND]   = "fetch-and",
    [UCT_ATOMIC_OP_OR]    = "fetch-or",
    [UCT_ATOMIC_OP_XOR]   = "fetch-xor",
    [UCT_ATOMIC_OP_SWAP]  = "swap",
    [UCT_ATOMIC_OP_CSWAP] = "cswap"
};
/*
 * Quickselect median, based on the algorithm described in "Numerical
 * Recipes in C", Second Edition, Cambridge University Press, 1992,
 * Section 8.5, ISBN 0-521-43108-5.
 * Original implementation by Nicolas Devillard - 1998. Public domain.
 *
 * Partially reorders arr[] in place and returns its median element.
 */
static ucs_time_t __find_median_quick_select(ucs_time_t arr[], int n)
{
    int lo, hi;
    int med;
    int mid, left, right;

#define ELEM_SWAP(a,b) { register ucs_time_t t=(a);(a)=(b);(b)=t; }

    lo  = 0;
    hi  = n - 1;
    med = (lo + hi) / 2;

    for (;;) {
        if (hi <= lo) {
            /* Partition narrowed down to a single element */
            return arr[med];
        }

        if (hi == lo + 1) {
            /* Two elements left: order them, return the median slot */
            if (arr[lo] > arr[hi]) {
                ELEM_SWAP(arr[lo], arr[hi]);
            }
            return arr[med];
        }

        /* Median-of-three of lo, middle, hi; pivot ends up at position lo */
        mid = (lo + hi) / 2;
        if (arr[mid] > arr[hi]) ELEM_SWAP(arr[mid], arr[hi]);
        if (arr[lo]  > arr[hi]) ELEM_SWAP(arr[lo],  arr[hi]);
        if (arr[mid] > arr[lo]) ELEM_SWAP(arr[mid], arr[lo]);

        /* Park the old middle element just inside the partition */
        ELEM_SWAP(arr[mid], arr[lo + 1]);

        /* Scan inward from both ends, swapping out-of-place pairs */
        left  = lo + 1;
        right = hi;
        for (;;) {
            do { left++;  } while (arr[lo] > arr[left]);
            do { right--; } while (arr[right] > arr[lo]);
            if (right < left) {
                break;
            }
            ELEM_SWAP(arr[left], arr[right]);
        }

        /* Move the pivot into its final sorted position */
        ELEM_SWAP(arr[lo], arr[right]);

        /* Continue in whichever side still contains the median index */
        if (right <= med) {
            lo = left;
        }
        if (right >= med) {
            hi = right - 1;
        }
    }
}
/*
 * Allocate test buffer memory through the UCT interface.
 *
 * @param perf       Perf test context (supplies the UCT iface and md).
 * @param length     Number of bytes to allocate.
 * @param flags      UCT memory allocation flags.
 * @param alloc_mem  Caller-owned descriptor filled with the allocation.
 *
 * @return UCS_OK on success, or the uct_iface_mem_alloc() error status.
 */
static ucs_status_t
uct_perf_test_alloc_host(const ucx_perf_context_t *perf, size_t length,
                         unsigned flags, uct_allocated_memory_t *alloc_mem)
{
    ucs_status_t status;

    status = uct_iface_mem_alloc(perf->uct.iface, length,
                                 flags, "perftest", alloc_mem);
    if (status != UCS_OK) {
        /* FIX: do not ucs_free(alloc_mem) here — the descriptor is owned by
         * the caller and points into the perf context (&perf->uct.send_mem /
         * recv_mem), so freeing it was an invalid free of a non-heap pointer. */
        ucs_error("failed to allocate memory: %s", ucs_status_string(status));
        return status;
    }

    /* The allocation must come from the memory domain this test registered */
    ucs_assert(alloc_mem->md == perf->uct.md);
    return UCS_OK;
}
/* Release a buffer obtained via uct_perf_test_alloc_host(); the perf
 * context itself is untouched. */
static void uct_perf_test_free_host(const ucx_perf_context_t *perf,
                                    uct_allocated_memory_t *alloc_mem)
{
    uct_iface_mem_free(alloc_mem);
}
/* Copy between two host-memory buffers; logs an error (and copies nothing)
 * if either side is not host memory. */
static void ucx_perf_test_memcpy_host(void *dst, ucs_memory_type_t dst_mem_type,
                                      const void *src, ucs_memory_type_t src_mem_type,
                                      size_t count)
{
    /* This copier understands only host memory on both sides */
    if ((dst_mem_type == UCS_MEMORY_TYPE_HOST) &&
        (src_mem_type == UCS_MEMORY_TYPE_HOST)) {
        memcpy(dst, src, count);
    } else {
        ucs_error("wrong memory type passed src - %d, dst - %d",
                  src_mem_type, dst_mem_type);
    }
}
/*
 * Allocate the UCT send/recv buffers (one region per thread) and the IOV
 * descriptor array.  On failure, everything allocated so far is released
 * in reverse order via the goto chain.
 */
static ucs_status_t uct_perf_test_alloc_mem(ucx_perf_context_t *perf)
{
    ucx_perf_params_t *params = &perf->params;
    ucs_status_t status;
    unsigned flags;
    size_t buffer_size;

    /* With a strided ZCOPY layout the buffer must cover every IOV stride */
    if ((UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) && params->iov_stride) {
        buffer_size = params->msg_size_cnt * params->iov_stride;
    } else {
        buffer_size = ucx_perf_get_message_size(params);
    }

    /* TODO use params->alignment */
    flags = (params->flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) ?
            UCT_MD_MEM_FLAG_NONBLOCK : 0;
    flags |= UCT_MD_MEM_ACCESS_ALL;

    /* Allocate send buffer memory */
    status = perf->allocator->uct_alloc(perf, buffer_size * params->thread_count,
                                        flags, &perf->uct.send_mem);
    if (status != UCS_OK) {
        goto err;
    }
    perf->send_buffer = perf->uct.send_mem.address;

    /* Allocate receive buffer memory */
    status = perf->allocator->uct_alloc(perf, buffer_size * params->thread_count,
                                        flags, &perf->uct.recv_mem);
    if (status != UCS_OK) {
        goto err_free_send;
    }
    perf->recv_buffer = perf->uct.recv_mem.address;

    /* Allocate IOV datatype memory (per message-size entry, per thread) */
    perf->params.msg_size_cnt = params->msg_size_cnt;
    perf->uct.iov = malloc(sizeof(*perf->uct.iov) *
                           perf->params.msg_size_cnt *
                           params->thread_count);
    if (NULL == perf->uct.iov) {
        status = UCS_ERR_NO_MEMORY;
        ucs_error("Failed allocate send IOV(%lu) buffer: %s",
                  perf->params.msg_size_cnt, ucs_status_string(status));
        goto err_free_recv;
    }

    ucs_debug("allocated memory. Send buffer %p, Recv buffer %p",
              perf->send_buffer, perf->recv_buffer);
    return UCS_OK;

err_free_recv:
    perf->allocator->uct_free(perf, &perf->uct.recv_mem);
err_free_send:
    perf->allocator->uct_free(perf, &perf->uct.send_mem);
err:
    return status;
}
/* Release everything uct_perf_test_alloc_mem() allocated: both buffer
 * regions and the IOV descriptor array. */
static void uct_perf_test_free_mem(ucx_perf_context_t *perf)
{
    perf->allocator->uct_free(perf, &perf->uct.send_mem);
    perf->allocator->uct_free(perf, &perf->uct.recv_mem);
    free(perf->uct.iov);
}
/* (Re)start the test clock: record the start timestamps, derive the
 * deadline from params.max_time, and seed the previous/current samples. */
void ucx_perf_test_start_clock(ucx_perf_context_t *perf)
{
    ucs_time_t now = ucs_get_time();

    perf->start_time_acc = ucs_get_accurate_time();

    if (perf->params.max_time == 0.0) {
        perf->end_time = UINT64_MAX;   /* no time limit configured */
    } else {
        perf->end_time = ucs_time_from_sec(perf->params.max_time) + now;
    }

    perf->prev_time        = now;
    perf->prev.time        = now;
    perf->prev.time_acc    = perf->start_time_acc;
    perf->current.time_acc = perf->start_time_acc;
}
/* Initialize/reset every counter that a warm-up run may have modified,
 * then restart the clock. */
static void ucx_perf_test_prepare_new_run(ucx_perf_context_t *perf,
                                          const ucx_perf_params_t *params)
{
    unsigned idx;

    perf->max_iter        = (perf->params.max_iter == 0) ? UINT64_MAX :
                            perf->params.max_iter;
    perf->report_interval = ucs_time_from_sec(perf->params.report_interval);

    /* Reset the current and previous measurement samples */
    perf->current.time  = 0;
    perf->current.msgs  = 0;
    perf->current.bytes = 0;
    perf->current.iters = 0;
    perf->prev.msgs     = 0;
    perf->prev.bytes    = 0;
    perf->prev.iters    = 0;

    /* Clear the latency timing ring buffer */
    perf->timing_queue_head = 0;
    for (idx = 0; idx < TIMING_QUEUE_SIZE; ++idx) {
        perf->timing_queue[idx] = 0;
    }

    ucx_perf_test_start_clock(perf);
}
/* Copy the parameters into the context and pick the memory allocator:
 * group index 0 uses the send-side memory type, others the receive side. */
static void ucx_perf_test_init(ucx_perf_context_t *perf,
                               const ucx_perf_params_t *params)
{
    unsigned group_index;

    perf->params = *params;
    group_index  = rte_call(perf, group_index);

    perf->allocator = (group_index == 0) ?
                      ucx_perf_mem_type_allocators[params->send_mem_type] :
                      ucx_perf_mem_type_allocators[params->recv_mem_type];

    ucx_perf_test_prepare_new_run(perf, params);
}
/*
 * Compute the test results (latency, bandwidth, message rate) from the
 * accumulated counters.  For ping-pong tests each iteration is a round
 * trip, so latencies are halved and rates doubled via 'factor'.
 */
void ucx_perf_calc_result(ucx_perf_context_t *perf, ucx_perf_result_t *result)
{
    double factor = (perf->params.test_type == UCX_PERF_TEST_TYPE_PINGPONG) ?
                    2.0 : 1.0;
    double total_time;
    double interval;
    ucs_time_t median;

    total_time = perf->current.time_acc - perf->start_time_acc;
    interval   = perf->current.time_acc - perf->prev.time_acc;

    result->iters        = perf->current.iters;
    result->bytes        = perf->current.bytes;
    result->elapsed_time = total_time;

    /* Latency: the typical value is the median of the timing ring */
    median = __find_median_quick_select(perf->timing_queue, TIMING_QUEUE_SIZE);
    result->latency.typical        = ucs_time_to_sec(median) / factor;
    result->latency.moment_average = interval
                                     / (perf->current.iters - perf->prev.iters)
                                     / factor;
    result->latency.total_average  = total_time
                                     / perf->current.iters
                                     / factor;

    /* Bandwidth */
    result->bandwidth.typical        = 0.0; /* undefined */
    result->bandwidth.moment_average = (perf->current.bytes - perf->prev.bytes)
                                       / interval * factor;
    result->bandwidth.total_average  = perf->current.bytes
                                       / total_time * factor;

    /* Message rate */
    result->msgrate.typical        = 0.0; /* undefined */
    result->msgrate.moment_average = (perf->current.msgs - perf->prev.msgs)
                                     / interval * factor;
    result->msgrate.total_average  = perf->current.msgs
                                     / total_time * factor;
}
/*
 * Validate parameters common to the UCT and UCP test paths.
 * Error messages are printed only in verbose mode; the error status is
 * returned unconditionally.
 */
static ucs_status_t ucx_perf_test_check_params(ucx_perf_params_t *params)
{
    size_t it;

    /* check if zero-size messages are requested and supported */
    if ((/* they are not supported by: */
         /* - UCT tests, except UCT AM Short/Bcopy */
         (params->api == UCX_PERF_API_UCT) ||
         (/* - UCP RMA and AMO tests */
          (params->api == UCX_PERF_API_UCP) &&
          (params->command != UCX_PERF_CMD_AM) &&
          (params->command != UCX_PERF_CMD_TAG) &&
          (params->command != UCX_PERF_CMD_TAG_SYNC) &&
          (params->command != UCX_PERF_CMD_STREAM))) &&
        ucx_perf_get_message_size(params) < 1) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Message size too small, need to be at least 1");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    /* UCP RMA/AMO commands currently require host memory on both sides */
    if ((params->api == UCX_PERF_API_UCP) &&
        ((params->send_mem_type != UCS_MEMORY_TYPE_HOST) ||
         (params->recv_mem_type != UCS_MEMORY_TYPE_HOST)) &&
        ((params->command == UCX_PERF_CMD_PUT) ||
         (params->command == UCX_PERF_CMD_GET) ||
         (params->command == UCX_PERF_CMD_ADD) ||
         (params->command == UCX_PERF_CMD_FADD) ||
         (params->command == UCX_PERF_CMD_SWAP) ||
         (params->command == UCX_PERF_CMD_CSWAP))) {
        /* TODO: remove when support for non-HOST memory types will be added */
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("UCP doesn't support RMA/AMO for \"%s\"<->\"%s\" memory types",
                      ucs_memory_type_names[params->send_mem_type],
                      ucs_memory_type_names[params->recv_mem_type]);
        }
        return UCS_ERR_INVALID_PARAM;
    }

    /* at least one operation must be outstanding */
    if (params->max_outstanding < 1) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("max_outstanding, need to be at least 1");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    /* check if particular message size fit into stride size */
    if (params->iov_stride) {
        for (it = 0; it < params->msg_size_cnt; ++it) {
            if (params->msg_size_list[it] > params->iov_stride) {
                if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                    ucs_error("Buffer size %lu bigger than stride %lu",
                              params->msg_size_list[it], params->iov_stride);
                }
                return UCS_ERR_INVALID_PARAM;
            }
        }
    }

    return UCS_OK;
}
/*
 * Flush one peer endpoint, blocking (while progressing the worker) until
 * the flush completes.  comp.count starts at 2 so the loop can wait for it
 * to drop to 1: either uct_ep_flush() completes inline and we decrement it
 * ourselves, or UCT decrements it when the asynchronous flush finishes.
 */
void uct_perf_ep_flush_b(ucx_perf_context_t *perf, int peer_index)
{
    uct_ep_h ep = perf->uct.peers[peer_index].ep;
    uct_completion_t comp;
    ucs_status_t status;
    int started;

    started    = 0;
    comp.func  = NULL;
    comp.count = 2;
    do {
        if (!started) {
            status = uct_ep_flush(ep, 0, &comp);
            if (status == UCS_OK) {
                /* flush completed inline */
                --comp.count;
            } else if (status == UCS_INPROGRESS) {
                /* async flush posted; completion decrements comp.count */
                started = 1;
            } else if (status != UCS_ERR_NO_RESOURCE) {
                ucs_error("uct_ep_flush() failed: %s", ucs_status_string(status));
                return;
            }
            /* UCS_ERR_NO_RESOURCE: retry after progressing the worker */
        }
        uct_worker_progress(perf->uct.worker);
    } while (comp.count > 1);
}
/* Flush the whole interface, progressing the worker until the flush is no
 * longer in progress; log any final failure. */
void uct_perf_iface_flush_b(ucx_perf_context_t *perf)
{
    ucs_status_t status;

    for (;;) {
        status = uct_iface_flush(perf->uct.iface, 0, NULL);
        uct_worker_progress(perf->uct.worker);
        if (status != UCS_INPROGRESS) {
            break;
        }
    }

    if (status != UCS_OK) {
        ucs_error("uct_iface_flush() failed: %s", ucs_status_string(status));
    }
}
/* Map a UCT data layout to the matching capability flag; 0 for an
 * unrecognized layout. */
static inline uint64_t __get_flag(uct_perf_data_layout_t layout, uint64_t short_f,
                                  uint64_t bcopy_f, uint64_t zcopy_f)
{
    switch (layout) {
    case UCT_PERF_DATA_LAYOUT_SHORT:
        return short_f;
    case UCT_PERF_DATA_LAYOUT_BCOPY:
        return bcopy_f;
    case UCT_PERF_DATA_LAYOUT_ZCOPY:
        return zcopy_f;
    default:
        return 0;
    }
}
/* Record atomic op 'op' in the 32- or 64-bit capability mask according to
 * the operand size; any other size is unsupported. */
static inline ucs_status_t __get_atomic_flag(size_t size, uint64_t *op32,
                                             uint64_t *op64, uint64_t op)
{
    if (size == sizeof(uint64_t)) {
        *op64 = UCS_BIT(op);
        return UCS_OK;
    }

    if (size == sizeof(uint32_t)) {
        *op32 = UCS_BIT(op);
        return UCS_OK;
    }

    return UCS_ERR_UNSUPPORTED;
}
/*
 * Map a UCT data layout to the matching size limit; 0 for an unrecognized
 * layout.  FIX: the zcopy limit parameter was declared uint64_t while the
 * short/bcopy limits are size_t; all callers pass size_t capability fields
 * (attr.cap.*.max_zcopy / min_zcopy), so use a consistent type.
 */
static inline size_t __get_max_size(uct_perf_data_layout_t layout, size_t short_m,
                                    size_t bcopy_m, size_t zcopy_m)
{
    return (layout == UCT_PERF_DATA_LAYOUT_SHORT) ? short_m :
           (layout == UCT_PERF_DATA_LAYOUT_BCOPY) ? bcopy_m :
           (layout == UCT_PERF_DATA_LAYOUT_ZCOPY) ? zcopy_m :
           0;
}
/*
 * Check that the memory domain can either natively access or register
 * the requested memory type.
 *
 * FIX: the failure return was nested inside the VERBOSE check, so with
 * verbose mode off an unsupported memory type silently returned UCS_OK.
 * Every other validation in this file gates only the message on VERBOSE
 * and returns the error unconditionally; do the same here.
 */
static ucs_status_t uct_perf_test_check_md_support(ucx_perf_params_t *params,
                                                   ucs_memory_type_t mem_type,
                                                   uct_md_attr_t *md_attr)
{
    if (!(md_attr->cap.access_mem_type == mem_type) &&
        !(md_attr->cap.reg_mem_types & UCS_BIT(mem_type))) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Unsupported memory type %s by "UCT_PERF_TEST_PARAMS_FMT,
                      ucs_memory_type_names[mem_type],
                      UCT_PERF_TEST_PARAMS_ARG(params));
        }
        return UCS_ERR_INVALID_PARAM;
    }
    return UCS_OK;
}
/*
 * Validate that the selected UCT interface and memory domain support the
 * requested test: required capability flags, atomic operations, message
 * size limits, AM header constraints, IOV count, and memory types.
 * Returns UCS_ERR_UNSUPPORTED / UCS_ERR_INVALID_PARAM on mismatch.
 */
static ucs_status_t uct_perf_test_check_capabilities(ucx_perf_params_t *params,
                                                     uct_iface_h iface, uct_md_h md)
{
    uint64_t required_flags = 0;
    uint64_t atomic_op32    = 0;
    uint64_t atomic_op64    = 0;
    uint64_t atomic_fop32   = 0;
    uint64_t atomic_fop64   = 0;
    uct_md_attr_t md_attr;
    uct_iface_attr_t attr;
    ucs_status_t status;
    size_t min_size, max_size, max_iov, message_size;

    status = uct_md_query(md, &md_attr);
    if (status != UCS_OK) {
        ucs_error("uct_md_query(%s) failed: %s",
                  params->uct.md_name, ucs_status_string(status));
        return status;
    }

    status = uct_iface_query(iface, &attr);
    if (status != UCS_OK) {
        ucs_error("uct_iface_query("UCT_PERF_TEST_PARAMS_FMT") failed: %s",
                  UCT_PERF_TEST_PARAMS_ARG(params),
                  ucs_status_string(status));
        return status;
    }

    min_size     = 0;
    max_iov      = 1;
    message_size = ucx_perf_get_message_size(params);

    /* Derive per-command capability requirements and size limits.
     * NOTE: the ATOMIC_OP_CONFIG macro may 'return' early from here. */
    switch (params->command) {
    case UCX_PERF_CMD_AM:
        required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_AM_SHORT,
                                    UCT_IFACE_FLAG_AM_BCOPY, UCT_IFACE_FLAG_AM_ZCOPY);
        required_flags |= UCT_IFACE_FLAG_CB_SYNC;
        min_size = __get_max_size(params->uct.data_layout, 0, 0,
                                  attr.cap.am.min_zcopy);
        max_size = __get_max_size(params->uct.data_layout, attr.cap.am.max_short,
                                  attr.cap.am.max_bcopy, attr.cap.am.max_zcopy);
        max_iov  = attr.cap.am.max_iov;
        break;
    case UCX_PERF_CMD_PUT:
        required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_PUT_SHORT,
                                    UCT_IFACE_FLAG_PUT_BCOPY, UCT_IFACE_FLAG_PUT_ZCOPY);
        min_size = __get_max_size(params->uct.data_layout, 0, 0,
                                  attr.cap.put.min_zcopy);
        max_size = __get_max_size(params->uct.data_layout, attr.cap.put.max_short,
                                  attr.cap.put.max_bcopy, attr.cap.put.max_zcopy);
        max_iov  = attr.cap.put.max_iov;
        break;
    case UCX_PERF_CMD_GET:
        required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_GET_SHORT,
                                    UCT_IFACE_FLAG_GET_BCOPY, UCT_IFACE_FLAG_GET_ZCOPY);
        min_size = __get_max_size(params->uct.data_layout, 0, 0,
                                  attr.cap.get.min_zcopy);
        max_size = __get_max_size(params->uct.data_layout, attr.cap.get.max_short,
                                  attr.cap.get.max_bcopy, attr.cap.get.max_zcopy);
        max_iov  = attr.cap.get.max_iov;
        break;
    case UCX_PERF_CMD_ADD:
        ATOMIC_OP_CONFIG(message_size, &atomic_op32, &atomic_op64, UCT_ATOMIC_OP_ADD,
                         perf_atomic_op, params, status);
        max_size = 8;
        break;
    case UCX_PERF_CMD_FADD:
        /* fetch-add uses the ADD opcode with the fetching capability mask */
        ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_ADD,
                         perf_atomic_fop, params, status);
        max_size = 8;
        break;
    case UCX_PERF_CMD_SWAP:
        ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_SWAP,
                         perf_atomic_fop, params, status);
        max_size = 8;
        break;
    case UCX_PERF_CMD_CSWAP:
        ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_CSWAP,
                         perf_atomic_fop, params, status);
        max_size = 8;
        break;
    default:
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Invalid test command");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    status = ucx_perf_test_check_params(params);
    if (status != UCS_OK) {
        return status;
    }

    /* check atomics first (these macros may 'return' early) */
    ATOMIC_OP_CHECK(32, attr.cap.atomic32.op_flags, atomic_op32, params, perf_atomic_op);
    ATOMIC_OP_CHECK(64, attr.cap.atomic64.op_flags, atomic_op64, params, perf_atomic_op);
    ATOMIC_OP_CHECK(32, attr.cap.atomic32.fop_flags, atomic_fop32, params, perf_atomic_fop);
    ATOMIC_OP_CHECK(64, attr.cap.atomic64.fop_flags, atomic_fop64, params, perf_atomic_fop);

    /* check iface flags (only for non-atomic commands) */
    if (!(atomic_op32 | atomic_op64 | atomic_fop32 | atomic_fop64) &&
        (!ucs_test_all_flags(attr.cap.flags, required_flags) || !required_flags)) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support operation %s",
                      UCT_PERF_TEST_PARAMS_ARG(params),
                      perf_iface_ops[ucs_ffs64(~attr.cap.flags & required_flags)]);
        }
        return UCS_ERR_UNSUPPORTED;
    }

    /* message size must fall inside the transport's supported range */
    if (message_size < min_size) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Message size (%zu) is smaller than min supported (%zu)",
                      message_size, min_size);
        }
        return UCS_ERR_UNSUPPORTED;
    }

    if (message_size > max_size) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Message size (%zu) is larger than max supported (%zu)",
                      message_size, max_size);
        }
        return UCS_ERR_UNSUPPORTED;
    }

    /* AM-specific header and flow-control constraints */
    if (params->command == UCX_PERF_CMD_AM) {
        if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_SHORT) &&
            (params->am_hdr_size != sizeof(uint64_t)))
        {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("Short AM header size must be 8 bytes");
            }
            return UCS_ERR_INVALID_PARAM;
        }

        if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_ZCOPY) &&
            (params->am_hdr_size > attr.cap.am.max_hdr))
        {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("AM header size (%zu) is larger than max supported (%zu)",
                          params->am_hdr_size, attr.cap.am.max_hdr);
            }
            return UCS_ERR_UNSUPPORTED;
        }

        if (params->am_hdr_size > message_size) {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("AM header size (%zu) is larger than message size (%zu)",
                          params->am_hdr_size, message_size);
            }
            return UCS_ERR_INVALID_PARAM;
        }

        if (params->uct.fc_window > UCT_PERF_TEST_MAX_FC_WINDOW) {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("AM flow-control window (%d) too large (should be <= %d)",
                          params->uct.fc_window, UCT_PERF_TEST_MAX_FC_WINDOW);
            }
            return UCS_ERR_INVALID_PARAM;
        }

        /* NOTE(review): "on-sided" below looks like a typo for "one-sided" */
        if ((params->flags & UCX_PERF_TEST_FLAG_ONE_SIDED) &&
            (params->flags & UCX_PERF_TEST_FLAG_VERBOSE))
        {
            ucs_warn("Running active-message test with on-sided progress");
        }
    }

    /* IOV entry count and first-entry size checks for ZCOPY layouts */
    if (UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) {
        if (params->msg_size_cnt > max_iov) {
            if ((params->flags & UCX_PERF_TEST_FLAG_VERBOSE) ||
                !params->msg_size_cnt) {
                ucs_error("Wrong number of IOV entries. Requested is %lu, "
                          "should be in the range 1...%lu", params->msg_size_cnt,
                          max_iov);
            }
            return UCS_ERR_UNSUPPORTED;
        }
        /* if msg_size_cnt == 1 the message size checked above */
        if ((UCX_PERF_CMD_AM == params->command) && (params->msg_size_cnt > 1)) {
            if (params->am_hdr_size > params->msg_size_list[0]) {
                if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                    ucs_error("AM header size (%lu) larger than the first IOV "
                              "message size (%lu)", params->am_hdr_size,
                              params->msg_size_list[0]);
                }
                return UCS_ERR_INVALID_PARAM;
            }
        }
    }

    /* both memory types must be usable with this memory domain */
    status = uct_perf_test_check_md_support(params, params->send_mem_type, &md_attr);
    if (status != UCS_OK) {
        return status;
    }

    status = uct_perf_test_check_md_support(params, params->recv_mem_type, &md_attr);
    if (status != UCS_OK) {
        return status;
    }

    return UCS_OK;
}
/*
 * Establish UCT connectivity with every peer in the group: exchange
 * [info header | packed rkey | device addr | iface addr | ep addr] over
 * the RTE, then per remote peer unpack its rkey and create/connect an
 * endpoint.
 *
 * FIX: when the calloc() of the peers array failed, the code jumped to the
 * cleanup path with 'status' still UCS_OK from the preceding successful
 * call, so the function reported success after an allocation failure.
 * Now sets UCS_ERR_NO_MEMORY (and logs) before bailing out.
 */
static ucs_status_t uct_perf_test_setup_endpoints(ucx_perf_context_t *perf)
{
    const size_t buffer_size = ADDR_BUF_SIZE;
    ucx_perf_ep_info_t info, *remote_info;
    unsigned group_size, i, group_index;
    uct_device_addr_t *dev_addr;
    uct_iface_addr_t *iface_addr;
    uct_ep_addr_t *ep_addr;
    uct_iface_attr_t iface_attr;
    uct_md_attr_t md_attr;
    uct_ep_params_t ep_params;
    void *rkey_buffer;
    ucs_status_t status;
    struct iovec vec[5];
    void *buffer;
    void *req;

    buffer = malloc(buffer_size);
    if (buffer == NULL) {
        ucs_error("Failed to allocate RTE buffer");
        status = UCS_ERR_NO_MEMORY;
        goto err;
    }

    status = uct_iface_query(perf->uct.iface, &iface_attr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_iface_query: %s", ucs_status_string(status));
        goto err_free;
    }

    status = uct_md_query(perf->uct.md, &md_attr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_md_query: %s", ucs_status_string(status));
        goto err_free;
    }

    /* Only MDs that can allocate or register memory produce a packed rkey */
    if (md_attr.cap.flags & (UCT_MD_FLAG_ALLOC|UCT_MD_FLAG_REG)) {
        info.rkey_size = md_attr.rkey_packed_size;
    } else {
        info.rkey_size = 0;
    }
    info.uct.dev_addr_len   = iface_attr.device_addr_len;
    info.uct.iface_addr_len = iface_attr.iface_addr_len;
    info.uct.ep_addr_len    = iface_attr.ep_addr_len;
    info.recv_buffer        = (uintptr_t)perf->recv_buffer;

    /* Lay out [rkey | dev_addr | iface_addr | ep_addr] inside the buffer */
    rkey_buffer = buffer;
    dev_addr    = UCS_PTR_BYTE_OFFSET(rkey_buffer, info.rkey_size);
    iface_addr  = UCS_PTR_BYTE_OFFSET(dev_addr, info.uct.dev_addr_len);
    ep_addr     = UCS_PTR_BYTE_OFFSET(iface_addr, info.uct.iface_addr_len);
    ucs_assert_always(UCS_PTR_BYTE_OFFSET(ep_addr, info.uct.ep_addr_len) <=
                      UCS_PTR_BYTE_OFFSET(buffer, buffer_size));

    status = uct_iface_get_device_address(perf->uct.iface, dev_addr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_iface_get_device_address: %s",
                  ucs_status_string(status));
        goto err_free;
    }

    status = uct_iface_get_address(perf->uct.iface, iface_addr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_iface_get_address: %s", ucs_status_string(status));
        goto err_free;
    }

    if (info.rkey_size > 0) {
        memset(rkey_buffer, 0, info.rkey_size);
        status = uct_md_mkey_pack(perf->uct.md, perf->uct.recv_mem.memh, rkey_buffer);
        if (status != UCS_OK) {
            ucs_error("Failed to uct_rkey_pack: %s", ucs_status_string(status));
            goto err_free;
        }
    }

    group_size  = rte_call(perf, group_size);
    group_index = rte_call(perf, group_index);

    perf->uct.peers = calloc(group_size, sizeof(*perf->uct.peers));
    if (perf->uct.peers == NULL) {
        /* FIX: previously fell through with status == UCS_OK here */
        ucs_error("Failed to allocate peers array");
        status = UCS_ERR_NO_MEMORY;
        goto err_free;
    }

    ep_params.field_mask = UCT_EP_PARAM_FIELD_IFACE;
    ep_params.iface      = perf->uct.iface;
    if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
        /* Create local endpoints up front and pack their ep address */
        for (i = 0; i < group_size; ++i) {
            if (i == group_index) {
                continue;
            }
            status = uct_ep_create(&ep_params, &perf->uct.peers[i].ep);
            if (status != UCS_OK) {
                ucs_error("Failed to uct_ep_create: %s", ucs_status_string(status));
                goto err_destroy_eps;
            }
            status = uct_ep_get_address(perf->uct.peers[i].ep, ep_addr);
            if (status != UCS_OK) {
                ucs_error("Failed to uct_ep_get_address: %s", ucs_status_string(status));
                goto err_destroy_eps;
            }
        }
    } else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) {
        ep_params.field_mask |= UCT_EP_PARAM_FIELD_DEV_ADDR |
                                UCT_EP_PARAM_FIELD_IFACE_ADDR;
    }

    /* Publish the info header followed by the address/rkey blob */
    vec[0].iov_base = &info;
    vec[0].iov_len  = sizeof(info);
    vec[1].iov_base = buffer;
    vec[1].iov_len  = info.rkey_size + info.uct.dev_addr_len +
                      info.uct.iface_addr_len + info.uct.ep_addr_len;

    rte_call(perf, post_vec, vec, 2, &req);
    rte_call(perf, exchange_vec, req);

    for (i = 0; i < group_size; ++i) {
        if (i == group_index) {
            continue;
        }

        rte_call(perf, recv, i, buffer, buffer_size, req);

        remote_info = buffer;
        rkey_buffer = remote_info + 1;
        dev_addr    = UCS_PTR_BYTE_OFFSET(rkey_buffer, remote_info->rkey_size);
        iface_addr  = UCS_PTR_BYTE_OFFSET(dev_addr, remote_info->uct.dev_addr_len);
        ep_addr     = UCS_PTR_BYTE_OFFSET(iface_addr, remote_info->uct.iface_addr_len);

        perf->uct.peers[i].remote_addr = remote_info->recv_buffer;

        if (!uct_iface_is_reachable(perf->uct.iface, dev_addr,
                                    remote_info->uct.iface_addr_len ?
                                    iface_addr : NULL)) {
            ucs_error("Destination is unreachable");
            status = UCS_ERR_UNREACHABLE;
            goto err_destroy_eps;
        }

        if (remote_info->rkey_size > 0) {
            status = uct_rkey_unpack(perf->uct.cmpt, rkey_buffer,
                                     &perf->uct.peers[i].rkey);
            if (status != UCS_OK) {
                ucs_error("Failed to uct_rkey_unpack: %s", ucs_status_string(status));
                goto err_destroy_eps;
            }
        } else {
            perf->uct.peers[i].rkey.handle = NULL;
            perf->uct.peers[i].rkey.rkey   = UCT_INVALID_RKEY;
        }

        if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
            status = uct_ep_connect_to_ep(perf->uct.peers[i].ep, dev_addr, ep_addr);
        } else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) {
            ep_params.dev_addr   = dev_addr;
            ep_params.iface_addr = iface_addr;
            status = uct_ep_create(&ep_params, &perf->uct.peers[i].ep);
        } else {
            status = UCS_ERR_UNSUPPORTED;
        }
        if (status != UCS_OK) {
            ucs_error("Failed to connect endpoint: %s", ucs_status_string(status));
            goto err_destroy_eps;
        }
    }

    uct_perf_iface_flush_b(perf);
    free(buffer);
    uct_perf_barrier(perf);
    return UCS_OK;

err_destroy_eps:
    for (i = 0; i < group_size; ++i) {
        if (perf->uct.peers[i].rkey.rkey != UCT_INVALID_RKEY) {
            uct_rkey_release(perf->uct.cmpt, &perf->uct.peers[i].rkey);
        }
        if (perf->uct.peers[i].ep != NULL) {
            uct_ep_destroy(perf->uct.peers[i].ep);
        }
    }
    free(perf->uct.peers);
err_free:
    free(buffer);
err:
    return status;
}
/* Tear down all peer connections: barrier, stop the AM handler, release
 * rkeys, destroy endpoints, and free the peers array. */
static void uct_perf_test_cleanup_endpoints(ucx_perf_context_t *perf)
{
    unsigned group_size, group_index, i;

    uct_perf_barrier(perf);

    /* Stop handling incoming active messages before tearing down */
    uct_iface_set_am_handler(perf->uct.iface, UCT_PERF_TEST_AM_ID, NULL, NULL, 0);

    group_size  = rte_call(perf, group_size);
    group_index = rte_call(perf, group_index);

    for (i = 0; i < group_size; ++i) {
        if (i == group_index) {
            continue; /* no connection to self */
        }
        if (perf->uct.peers[i].rkey.rkey != UCT_INVALID_RKEY) {
            uct_rkey_release(perf->uct.cmpt, &perf->uct.peers[i].rkey);
        }
        if (perf->uct.peers[i].ep) {
            uct_ep_destroy(perf->uct.peers[i].ep);
        }
    }

    free(perf->uct.peers);
}
/*
 * Translate the perf test command into the UCP feature bits required to
 * run it, then run the common parameter checks.  Atomic commands select
 * AMO32 or AMO64 depending on the message size.
 */
static ucs_status_t ucp_perf_test_fill_params(ucx_perf_params_t *params,
                                              ucp_params_t *ucp_params)
{
    ucs_status_t status;
    size_t message_size;

    message_size = ucx_perf_get_message_size(params);
    switch (params->command) {
    case UCX_PERF_CMD_PUT:
    case UCX_PERF_CMD_GET:
        ucp_params->features |= UCP_FEATURE_RMA;
        break;
    case UCX_PERF_CMD_ADD:
    case UCX_PERF_CMD_FADD:
    case UCX_PERF_CMD_SWAP:
    case UCX_PERF_CMD_CSWAP:
        /* atomic width is implied by the message size */
        if (message_size == sizeof(uint32_t)) {
            ucp_params->features |= UCP_FEATURE_AMO32;
        } else if (message_size == sizeof(uint64_t)) {
            ucp_params->features |= UCP_FEATURE_AMO64;
        } else {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("Atomic size should be either 32 or 64 bit");
            }
            return UCS_ERR_INVALID_PARAM;
        }
        break;
    case UCX_PERF_CMD_TAG:
    case UCX_PERF_CMD_TAG_SYNC:
        ucp_params->features |= UCP_FEATURE_TAG;
        break;
    case UCX_PERF_CMD_STREAM:
        ucp_params->features |= UCP_FEATURE_STREAM;
        break;
    default:
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Invalid test command");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    status = ucx_perf_test_check_params(params);
    if (status != UCS_OK) {
        return status;
    }

    return UCS_OK;
}
/* Allocate the IOV descriptor array when the IOV datatype is selected;
 * a no-op (leaving *iov_p untouched) for any other datatype. */
static ucs_status_t ucp_perf_test_alloc_iov_mem(ucp_perf_datatype_t datatype,
                                                size_t iovcnt, unsigned thread_count,
                                                ucp_dt_iov_t **iov_p)
{
    ucp_dt_iov_t *iov;

    if (datatype != UCP_PERF_DATATYPE_IOV) {
        return UCS_OK;
    }

    iov = malloc(sizeof(*iov) * iovcnt * thread_count);
    if (iov == NULL) {
        ucs_error("Failed allocate IOV buffer with iovcnt=%lu", iovcnt);
        return UCS_ERR_NO_MEMORY;
    }

    *iov_p = iov;
    return UCS_OK;
}
/*
 * Allocate and map a buffer through UCP.  On success *address_p is updated
 * with the address UCP actually mapped and *memh receives the handle.
 */
static ucs_status_t
ucp_perf_test_alloc_host(const ucx_perf_context_t *perf, size_t length,
                         void **address_p, ucp_mem_h *memh, int non_blk_flag)
{
    ucp_mem_map_params_t mem_map_params;
    ucp_mem_attr_t mem_attr;
    ucs_status_t status;

    mem_map_params.field_mask = UCP_MEM_MAP_PARAM_FIELD_ADDRESS |
                                UCP_MEM_MAP_PARAM_FIELD_LENGTH |
                                UCP_MEM_MAP_PARAM_FIELD_FLAGS;
    mem_map_params.address    = *address_p;
    mem_map_params.length     = length;
    mem_map_params.flags      = UCP_MEM_MAP_ALLOCATE;
    if (perf->params.flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) {
        mem_map_params.flags |= non_blk_flag;
    }

    status = ucp_mem_map(perf->ucp.context, &mem_map_params, memh);
    if (status != UCS_OK) {
        return status;
    }

    /* Report back the address UCP actually chose for the mapping */
    mem_attr.field_mask = UCP_MEM_ATTR_FIELD_ADDRESS;
    status = ucp_mem_query(*memh, &mem_attr);
    if (status != UCS_OK) {
        return status;
    }

    *address_p = mem_attr.address;
    return UCS_OK;
}
/* Unmap a buffer allocated via ucp_perf_test_alloc_host().  The 'address'
 * parameter is unused here (UCP frees via the memh handle); it is kept for
 * signature symmetry with the allocator callback. */
static void ucp_perf_test_free_host(const ucx_perf_context_t *perf,
                                    void *address, ucp_mem_h memh)
{
    ucs_status_t status;

    status = ucp_mem_unmap(perf->ucp.context, memh);
    if (status != UCS_OK) {
        ucs_warn("ucp_mem_unmap() failed: %s", ucs_status_string(status));
    }
}
/*
 * Allocate the UCP send/receive buffers (one region per thread) plus the
 * IOV descriptor arrays used by the IOV datatype.
 *
 * FIX: the error exit previously returned a hard-coded UCS_ERR_NO_MEMORY,
 * discarding the real error code of the step that failed (every goto path
 * reaches it with a non-OK 'status'); return that status instead.
 */
static ucs_status_t ucp_perf_test_alloc_mem(ucx_perf_context_t *perf)
{
    ucx_perf_params_t *params = &perf->params;
    ucs_status_t status;
    size_t buffer_size;

    /* With a stride, the buffer must cover all IOV entries */
    if (params->iov_stride) {
        buffer_size = params->msg_size_cnt * params->iov_stride;
    } else {
        buffer_size = ucx_perf_get_message_size(params);
    }

    /* Allocate send buffer memory */
    perf->send_buffer = NULL;
    status = perf->allocator->ucp_alloc(perf, buffer_size * params->thread_count,
                                        &perf->send_buffer, &perf->ucp.send_memh,
                                        UCP_MEM_MAP_NONBLOCK);
    if (status != UCS_OK) {
        goto err;
    }

    /* Allocate receive buffer memory */
    perf->recv_buffer = NULL;
    status = perf->allocator->ucp_alloc(perf, buffer_size * params->thread_count,
                                        &perf->recv_buffer, &perf->ucp.recv_memh,
                                        0);
    if (status != UCS_OK) {
        goto err_free_send_buffer;
    }

    /* Allocate IOV datatype memory */
    perf->ucp.send_iov = NULL;
    status = ucp_perf_test_alloc_iov_mem(params->ucp.send_datatype,
                                         perf->params.msg_size_cnt,
                                         params->thread_count,
                                         &perf->ucp.send_iov);
    if (UCS_OK != status) {
        goto err_free_buffers;
    }

    perf->ucp.recv_iov = NULL;
    status = ucp_perf_test_alloc_iov_mem(params->ucp.recv_datatype,
                                         perf->params.msg_size_cnt,
                                         params->thread_count,
                                         &perf->ucp.recv_iov);
    if (UCS_OK != status) {
        goto err_free_send_iov_buffers;
    }

    return UCS_OK;

err_free_send_iov_buffers:
    free(perf->ucp.send_iov);
err_free_buffers:
    perf->allocator->ucp_free(perf, perf->recv_buffer, perf->ucp.recv_memh);
err_free_send_buffer:
    perf->allocator->ucp_free(perf, perf->send_buffer, perf->ucp.send_memh);
err:
    /* Propagate the real failure instead of a blanket UCS_ERR_NO_MEMORY */
    return status;
}
/* Release everything ucp_perf_test_alloc_mem() allocated, in reverse
 * order: IOV arrays first, then the mapped buffers. */
static void ucp_perf_test_free_mem(ucx_perf_context_t *perf)
{
    free(perf->ucp.recv_iov);
    free(perf->ucp.send_iov);
    perf->allocator->ucp_free(perf, perf->recv_buffer, perf->ucp.recv_memh);
    perf->allocator->ucp_free(perf, perf->send_buffer, perf->ucp.send_memh);
}
/*
 * Destroy the per-thread rkeys and close the per-thread endpoints with a
 * flushing (graceful) close, progressing each thread's worker until its
 * close request completes.
 */
static void ucp_perf_test_destroy_eps(ucx_perf_context_t* perf)
{
    unsigned i, thread_count = perf->params.thread_count;
    ucs_status_ptr_t *req;
    ucs_status_t status;

    for (i = 0; i < thread_count; ++i) {
        if (perf->ucp.tctx[i].perf.ucp.rkey != NULL) {
            ucp_rkey_destroy(perf->ucp.tctx[i].perf.ucp.rkey);
        }
        if (perf->ucp.tctx[i].perf.ucp.ep != NULL) {
            req = ucp_ep_close_nb(perf->ucp.tctx[i].perf.ucp.ep,
                                  UCP_EP_CLOSE_MODE_FLUSH);
            if (UCS_PTR_IS_PTR(req)) {
                /* close in progress: drive this thread's worker until done */
                do {
                    ucp_worker_progress(perf->ucp.tctx[i].perf.ucp.worker);
                    status = ucp_request_check_status(req);
                } while (status == UCS_INPROGRESS);
                ucp_request_release(req);
            } else if (UCS_PTR_STATUS(req) != UCS_OK) {
                /* close failed immediately; nothing to wait for */
                ucs_warn("failed to close ep %p on thread %d: %s\n",
                         perf->ucp.tctx[i].perf.ucp.ep, i,
                         ucs_status_string(UCS_PTR_STATUS(req)));
            }
        }
    }
}
/*
 * Share a local status with the whole group and collect the peers'
 * statuses; returns the last non-OK status seen (ours or a peer's), or
 * UCS_OK if everyone succeeded.
 */
static ucs_status_t ucp_perf_test_exchange_status(ucx_perf_context_t *perf,
                                                  ucs_status_t status)
{
    unsigned group_size = rte_call(perf, group_size);
    ucs_status_t result = status;
    struct iovec vec;
    void *req           = NULL;
    unsigned i;

    /* Broadcast our status to the group */
    vec.iov_base = &status;
    vec.iov_len  = sizeof(status);
    rte_call(perf, post_vec, &vec, 1, &req);
    rte_call(perf, exchange_vec, req);

    /* Collect every peer's status; any failure overrides success */
    for (i = 0; i < group_size; ++i) {
        rte_call(perf, recv, i, &status, sizeof(status), req);
        if (status != UCS_OK) {
            result = status;
        }
    }

    return result;
}
/* Receive the remote peer's per-thread wireup blob (ep info header, worker
 * address, packed rkey), then create one endpoint per thread and unpack the
 * remote key when one was sent.
 *
 * Requires an RTE group of exactly 2 (one local and one remote process).
 * On failure, destroys any endpoints/rkeys created so far and returns the
 * error; endpoints/rkeys are pre-initialized to NULL so the cleanup helper
 * can run safely at any point.
 *
 * FIX: the rkey-unpack failure path used ucs_fatal(), which aborts the
 * process and made the subsequent cleanup (goto err_free_eps_buffer) and the
 * caller's status exchange unreachable. Use ucs_error() like the matching
 * ucp_ep_create() branch so the error flow actually executes. */
static ucs_status_t ucp_perf_test_receive_remote_data(ucx_perf_context_t *perf)
{
    unsigned thread_count = perf->params.thread_count;
    void *rkey_buffer     = NULL;
    void *req             = NULL;
    unsigned group_size, group_index, i;
    ucx_perf_ep_info_t *remote_info;
    ucp_ep_params_t ep_params;
    ucp_address_t *address;
    ucs_status_t status;
    size_t buffer_size;
    void *buffer;

    group_size  = rte_call(perf, group_size);
    group_index = rte_call(perf, group_index);

    if (group_size != 2) {
        ucs_error("perftest requires group size to be exactly 2 "
                  "(actual group size: %u)", group_size);
        return UCS_ERR_UNSUPPORTED;
    }

    buffer_size = ADDR_BUF_SIZE * thread_count;
    buffer = malloc(buffer_size);
    if (buffer == NULL) {
        ucs_error("failed to allocate RTE receive buffer");
        status = UCS_ERR_NO_MEMORY;
        goto err;
    }

    /* Initialize all endpoints and rkeys to NULL to handle error flow */
    for (i = 0; i < thread_count; i++) {
        perf->ucp.tctx[i].perf.ucp.ep   = NULL;
        perf->ucp.tctx[i].perf.ucp.rkey = NULL;
    }

    /* receive the data from the remote peer, extract the address from it
     * (along with additional wireup info) and create an endpoint to the peer */
    rte_call(perf, recv, 1 - group_index, buffer, buffer_size, req);

    remote_info = buffer;
    for (i = 0; i < thread_count; i++) {
        /* The blob layout per thread is: [info header][worker address][rkey] */
        address     = (ucp_address_t*)(remote_info + 1);
        rkey_buffer = UCS_PTR_BYTE_OFFSET(address,
                                          remote_info->ucp.worker_addr_len);
        perf->ucp.tctx[i].perf.ucp.remote_addr = remote_info->recv_buffer;

        ep_params.field_mask = UCP_EP_PARAM_FIELD_REMOTE_ADDRESS;
        ep_params.address    = address;

        status = ucp_ep_create(perf->ucp.tctx[i].perf.ucp.worker, &ep_params,
                               &perf->ucp.tctx[i].perf.ucp.ep);
        if (status != UCS_OK) {
            if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("ucp_ep_create() failed: %s", ucs_status_string(status));
            }
            goto err_free_eps_buffer;
        }

        if (remote_info->rkey_size > 0) {
            status = ucp_ep_rkey_unpack(perf->ucp.tctx[i].perf.ucp.ep, rkey_buffer,
                                        &perf->ucp.tctx[i].perf.ucp.rkey);
            if (status != UCS_OK) {
                if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                    /* was ucs_fatal(): aborted and skipped cleanup below */
                    ucs_error("ucp_rkey_unpack() failed: %s", ucs_status_string(status));
                }
                goto err_free_eps_buffer;
            }
        } else {
            perf->ucp.tctx[i].perf.ucp.rkey = NULL;
        }

        /* Advance to the next thread's wireup blob */
        remote_info = UCS_PTR_BYTE_OFFSET(remote_info,
                                          remote_info->ucp.total_wireup_len);
    }

    free(buffer);
    return UCS_OK;

err_free_eps_buffer:
    ucp_perf_test_destroy_eps(perf);
    free(buffer);
err:
    return status;
}
/* Pack this process' per-thread connection data and post it to the remote
 * peer through the RTE. Each thread contributes three iovec entries:
 * an ep-info header, the thread's worker address, and (when one-sided
 * features are requested) the packed rkey of the receive buffer.
 *
 * FIX: the err_free_workers_vec loop destroyed worker [i] on every
 * iteration instead of workers [j] (0..i-1), leaking all but one worker and
 * destroying one of them repeatedly. It also left dangling worker pointers
 * that the caller's cleanup path (ucp_perf_test_destroy_workers) would
 * destroy a second time; we now release each worker's address and info
 * allocation, destroy the worker, and NULL the pointer. */
static ucs_status_t ucp_perf_test_send_local_data(ucx_perf_context_t *perf,
                                                  uint64_t features)
{
    unsigned i, j, thread_count = perf->params.thread_count;
    size_t address_length = 0;
    void *rkey_buffer     = NULL;
    void *req             = NULL;
    ucx_perf_ep_info_t *info;
    ucp_address_t *address;
    ucs_status_t status;
    struct iovec *vec;
    size_t rkey_size;

    /* Pack an rkey for the receive buffer only if one-sided ops are in use */
    if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) {
        status = ucp_rkey_pack(perf->ucp.context, perf->ucp.recv_memh,
                               &rkey_buffer, &rkey_size);
        if (status != UCS_OK) {
            if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("ucp_rkey_pack() failed: %s", ucs_status_string(status));
            }
            goto err;
        }
    } else {
        rkey_size = 0;
    }

    /* each thread has an iovec with 3 entries to send to the remote peer:
     * ep_info, worker_address and rkey buffer */
    vec = calloc(3 * thread_count, sizeof(struct iovec));
    if (vec == NULL) {
        ucs_error("failed to allocate iovec");
        status = UCS_ERR_NO_MEMORY;
        goto err_rkey_release;
    }

    /* get the worker address created for every thread and send it to the remote
     * peer */
    for (i = 0; i < thread_count; i++) {
        status = ucp_worker_get_address(perf->ucp.tctx[i].perf.ucp.worker,
                                        &address, &address_length);
        if (status != UCS_OK) {
            if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("ucp_worker_get_address() failed: %s",
                          ucs_status_string(status));
            }
            goto err_free_workers_vec;
        }

        vec[i * 3].iov_base = malloc(sizeof(*info));
        if (vec[i * 3].iov_base == NULL) {
            ucs_error("failed to allocate vec entry for info");
            status = UCS_ERR_NO_MEMORY;
            /* worker i's address is not in vec yet - release it here */
            ucp_worker_release_address(perf->ucp.tctx[i].perf.ucp.worker,
                                       address);
            ucp_worker_destroy(perf->ucp.tctx[i].perf.ucp.worker);
            perf->ucp.tctx[i].perf.ucp.worker = NULL;
            goto err_free_workers_vec;
        }

        /* Fill the per-thread ep-info header */
        info                       = vec[i * 3].iov_base;
        info->ucp.worker_addr_len  = address_length;
        info->ucp.total_wireup_len = sizeof(*info) + address_length + rkey_size;
        info->rkey_size            = rkey_size;
        info->recv_buffer          = (uintptr_t)perf->ucp.tctx[i].perf.recv_buffer;

        vec[(i * 3) + 0].iov_len  = sizeof(*info);
        vec[(i * 3) + 1].iov_base = address;
        vec[(i * 3) + 1].iov_len  = address_length;
        vec[(i * 3) + 2].iov_base = rkey_buffer;
        vec[(i * 3) + 2].iov_len  = info->rkey_size;

        address_length = 0;
    }

    /* send to the remote peer */
    rte_call(perf, post_vec, vec, 3 * thread_count, &req);
    rte_call(perf, exchange_vec, req);

    if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) {
        ucp_rkey_buffer_release(rkey_buffer);
    }

    /* local copies were consumed by the RTE exchange; release them */
    for (i = 0; i < thread_count; i++) {
        free(vec[i * 3].iov_base);
        ucp_worker_release_address(perf->ucp.tctx[i].perf.ucp.worker,
                                   vec[(i * 3) + 1].iov_base);
    }
    free(vec);

    return UCS_OK;

err_free_workers_vec:
    /* Unwind threads 0..i-1: free info, release address, destroy worker and
     * NULL it so the caller's destroy_workers pass skips it. */
    for (j = 0; j < i; j++) {
        free(vec[j * 3].iov_base);
        ucp_worker_release_address(perf->ucp.tctx[j].perf.ucp.worker,
                                   vec[(j * 3) + 1].iov_base);
        ucp_worker_destroy(perf->ucp.tctx[j].perf.ucp.worker);
        perf->ucp.tctx[j].perf.ucp.worker = NULL;
    }
    free(vec);
err_rkey_release:
    if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) {
        ucp_rkey_buffer_release(rkey_buffer);
    }
err:
    return status;
}
/* Establish connections between the two peers: send our per-thread wireup
 * data, receive and connect to the peer's, agree on the outcome with a
 * status exchange, and finally flush every worker to force wireup to
 * complete before measurements start.
 *
 * FIX: warning message typo "theread" -> "thread", and %u for the unsigned
 * loop counter. */
static ucs_status_t ucp_perf_test_setup_endpoints(ucx_perf_context_t *perf,
                                                  uint64_t features)
{
    ucs_status_t status;
    unsigned i;

    /* pack the local endpoints data and send to the remote peer */
    status = ucp_perf_test_send_local_data(perf, features);
    if (status != UCS_OK) {
        goto err;
    }

    /* receive remote peer's endpoints' data and connect to them */
    status = ucp_perf_test_receive_remote_data(perf);
    if (status != UCS_OK) {
        goto err;
    }

    /* sync status across all processes */
    status = ucp_perf_test_exchange_status(perf, UCS_OK);
    if (status != UCS_OK) {
        goto err_destroy_eps;
    }

    /* force wireup completion */
    for (i = 0; i < perf->params.thread_count; i++) {
        status = ucp_worker_flush(perf->ucp.tctx[i].perf.ucp.worker);
        if (status != UCS_OK) {
            ucs_warn("ucp_worker_flush() failed on thread %u: %s",
                     i, ucs_status_string(status));
        }
    }

    return status;

err_destroy_eps:
    ucp_perf_test_destroy_eps(perf);
err:
    /* let the peer know we failed, so both sides abort the setup */
    (void)ucp_perf_test_exchange_status(perf, status);
    return status;
}
/* Tear down the test endpoints. The barrier runs first so both peers have
 * finished the measurement loop before either side starts closing endpoints
 * that the other may still be targeting. */
static void ucp_perf_test_cleanup_endpoints(ucx_perf_context_t *perf)
{
    ucp_perf_barrier(perf);
    ucp_perf_test_destroy_eps(perf);
}
/* Destroy every per-thread UCP worker that was successfully created.
 * NULL entries are skipped, so this is safe to call from partial-setup
 * error paths. */
static void ucp_perf_test_destroy_workers(ucx_perf_context_t *perf)
{
    unsigned idx = 0;

    while (idx < perf->params.thread_count) {
        if (perf->ucp.tctx[idx].perf.ucp.worker != NULL) {
            ucp_worker_destroy(perf->ucp.tctx[idx].perf.ucp.worker);
        }
        ++idx;
    }
}
/* Reconfigure the context for the warmup phase: run at most warmup_iter
 * iterations, but never more than a tenth (rounded up) of the configured
 * measurement iterations, and suppress intermediate reporting. */
static void ucx_perf_set_warmup(ucx_perf_context_t* perf,
                                const ucx_perf_params_t* params)
{
    /* effectively disables periodic progress reports during warmup */
    perf->report_interval = ULONG_MAX;
    perf->max_iter        = ucs_min(params->warmup_iter,
                                    ucs_div_round_up(params->max_iter, 10));
}
/* Find and open the memory domain that provides the transport/device pair
 * requested in perf->params.uct. Iterates over all UCT components, and for
 * each MD resource opens the MD and scans its transport resources; the first
 * MD whose resources match both tl_name and dev_name is kept in perf->uct
 * (together with its component). Returns UCS_ERR_NO_DEVICE when no MD
 * matches. Non-matching MDs are closed before moving on. */
static ucs_status_t uct_perf_create_md(ucx_perf_context_t *perf)
{
    uct_component_h *uct_components;
    uct_component_attr_t component_attr;
    uct_tl_resource_desc_t *tl_resources;
    unsigned md_index, num_components;
    unsigned tl_index, num_tl_resources;
    unsigned cmpt_index;
    ucs_status_t status;
    uct_md_h md;
    uct_md_config_t *md_config;

    status = uct_query_components(&uct_components, &num_components);
    if (status != UCS_OK) {
        goto out;
    }

    for (cmpt_index = 0; cmpt_index < num_components; ++cmpt_index) {
        /* First query only the resource count, to size the alloca below */
        component_attr.field_mask = UCT_COMPONENT_ATTR_FIELD_MD_RESOURCE_COUNT;
        status = uct_component_query(uct_components[cmpt_index], &component_attr);
        if (status != UCS_OK) {
            goto out_release_components_list;
        }

        /* Then fetch the MD resource list itself (stack-allocated) */
        component_attr.field_mask = UCT_COMPONENT_ATTR_FIELD_MD_RESOURCES;
        component_attr.md_resources = alloca(sizeof(*component_attr.md_resources) *
                                             component_attr.md_resource_count);
        status = uct_component_query(uct_components[cmpt_index], &component_attr);
        if (status != UCS_OK) {
            goto out_release_components_list;
        }

        for (md_index = 0; md_index < component_attr.md_resource_count; ++md_index) {
            status = uct_md_config_read(uct_components[cmpt_index], NULL, NULL,
                                        &md_config);
            if (status != UCS_OK) {
                goto out_release_components_list;
            }

            /* Record the candidate MD name in the params for reporting */
            ucs_strncpy_zero(perf->params.uct.md_name,
                             component_attr.md_resources[md_index].md_name,
                             UCT_MD_NAME_MAX);

            status = uct_md_open(uct_components[cmpt_index],
                                 component_attr.md_resources[md_index].md_name,
                                 md_config, &md);
            uct_config_release(md_config);
            if (status != UCS_OK) {
                goto out_release_components_list;
            }

            status = uct_md_query_tl_resources(md, &tl_resources, &num_tl_resources);
            if (status != UCS_OK) {
                uct_md_close(md);
                goto out_release_components_list;
            }

            /* Does this MD expose the requested transport on the requested
             * device? If so, keep the MD open and return it. */
            for (tl_index = 0; tl_index < num_tl_resources; ++tl_index) {
                if (!strcmp(perf->params.uct.tl_name, tl_resources[tl_index].tl_name) &&
                    !strcmp(perf->params.uct.dev_name, tl_resources[tl_index].dev_name))
                {
                    uct_release_tl_resource_list(tl_resources);
                    perf->uct.cmpt = uct_components[cmpt_index];
                    perf->uct.md   = md;
                    status         = UCS_OK;
                    goto out_release_components_list;
                }
            }

            /* No match in this MD - close it and try the next one */
            uct_md_close(md);
            uct_release_tl_resource_list(tl_resources);
        }
    }

    ucs_error("Cannot use "UCT_PERF_TEST_PARAMS_FMT,
              UCT_PERF_TEST_PARAMS_ARG(&perf->params));
    status = UCS_ERR_NO_DEVICE;

out_release_components_list:
    uct_release_component_list(uct_components);
out:
    return status;
}
/* RTE barrier for the UCT test; the UCT worker's progress routine is passed
 * in so the runtime can keep the transport progressing while waiting. */
void uct_perf_barrier(ucx_perf_context_t *perf)
{
    rte_call(perf, barrier, (void(*)(void*))uct_worker_progress,
             (void*)perf->uct.worker);
}
/* RTE barrier for the UCP test. Each OpenMP thread progresses its own
 * worker while waiting; without OpenMP there is a single thread context,
 * so worker 0 is used. */
void ucp_perf_barrier(ucx_perf_context_t *perf)
{
    rte_call(perf, barrier, (void(*)(void*))ucp_worker_progress,
#if _OPENMP
             (void*)perf->ucp.tctx[omp_get_thread_num()].perf.ucp.worker);
#else
             (void*)perf->ucp.tctx[0].perf.ucp.worker);
#endif
}
/* Bring up the whole UCT-side test context in order: async context, worker,
 * memory domain, interface, capability check (synchronized across peers),
 * test buffers, and finally the endpoints. Each failure unwinds exactly the
 * steps completed so far via the goto ladder at the bottom. */
static ucs_status_t uct_perf_setup(ucx_perf_context_t *perf)
{
    ucx_perf_params_t *params = &perf->params;
    uct_iface_config_t *iface_config;
    ucs_status_t status;
    uct_iface_params_t iface_params = {
        .field_mask           = UCT_IFACE_PARAM_FIELD_OPEN_MODE |
                                UCT_IFACE_PARAM_FIELD_STATS_ROOT |
                                UCT_IFACE_PARAM_FIELD_RX_HEADROOM |
                                UCT_IFACE_PARAM_FIELD_CPU_MASK,
        .open_mode            = UCT_IFACE_OPEN_MODE_DEVICE,
        .mode.device.tl_name  = params->uct.tl_name,
        .mode.device.dev_name = params->uct.dev_name,
        .mode.device.ifaddr   = params->uct.ifaddr,
        .mode.device.netmask  = params->uct.netmask,
        .stats_root           = ucs_stats_get_root(),
        .rx_headroom          = 0
    };
    char dest_str[UCS_SOCKADDR_STRING_LEN];

    /* diagnostic trace of the destination address we are about to use */
    ucs_info("uct_perf_setup(dest_addr=%s)", ucs_sockaddr_str((const struct sockaddr *)(params->uct.ifaddr), dest_str, UCS_SOCKADDR_STRING_LEN));
    UCS_CPU_ZERO(&iface_params.cpu_mask);

    status = ucs_async_context_init(&perf->uct.async, params->async_mode);
    if (status != UCS_OK) {
        goto out;
    }

    status = uct_worker_create(&perf->uct.async, params->thread_mode,
                               &perf->uct.worker);
    if (status != UCS_OK) {
        goto out_cleanup_async;
    }

    status = uct_perf_create_md(perf);
    if (status != UCS_OK) {
        goto out_destroy_worker;
    }

    status = uct_md_iface_config_read(perf->uct.md, params->uct.tl_name, NULL,
                                      NULL, &iface_config);
    if (status != UCS_OK) {
        goto out_destroy_md;
    }

    status = uct_iface_open(perf->uct.md, perf->uct.worker, &iface_params,
                            iface_config, &perf->uct.iface);
    uct_config_release(iface_config);
    if (status != UCS_OK) {
        ucs_error("Failed to open iface: %s", ucs_status_string(status));
        goto out_destroy_md;
    }

    /* the local capability-check result is folded into the exchanged status,
     * so both peers abort if either side lacks a required capability */
    status = uct_perf_test_check_capabilities(params, perf->uct.iface,
                                              perf->uct.md);
    /* sync status across all processes */
    status = ucp_perf_test_exchange_status(perf, status);
    if (status != UCS_OK) {
        goto out_iface_close;
    }

    status = uct_perf_test_alloc_mem(perf);
    if (status != UCS_OK) {
        goto out_iface_close;
    }

    /* Enable progress before `uct_iface_flush` and `uct_worker_progress` called
     * to give a chance to finish connection for some tranports (ib/ud, tcp).
     * They may return UCS_INPROGRESS from `uct_iface_flush` when connections are
     * in progress */
    uct_iface_progress_enable(perf->uct.iface,
                              UCT_PROGRESS_SEND | UCT_PROGRESS_RECV);

    status = uct_perf_test_setup_endpoints(perf);
    if (status != UCS_OK) {
        ucs_error("Failed to setup endpoints: %s", ucs_status_string(status));
        goto out_free_mem;
    }

    return UCS_OK;

out_free_mem:
    uct_perf_test_free_mem(perf);
out_iface_close:
    uct_iface_close(perf->uct.iface);
out_destroy_md:
    uct_md_close(perf->uct.md);
out_destroy_worker:
    uct_worker_destroy(perf->uct.worker);
out_cleanup_async:
    ucs_async_context_cleanup(&perf->uct.async);
out:
    return status;
}
/* Tear down the UCT test context in exact reverse order of uct_perf_setup():
 * endpoints, buffers, interface, MD, worker, async context. */
static void uct_perf_cleanup(ucx_perf_context_t *perf)
{
    uct_perf_test_cleanup_endpoints(perf);
    uct_perf_test_free_mem(perf);
    uct_iface_close(perf->uct.iface);
    uct_md_close(perf->uct.md);
    uct_worker_destroy(perf->uct.worker);
    ucs_async_context_cleanup(&perf->uct.async);
}
static void ucp_perf_request_init(void *req)
{
ucp_perf_request_t *request = req;
request->context = NULL;
}
/* Bring up the UCP-side test context: init the UCP context, allocate the
 * send/recv buffers, create one worker (and a sliced view of the buffers)
 * per thread, and connect to the peer.
 *
 * FIXES:
 *  - UCP_PARAM_FIELD_MT_WORKERS_SHARED was OR-ed into ucp_params.features
 *    instead of ucp_params.field_mask; the flag marks the mt_workers_shared
 *    field as valid, so the multi-thread hint was never actually applied.
 *  - On tctx allocation failure, `status` still held UCS_OK from the
 *    preceding call, so the function reported success; it now returns
 *    UCS_ERR_NO_MEMORY. */
static ucs_status_t ucp_perf_setup(ucx_perf_context_t *perf)
{
    ucp_params_t ucp_params;
    ucp_worker_params_t worker_params;
    ucp_config_t *config;
    ucs_status_t status;
    unsigned i, thread_count;
    size_t message_size;

    ucp_params.field_mask   = UCP_PARAM_FIELD_FEATURES |
                              UCP_PARAM_FIELD_REQUEST_SIZE |
                              UCP_PARAM_FIELD_REQUEST_INIT;
    ucp_params.features     = 0;
    ucp_params.request_size = sizeof(ucp_perf_request_t);
    ucp_params.request_init = ucp_perf_request_init;

    if (perf->params.thread_count > 1) {
        /* when there is more than one thread, a ucp_worker would be created for
         * each. all of them will share the same ucp_context */
        ucp_params.field_mask       |= UCP_PARAM_FIELD_MT_WORKERS_SHARED;
        ucp_params.mt_workers_shared = 1;
    }

    /* translate test command/flags into the required UCP feature bits */
    status = ucp_perf_test_fill_params(&perf->params, &ucp_params);
    if (status != UCS_OK) {
        goto err;
    }

    status = ucp_config_read(NULL, NULL, &config);
    if (status != UCS_OK) {
        goto err;
    }

    status = ucp_init(&ucp_params, config, &perf->ucp.context);
    ucp_config_release(config);
    if (status != UCS_OK) {
        goto err;
    }

    thread_count = perf->params.thread_count;
    message_size = ucx_perf_get_message_size(&perf->params);

    status = ucp_perf_test_alloc_mem(perf);
    if (status != UCS_OK) {
        ucs_warn("ucp test failed to allocate memory");
        goto err_cleanup;
    }

    perf->ucp.tctx = calloc(thread_count, sizeof(ucx_perf_thread_context_t));
    if (perf->ucp.tctx == NULL) {
        ucs_warn("ucp test failed to allocate memory for thread contexts");
        status = UCS_ERR_NO_MEMORY;
        goto err_free_mem;
    }

    worker_params.field_mask  = UCP_WORKER_PARAM_FIELD_THREAD_MODE;
    worker_params.thread_mode = perf->params.thread_mode;

    for (i = 0; i < thread_count; i++) {
        perf->ucp.tctx[i].tid  = i;
        perf->ucp.tctx[i].perf = *perf;
        /* Doctor the src and dst buffers to make them thread specific */
        perf->ucp.tctx[i].perf.send_buffer =
            UCS_PTR_BYTE_OFFSET(perf->send_buffer, i * message_size);
        perf->ucp.tctx[i].perf.recv_buffer =
            UCS_PTR_BYTE_OFFSET(perf->recv_buffer, i * message_size);

        status = ucp_worker_create(perf->ucp.context, &worker_params,
                                   &perf->ucp.tctx[i].perf.ucp.worker);
        if (status != UCS_OK) {
            goto err_free_tctx_destroy_workers;
        }
    }

    status = ucp_perf_test_setup_endpoints(perf, ucp_params.features);
    if (status != UCS_OK) {
        if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Failed to setup endpoints: %s", ucs_status_string(status));
        }
        goto err_free_tctx_destroy_workers;
    }

    return UCS_OK;

err_free_tctx_destroy_workers:
    /* destroy_workers skips NULL entries, so a partially-filled tctx is OK
     * (calloc zero-initialized the array) */
    ucp_perf_test_destroy_workers(perf);
    free(perf->ucp.tctx);
err_free_mem:
    ucp_perf_test_free_mem(perf);
err_cleanup:
    ucp_cleanup(perf->ucp.context);
err:
    return status;
}
/* Tear down the UCP test context in reverse order of ucp_perf_setup():
 * endpoints (synchronized with the peer), a final barrier so both sides are
 * done before buffers disappear, then buffers, workers, thread contexts and
 * the UCP context itself. */
static void ucp_perf_cleanup(ucx_perf_context_t *perf)
{
    ucp_perf_test_cleanup_endpoints(perf);
    ucp_perf_barrier(perf);
    ucp_perf_test_free_mem(perf);
    ucp_perf_test_destroy_workers(perf);
    free(perf->ucp.tctx);
    ucp_cleanup(perf->ucp.context);
}
/* Per-API dispatch table, indexed by the test API (UCX_PERF_API_UCT /
 * UCX_PERF_API_UCP): context setup/cleanup, the measurement loop itself,
 * and an inter-process barrier. */
static struct {
    ucs_status_t (*setup)(ucx_perf_context_t *perf);
    void         (*cleanup)(ucx_perf_context_t *perf);
    ucs_status_t (*run)(ucx_perf_context_t *perf);
    void         (*barrier)(ucx_perf_context_t *perf);
} ucx_perf_funcs[] = {
    [UCX_PERF_API_UCT] = {uct_perf_setup, uct_perf_cleanup,
                          uct_perf_test_dispatch, uct_perf_barrier},
    [UCX_PERF_API_UCP] = {ucp_perf_setup, ucp_perf_cleanup,
                          ucp_perf_test_dispatch, ucp_perf_barrier}
};
static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
ucx_perf_result_t* result);
/* Top-level entry point: validate parameters, set up the selected API
 * (UCT/UCP), run an optional warmup pass followed by the measured pass, and
 * report the result through the RTE. Multi-threaded runs are delegated to
 * ucx_perf_thread_spawn(). Returns the status of the run (or of the first
 * failing setup step). */
ucs_status_t ucx_perf_run(const ucx_perf_params_t *params,
                          ucx_perf_result_t *result)
{
    ucx_perf_context_t *perf;
    ucs_status_t status;

    ucx_perf_global_init();

    if (params->command == UCX_PERF_CMD_LAST) {
        ucs_error("Test is not selected");
        status = UCS_ERR_INVALID_PARAM;
        goto out;
    }

    if ((params->api != UCX_PERF_API_UCT) && (params->api != UCX_PERF_API_UCP)) {
        ucs_error("Invalid test API parameter (should be UCT or UCP)");
        status = UCS_ERR_INVALID_PARAM;
        goto out;
    }

    perf = malloc(sizeof(*perf));
    if (perf == NULL) {
        status = UCS_ERR_NO_MEMORY;
        goto out;
    }

    /* copies params into perf and picks the memory-type allocator */
    ucx_perf_test_init(perf, params);

    if (perf->allocator == NULL) {
        ucs_error("Unsupported memory types %s<->%s",
                  ucs_memory_type_names[params->send_mem_type],
                  ucs_memory_type_names[params->recv_mem_type]);
        status = UCS_ERR_UNSUPPORTED;
        goto out_free;
    }

    if ((params->api == UCX_PERF_API_UCT) &&
        (perf->allocator->mem_type != UCS_MEMORY_TYPE_HOST)) {
        ucs_warn("UCT tests also copy 2-byte values from %s memory to "
                 "%s memory, which may impact performance results",
                 ucs_memory_type_names[perf->allocator->mem_type],
                 ucs_memory_type_names[UCS_MEMORY_TYPE_HOST]);
    }

    status = perf->allocator->init(perf);
    if (status != UCS_OK) {
        goto out_free;
    }

    status = ucx_perf_funcs[params->api].setup(perf);
    if (status != UCS_OK) {
        goto out_free;
    }

    if (params->thread_count == 1) {
        if (params->api == UCX_PERF_API_UCP) {
            /* single-thread convenience aliases into thread context 0 */
            perf->ucp.worker      = perf->ucp.tctx[0].perf.ucp.worker;
            perf->ucp.ep          = perf->ucp.tctx[0].perf.ucp.ep;
            perf->ucp.remote_addr = perf->ucp.tctx[0].perf.ucp.remote_addr;
            perf->ucp.rkey        = perf->ucp.tctx[0].perf.ucp.rkey;
        }

        if (params->warmup_iter > 0) {
            /* warmup pass: shortened iteration count, no reporting, and the
             * counters are reset afterwards */
            ucx_perf_set_warmup(perf, params);
            status = ucx_perf_funcs[params->api].run(perf);
            if (status != UCS_OK) {
                goto out_cleanup;
            }
            ucx_perf_funcs[params->api].barrier(perf);
            ucx_perf_test_prepare_new_run(perf, params);
        }

        /* Run test */
        status = ucx_perf_funcs[params->api].run(perf);
        ucx_perf_funcs[params->api].barrier(perf);
        if (status == UCS_OK) {
            ucx_perf_calc_result(perf, result);
            rte_call(perf, report, result, perf->params.report_arg, 1, 0);
        }
    } else {
        status = ucx_perf_thread_spawn(perf, result);
    }

out_cleanup:
    ucx_perf_funcs[params->api].cleanup(perf);
out_free:
    free(perf);
out:
    return status;
}
#if _OPENMP
/* Body executed by each OpenMP thread: optional warmup pass, then the
 * measured pass. The omp barrier aligns all threads at the start of the
 * measured run so their timing windows overlap. The per-thread result is
 * stored in the thread context for later aggregation. */
static ucs_status_t ucx_perf_thread_run_test(void* arg)
{
    ucx_perf_thread_context_t* tctx = (ucx_perf_thread_context_t*) arg; /* a single thread context */
    ucx_perf_result_t* result       = &tctx->result;
    ucx_perf_context_t* perf        = &tctx->perf;
    ucx_perf_params_t* params       = &perf->params;
    ucs_status_t status;

    if (params->warmup_iter > 0) {
        ucx_perf_set_warmup(perf, params);
        status = ucx_perf_funcs[params->api].run(perf);
        ucx_perf_funcs[params->api].barrier(perf);
        if (UCS_OK != status) {
            goto out;
        }
        /* reset counters so warmup traffic is excluded from the results */
        ucx_perf_test_prepare_new_run(perf, params);
    }

    /* Run test */
#pragma omp barrier
    status = ucx_perf_funcs[params->api].run(perf);
    ucx_perf_funcs[params->api].barrier(perf);
    if (UCS_OK != status) {
        goto out;
    }
    ucx_perf_calc_result(perf, result);

out:
    return status;
}
/* Combine the per-thread results into a single report: bandwidth and
 * message rate are summed across threads, latency is averaged, and the
 * per-iteration "moment" values are zeroed because the last-iteration
 * samples are not captured in multi-threaded mode. */
static void ucx_perf_thread_report_aggregated_results(ucx_perf_context_t *perf)
{
    ucx_perf_thread_context_t* threads = perf->ucp.tctx; /* all the thread contexts on perf */
    unsigned tid, thread_count         = perf->params.thread_count;
    double latency_sum                 = 0.0;
    ucx_perf_result_t summary;

    /* iteration/byte/time counters are identical across threads; take
     * thread 0's values */
    summary.iters        = threads[0].result.iters;
    summary.bytes        = threads[0].result.bytes;
    summary.elapsed_time = threads[0].result.elapsed_time;

    /* accumulators for the summed metrics */
    summary.bandwidth.total_average = 0.0;
    summary.msgrate.total_average   = 0.0;
    summary.latency.total_average   = 0.0;

    /* Undefined since used only for latency calculations */
    summary.bandwidth.typical = 0.0;
    summary.msgrate.typical   = 0.0;

    /* when running with multiple threads, the moment average value is
     * undefined since we don't capture the values of the last iteration */
    summary.msgrate.moment_average   = 0.0;
    summary.bandwidth.moment_average = 0.0;
    summary.latency.moment_average   = 0.0;
    summary.latency.typical          = 0.0;

    /* in case of multiple threads, we have to aggregate the results so that the
     * final output of the result would show the performance numbers that were
     * collected from all the threads.
     * BW and message rate values will be the sum of their values from all
     * the threads, while the latency value is the average latency from the
     * threads. */
    for (tid = 0; tid < thread_count; tid++) {
        summary.bandwidth.total_average += threads[tid].result.bandwidth.total_average;
        summary.msgrate.total_average   += threads[tid].result.msgrate.total_average;
        latency_sum                     += threads[tid].result.latency.total_average;
    }
    summary.latency.total_average = latency_sum / thread_count;

    rte_call(perf, report, &summary, perf->params.report_arg, 1, 1);
}
/* Run the test on `thread_count` OpenMP threads: each thread executes
 * ucx_perf_thread_run_test() on its own context, then the per-thread
 * statuses are checked (any failure is reported and becomes the return
 * value) and the aggregated results are reported.
 *
 * FIX: removed the `statuses` array - it was allocated, freed, and never
 * used (thread statuses live in tctx[ti].status), and its allocation added
 * a pointless failure path. The `result` parameter is unused here; the
 * aggregated report goes through the RTE instead. */
static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
                                          ucx_perf_result_t* result)
{
    ucx_perf_thread_context_t* tctx = perf->ucp.tctx; /* all the thread contexts on perf */
    int ti, thread_count = perf->params.thread_count;
    ucs_status_t status  = UCS_OK;

    omp_set_num_threads(thread_count);

#pragma omp parallel private(ti)
    {
        ti = omp_get_thread_num();
        tctx[ti].status = ucx_perf_thread_run_test((void*)&tctx[ti]);
    }

    /* any thread failure makes the whole run fail (last failure wins) */
    for (ti = 0; ti < thread_count; ti++) {
        if (UCS_OK != tctx[ti].status) {
            ucs_error("Thread %d failed to run test: %s", tctx[ti].tid,
                      ucs_status_string(tctx[ti].status));
            status = tctx[ti].status;
        }
    }

    /* reported even on failure, preserving the original behavior */
    ucx_perf_thread_report_aggregated_results(perf);

    return status;
}
#else
/* Fallback when built without OpenMP: multi-threaded runs cannot be
 * executed, so reject the request with an error. */
static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
                                          ucx_perf_result_t* result) {
    ucs_error("Invalid test parameter (thread mode requested without OpenMP capabilities)");
    return UCS_ERR_INVALID_PARAM;
}
#endif /* _OPENMP */
/* One-time global initialization: register the built-in host-memory
 * allocator and load the memory-type allocator modules (cuda/rocm/...).
 * Safe to call repeatedly - the allocator is a function-local static and
 * the module framework loads only once. */
void ucx_perf_global_init()
{
    static ucx_perf_allocator_t host_allocator = {
        .mem_type  = UCS_MEMORY_TYPE_HOST,
        .init      = ucs_empty_function_return_success,
        .ucp_alloc = ucp_perf_test_alloc_host,
        .ucp_free  = ucp_perf_test_free_host,
        .uct_alloc = uct_perf_test_alloc_host,
        .uct_free  = uct_perf_test_free_host,
        .memcpy    = ucx_perf_test_memcpy_host,
        .memset    = memset
    };
    UCS_MODULE_FRAMEWORK_DECLARE(ucx_perftest);

    ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_HOST] = &host_allocator;

    /* FIXME Memtype allocator modules must be loaded to global scope, otherwise
     * alloc hooks, which are using dlsym() to get pointer to original function,
     * do not work. Need to use bistro for memtype hooks to fix it.
     */
    UCS_MODULE_FRAMEWORK_LOAD(ucx_perftest, UCS_MODULE_LOAD_FLAG_GLOBAL);
}
|
tiling-2.c | /*
matmul.c : Matrix Multiplication with tiling for openmp4 example
*/
#include <stdlib.h>
#include <math.h>
#define BLOCK_SIZE 16
/*
#define BLOCK_SIZE 32
*/
#define NSECPERSEC 1000000000L
/* Row-major matrix with dimensions padded up to BLOCK_SIZE multiples so the
 * tiled kernel never needs partial tiles in storage. */
typedef struct {
   int width;       /* logical number of columns */
   int height;      /* logical number of rows */
   int stride;      /* row pitch: width rounded up to a BLOCK_SIZE multiple */
   int hpad;        /* height rounded up to a BLOCK_SIZE multiple */
   float* elements; /* stride * hpad floats, row-major */
} Matrix;
/* Correctly extract the number of nanoseconds from the two time structures */
long int get_nanosecs( struct timespec start_time, struct timespec end_time) {
long int nanosecs;
if ((end_time.tv_nsec-start_time.tv_nsec)<0) nanosecs =
((((long int) end_time.tv_sec- (long int) start_time.tv_sec )-1)*NSECPERSEC ) +
( NSECPERSEC + (long int) end_time.tv_nsec - (long int) start_time.tv_nsec) ;
else nanosecs =
(((long int) end_time.tv_sec- (long int) start_time.tv_sec )*NSECPERSEC ) +
( (long int) end_time.tv_nsec - (long int) start_time.tv_nsec );
return nanosecs;
}
void simple_sgemm_tt(const int M,const int N,const int K,const float alpha, const float* A,const int LDA,
const float* B,const int LDB, const float beta,float* C, const int LDC) ;
void simple_sgemm_tn(const int M,const int N,const int K,const float alpha, const float* A,const int LDA,
const float* B,const int LDB, const float beta,float* C, const int LDC) ;
void tiled_sgemm_tt(const int M,const int N,const int K,const float alpha, const float*A, const int LDA,
const float* B,const int LDB, const float beta,float* C, const int LDC) ;
/* Compare a result vector against a reference element-wise with a 0.1%
 * relative tolerance; aborts the program on the first mismatch (testsuite
 * convention) and returns 1 otherwise.
 *
 * FIX: the tolerance was computed as 0.001*v_ref[i], which is negative for
 * negative references and therefore rejected even exact matches. Use the
 * magnitude of the reference instead. For v_ref[i] == 0 the comparison
 * still requires an exact match, as before. */
int verify(float* v_res, float* v_ref, int len) {
    int passed = 1;
    int i;

    for (i = 0; i < len; ++i) {
      if (fabs(v_res[i] - v_ref[i]) > 0.001*fabs(v_ref[i])) {
        __builtin_abort ();
      }
    }
    return passed;
}
/* Driver: build padded matrices A (a1 x a2), B/Bt (a2 x a3) and result
 * matrices C/Cref (a1 x a3), compute Cref with the reference SGEMM and C
 * with the tiled/offloaded SGEMM, and verify they match (verify() aborts on
 * mismatch).
 *
 * Cleanup: removed a block of unused locals (timing structs, gflops/ops
 * counters - nothing in this test measures time) and freed the five matrix
 * allocations before returning. */
int main(int argc, char* argv[]){
  Matrix A,B,Bt,C,Cref;
  int a1,a2,a3,i,j;

  /* Matrix dimensions - deliberately not BLOCK_SIZE multiples, to
   * exercise the padding logic */
  a1 = 35;
  a2 = 28;
  a3 = 47;

  A.height = a1;
  A.width = a2;
  A.stride = (((A.width-1)/BLOCK_SIZE)+1) * BLOCK_SIZE;
  A.hpad = (((A.height-1)/BLOCK_SIZE)+1) * BLOCK_SIZE;
  A.elements = (float*)malloc(A.stride * A.hpad* sizeof(float));

  B.height = a2;
  B.width = a3;
  B.stride = (((B.width-1)/BLOCK_SIZE)+1) * BLOCK_SIZE;
  B.hpad = (((B.height-1)/BLOCK_SIZE)+1) * BLOCK_SIZE;
  B.elements = (float*)malloc(B.stride * B.hpad * sizeof(float));

  /* Bt is same as B but stored in column-major order */
  Bt.height = B.height;
  Bt.width = B.width;
  Bt.stride = B.stride;
  Bt.hpad = B.hpad;
  Bt.elements = (float*)malloc(Bt.stride * Bt.hpad * sizeof(float));

  C.height = a1;
  C.width = a3;
  C.stride = (((C.width-1)/BLOCK_SIZE)+1) * BLOCK_SIZE;
  C.hpad = (((C.height-1)/BLOCK_SIZE)+1) * BLOCK_SIZE;
  C.elements = (float*)malloc(C.stride * C.hpad * sizeof(float));

  Cref.height = a1;
  Cref.width = a3;
  Cref.stride = (((Cref.width-1)/BLOCK_SIZE)+1) * BLOCK_SIZE;
  Cref.hpad = (((Cref.height-1)/BLOCK_SIZE)+1) * BLOCK_SIZE;
  Cref.elements = (float*)malloc(Cref.stride * Cref.hpad * sizeof(float));

  /* Initialize A; padding cells are zeroed so they don't affect results */
  for(i = 0; i < A.hpad ; i++)
    for(j = 0; j < A.stride; j++) {
      if (( j<A.width ) && (i<A.height)) {
        A.elements[i*A.stride + j] = (i % 3);
      } else {
        A.elements[i*A.stride + j] = 0.0;
      }
    }

  /* Initialize B and Bt */
  for(i = 0; i < B.hpad ; i++)
    for(j = 0; j < B.stride; j++) {
      if (( j<B.width ) && (i<B.height)) {
        B.elements[i*B.stride+j] = (j % 2);
        Bt.elements[j*Bt.stride+i] = B.elements[i*B.stride+j] ;
      } else {
        B.elements[i*B.stride+j] = 0.0;
        Bt.elements[j*Bt.stride+i] = 0.0;
      }
    }

  /* zero C, and Cref */
  for(i = 0; i < C.hpad; i++)
    for(j = 0; j < C.stride; j++) {
      C.elements[i*C.stride+j] = 0.0;
      Cref.elements[i*Cref.stride+j] = 0.0;
    }

  /* reference result on the host, tiled result (possibly offloaded) */
  simple_sgemm_tt(A.height,B.width,B.height,1.0,A.elements,A.stride,B.elements,B.stride,1.0,Cref.elements,Cref.stride);
  tiled_sgemm_tt(A.height,B.width,B.height,1.0,A.elements,A.stride,B.elements,B.stride,1.0,C.elements,C.stride);

  verify(C.elements, Cref.elements, C.height * C.stride);

  free(A.elements);
  free(B.elements);
  free(Bt.elements);
  free(C.elements);
  free(Cref.elements);
  return 0;
}
/* Reference SGEMM, both operands row-major ("tt"):
 * C[M x N] = alpha * A[M x K] * B[K x N] + beta * C, with leading
 * dimensions LDA/LDB/LDC giving the row pitch of each matrix. */
void simple_sgemm_tt(const int M,const int N,const int K,const float alpha, const float* A,const int LDA,
const float* B,const int LDB, const float beta,float* C, const int LDC) {
   int row, col, k;

   /* row-outer traversal; each (row,col) accumulates its dot product
    * independently, so the loop order does not change the result */
   for (row = 0; row < M; row++) {
      for (col = 0; col < N; col++) {
         float acc = 0.0;
         for (k = 0; k < K; k++) {
            acc += A[row*LDA + k] * B[k*LDB + col];
         }
         C[row*LDC + col] = alpha*acc + beta*C[row*LDC + col];
      }
   }
}
/***************************
tiled_sgemm_tt: Tiled matrix multiplication:
***************************/
/* Tiled SGEMM for OpenMP 4 target offload: one team per BLOCK_SIZE x
 * BLOCK_SIZE tile of C, with the classic shared-memory tiling scheme (load
 * As/Bs tiles, multiply-accumulate into Cs, then scale into C). The
 * "parallel for collapse(2)" pairs mark where a hand-written GPU kernel
 * would place barriers, as the inline comments explain.
 * NOTE(review): the map clauses use `A[M*K]` etc., which in OpenMP is a
 * single (out-of-range) element, not the array section `A[0:M*K]`; also
 * the row pitch is LDA/LDB/LDC, not K/N - confirm against the OpenMP spec
 * and fix the sections if offload to discrete devices is intended. */
void tiled_sgemm_tt(const int M, const int N, const int K, const float alpha, const float*A, const int LDA,
const float*B, const int LDB, const float beta, float*C, const int LDC){

#pragma omp target teams map(to:A[M*K],B[K*N]) map(from:C[M*N])
#pragma omp distribute collapse(2)
   for (int C_row_start=0 ; C_row_start < M ; C_row_start+=BLOCK_SIZE) {
      for (int C_col_start=0 ; C_col_start < N ; C_col_start+=BLOCK_SIZE) {

// We now have M/BLOCK_SIZE * N/BLOCK_SIZE teams = (M*N)/(BLOCK_SIZE*BLOCK_SIZE)
// The grid global dimensions are M,N,1
// The grid local dimensions are BLOCK_SIZE,BLOCK_SIZE,1

// -------------------------------------------------------------------
// The rest of this code forms the HSAIL kernel with the
// pairs of "paralell for collapse(2)" loops repalced with a barrier.

// The kernel initializes these values
//    C_row_start = get_group_id(0) * BLOCK_SIZE
//    C_col_start = get_group_id(1) * BLOCK_SIZE
//    row=get_local_id(0)
//    col=get_local_id(1)
// -------------------------------------------------------------------

         // Each team has a local copy of these mini matrices
         float As[BLOCK_SIZE][BLOCK_SIZE];
         float Bs[BLOCK_SIZE][BLOCK_SIZE];
         float Cs[BLOCK_SIZE][BLOCK_SIZE];
         int C_row, C_col;

         /* Zero Cs for this BLOCK */
         // - - - - - - - - - - - - - - - - - - - -
         // REPLACE NEXT THREE LINES WITH A BARRIER
#pragma omp parallel for collapse(2)
         for (int row=0 ; row < BLOCK_SIZE ; row++) {
            for (int col=0 ; col < BLOCK_SIZE ; col++) {
            // END BARRIER
            // - - - - - - - - - - - - - - - - - - - -
               Cs[row][col] = 0.0;
            }
         }

         // This kblock loop is run on the master thread of each team
         for (int kblock = 0; kblock < K ; kblock += BLOCK_SIZE ) {

            // Copy global memory values to local memory
            // (out-of-range cells are zero-filled so partial tiles at the
            // matrix edges contribute nothing to the dot products)
            // - - - - - - - - - - - - - - - - - - - -
            // REPLACE NEXT THREE LINES WITH A BARRIER
#pragma omp parallel for collapse(2)
            for (int row=0 ; row < BLOCK_SIZE ; row++) {
               for (int col=0 ; col < BLOCK_SIZE ; col++) {
               // END BARRIER
               // - - - - - - - - - - - - - - - - - - - -
                  C_row = C_row_start + row;
                  C_col = C_col_start + col;
                  if ((C_row < M) && (kblock + col < K))
                     As[row][col] = A[(C_row*LDA)+ kblock + col];
                  else
                     As[row][col] = 0;
                  if ((kblock + row < K) && C_col < N)
                     Bs[row][col] = B[((kblock+row)*LDB)+ C_col];
                  else
                     Bs[row][col] = 0;
               }
            }

            // Calculate Cs <- Sum(As X Bs) across all kblocks
            // - - - - - - - - - - - - - - - - - - - -
            // REPLACE NEXT THREE LINES WITH A BARRIER
#pragma omp parallel for collapse(2)
            for (int row=0 ; row < BLOCK_SIZE ; row++) {
               for (int col=0 ; col < BLOCK_SIZE ; col++) {
               // END BARRIER
               // - - - - - - - - - - - - - - - - - - - -
                  for (int e = 0; e < BLOCK_SIZE; ++e)
                     Cs[row][col] += As[row][e] * Bs[e][col];
               }
            }

         } /* End for kblock .. */

         // Scale Update actual C from Cs
         // - - - - - - - - - - - - - - - - - - - -
         // REPLACE NEXT THREE LINES WITH A BARRIER
#pragma omp parallel for collapse(2)
         for (int row=0 ; row < BLOCK_SIZE ; row++) {
            for (int col=0 ; col < BLOCK_SIZE ; col++) {
            // END BARRIER
            // - - - - - - - - - - - - - - - - - - - -
               C_row = C_row_start + row;
               C_col = C_col_start + col;
               if ((C_row < M) && (C_col < N)) {
                  C[(C_row*LDC)+C_col] = alpha*Cs[row][col] + beta*C[(C_row*LDC)+C_col];
               }
            }
         }
// -------------------------------------------------------------------
// This is the end of the kernel
      }
   }
}
|
omp-low.c | /* Lowering pass for OpenMP directives. Converts OpenMP directives
into explicit calls to the runtime library (libgomp) and data
marshalling to implement data sharing and copying clauses.
Contributed by Diego Novillo <dnovillo@redhat.com>
Copyright (C) 2005, 2006 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tree-gimple.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "timevar.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "tree-pass.h"
#include "ggc.h"
#include "except.h"
/* Lowering of OpenMP parallel and workshare constructs proceeds in two
phases. The first phase scans the function looking for OMP statements
and then for variables that must be replaced to satisfy data sharing
clauses. The second phase expands code for the constructs, as well as
re-gimplifying things when variables have been replaced with complex
expressions.
Final code generation is done by pass_expand_omp. The flowgraph is
scanned for parallel regions which are then moved to a new
function, to be invoked by the thread library. */
/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance":  Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  /* The OMP_* statement this context was created for.  */
  tree stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  /* The record type that holds the communicated variables, and the
     decls through which it is addressed on the sending (parent) and
     receiving (child) sides.  */
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;
} omp_context;
/* A structure describing the main elements of a parallel loop.
   Filled in by extract_omp_for_data.  */

struct omp_for_data
{
  /* Iteration variable, initial value, final value, step, schedule
     chunk size, and the OMP_FOR statement they came from.  */
  tree v, n1, n2, step, chunk_size, for_stmt;
  /* Loop comparison code; after extract_omp_for_data this is
     canonicalized to LT_EXPR or GT_EXPR.  */
  enum tree_code cond_code;
  tree pre;
  /* Whether nowait / ordered clauses were present on the loop.  */
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
};
static splay_tree all_contexts;
static int parallel_nesting_level;
struct omp_region *root_omp_region;
static void scan_omp (tree *, omp_context *);
static void lower_omp (tree *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
/* Walk the clause chain CLAUSES and return the first clause whose
   code is KIND, or NULL_TREE if no such clause exists.  */

static tree
find_omp_clause (tree clauses, enum tree_code kind)
{
  tree c;

  for (c = clauses; c != NULL_TREE; c = OMP_CLAUSE_CHAIN (c))
    {
      if (OMP_CLAUSE_CODE (c) == kind)
	return c;
    }

  return NULL_TREE;
}
/* Return true if CTX was created for an omp parallel directive.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  /* A context records the statement it was built for; check its code.  */
  return (TREE_CODE (ctx->stmt) == OMP_PARALLEL);
}
/* Return true if REGION is a combined parallel+workshare region.
   The flag itself is computed by determine_parallel_type.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel != 0;
}
/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  The loop is canonicalized along the way: LE/GE
   conditions become LT/GT with the bound adjusted by one, and a
   MINUS_EXPR increment becomes a negated step.  */

static void
extract_omp_for_data (tree for_stmt, struct omp_for_data *fd)
{
  tree t;

  fd->for_stmt = for_stmt;
  fd->pre = NULL;

  /* The init statement must be "v = n1" with V an integral decl.  */
  t = OMP_FOR_INIT (for_stmt);
  gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
  fd->v = TREE_OPERAND (t, 0);
  gcc_assert (DECL_P (fd->v));
  gcc_assert (TREE_CODE (TREE_TYPE (fd->v)) == INTEGER_TYPE);
  fd->n1 = TREE_OPERAND (t, 1);

  /* The condition compares V against the final value N2.  */
  t = OMP_FOR_COND (for_stmt);
  fd->cond_code = TREE_CODE (t);
  gcc_assert (TREE_OPERAND (t, 0) == fd->v);
  fd->n2 = TREE_OPERAND (t, 1);
  switch (fd->cond_code)
    {
    case LT_EXPR:
    case GT_EXPR:
      break;
    case LE_EXPR:
      /* v <= n2  ==>  v < n2 + 1.  */
      fd->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->n2), fd->n2,
			    build_int_cst (TREE_TYPE (fd->n2), 1));
      fd->cond_code = LT_EXPR;
      break;
    case GE_EXPR:
      /* v >= n2  ==>  v > n2 - 1.  */
      fd->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->n2), fd->n2,
			    build_int_cst (TREE_TYPE (fd->n2), 1));
      fd->cond_code = GT_EXPR;
      break;
    default:
      gcc_unreachable ();
    }

  /* The increment must be "v = v + step" or "v = v - step"; fold a
     subtraction into a negated step.  */
  t = OMP_FOR_INCR (fd->for_stmt);
  gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
  gcc_assert (TREE_OPERAND (t, 0) == fd->v);
  t = TREE_OPERAND (t, 1);
  gcc_assert (TREE_OPERAND (t, 0) == fd->v);
  switch (TREE_CODE (t))
    {
    case PLUS_EXPR:
      fd->step = TREE_OPERAND (t, 1);
      break;
    case MINUS_EXPR:
      fd->step = TREE_OPERAND (t, 1);
      fd->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (fd->step), fd->step);
      break;
    default:
      gcc_unreachable ();
    }

  /* Collect the clauses that affect loop expansion.  */
  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  for (t = OMP_FOR_CLAUSES (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      default:
	break;
      }

  /* schedule(runtime) takes its chunk size from the environment, so no
     chunk expression may be present.  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC || fd->have_ordered)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }
}
/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   OMP_FOR regions.  In some cases, these arguments are computed out
   of variables passed in from the parent to the child via 'struct
   .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   OMP_FOR header and check whether they appear on the LHS of any
   statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
   call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block par_entry_bb, basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  tree par_stmt, ws_stmt;

  par_stmt = last_stmt (par_entry_bb);
  ws_stmt = last_stmt (ws_entry_bb);

  /* Sections carry no loop-header expressions, so they are always safe.  */
  if (TREE_CODE (ws_stmt) == OMP_SECTIONS)
    return true;

  gcc_assert (TREE_CODE (ws_stmt) == OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd);

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.n1)
      || !is_gimple_min_invariant (fd.n2)
      || !is_gimple_min_invariant (fd.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}
/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  For an OMP_FOR, the result lists the loop header values;
   for OMP_SECTIONS, the number of sections.  */

static tree
get_ws_args_for (tree ws_stmt)
{
  tree t;

  if (TREE_CODE (ws_stmt) == OMP_FOR)
    {
      struct omp_for_data fd;
      tree ws_args;

      extract_omp_for_data (ws_stmt, &fd);

      /* Cons the arguments in reverse, so the final list order is
	 n1, n2, step [, chunk_size].  */
      ws_args = NULL_TREE;

      if (fd.chunk_size)
	{
	  t = fold_convert (long_integer_type_node, fd.chunk_size);
	  ws_args = tree_cons (NULL, t, ws_args);
	}

      t = fold_convert (long_integer_type_node, fd.step);
      ws_args = tree_cons (NULL, t, ws_args);

      t = fold_convert (long_integer_type_node, fd.n2);
      ws_args = tree_cons (NULL, t, ws_args);

      t = fold_convert (long_integer_type_node, fd.n1);
      ws_args = tree_cons (NULL, t, ws_args);

      return ws_args;
    }
  else if (TREE_CODE (ws_stmt) == OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 OMP_SECTIONS_SWITCH statement, i.e. the successor count of
	 the block containing WS_STMT.  */
      basic_block bb = bb_for_stmt (ws_stmt);
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs));
      t = tree_cons (NULL, t, NULL);
      return t;
    }

  gcc_unreachable ();
}
/* Discover whether REGION is a combined parallel+workshare region.
   Sets is_combined_parallel on REGION and its inner region, and
   records the extra call arguments in REGION->ws_args.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != OMP_PARALLEL
      || (region->inner->type != OMP_FOR
	  && region->inner->type != OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb)
      && (OMP_PARALLEL_COMBINED (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      tree ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = OMP_FOR_CLAUSES (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      /* Mark both the parallel and the workshare region as combined
	 and remember the extra child-function arguments.  */
      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
    }
}
/* Return true if EXPR has a type whose size is not a compile-time
   constant (i.e. the expression is variable sized).  */

static inline bool
is_variable_sized (tree expr)
{
  tree size_unit = TYPE_SIZE_UNIT (TREE_TYPE (expr));
  return !TREE_CONSTANT (size_unit);
}
/* Return true if DECL is privatized by reference.  The decision is
   delegated to the language front end via a lang hook.  */

static inline bool
is_reference (tree decl)
{
  bool by_ref = lang_hooks.decls.omp_privatize_by_reference (decl);
  return by_ref;
}
/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows for the variable form to not have been entered, otherwise we
   assert that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  /* The mapping must exist; dereference without a NULL check.  */
  splay_tree_node node
    = splay_tree_lookup (ctx->cb.decl_map, (splay_tree_key) var);
  return (tree) node->value;
}
static inline tree
maybe_lookup_decl (tree var, omp_context *ctx)
{
  /* Like lookup_decl, but tolerate a missing mapping.  */
  splay_tree_node node
    = splay_tree_lookup (ctx->cb.decl_map, (splay_tree_key) var);
  if (node == NULL)
    return NULL_TREE;
  return (tree) node->value;
}
static inline tree
lookup_field (tree var, omp_context *ctx)
{
  /* The field mapping must exist; dereference without a NULL check.  */
  splay_tree_node node
    = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) node->value;
}
static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  /* Like lookup_field, but tolerate a missing mapping.  */
  splay_tree_node node
    = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  if (node == NULL)
    return NULL_TREE;
  return (tree) node->value;
}
/* Return true if DECL should be copied by pointer.  SHARED_P is true
   if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, bool shared_p)
{
  /* Aggregates are always passed by reference.  */
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_p)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;
    }

  return false;
}
/* Construct a new automatic decl with the given NAME and TYPE, copying
   the relevant attributes from VAR, and chain it onto CTX->block_vars.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree new_decl = build_decl (VAR_DECL, name, type);

  /* Mirror the attributes of VAR that matter for code generation
     and debugging.  */
  DECL_ARTIFICIAL (new_decl) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (new_decl) = DECL_IGNORED_P (var);
  TREE_ADDRESSABLE (new_decl) = TREE_ADDRESSABLE (var);
  DECL_COMPLEX_GIMPLE_REG_P (new_decl) = DECL_COMPLEX_GIMPLE_REG_P (var);

  TREE_USED (new_decl) = 1;
  DECL_CONTEXT (new_decl) = current_function_decl;
  DECL_SEEN_IN_BIND_EXPR_P (new_decl) = 1;

  /* Chain the copy onto the list of block-local variables.  */
  TREE_CHAIN (new_decl) = ctx->block_vars;
  ctx->block_vars = new_decl;

  return new_decl;
}
static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  /* Duplicate VAR keeping its own name and type.  */
  tree name = DECL_NAME (var);
  tree type = TREE_TYPE (var);
  return omp_copy_decl_2 (var, name, type, ctx);
}
/* Build tree nodes to access the field for VAR on the receiver side.
   If BY_REF, the field holds a pointer and an extra dereference is
   emitted.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  /* *receiver_decl . field [, dereferenced again if BY_REF].  */
  x = build_fold_indirect_ref (ctx->receiver_decl);
  x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
  if (by_ref)
    x = build_fold_indirect_ref (x);

  return x;
}
/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  /* Globals are accessed directly.  */
  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      /* For a variable-sized decl, recurse on the pointer stored in
	 its DECL_VALUE_EXPR and dereference the result.  */
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_fold_indirect_ref (x);
    }
  else if (is_parallel_ctx (ctx))
    {
      /* In a parallel, the outer value lives in the receiver record.  */
      bool by_ref = use_pointer_for_field (var, false);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  /* References need a final dereference to reach the value.  */
  if (is_reference (var))
    x = build_fold_indirect_ref (x);

  return x;
}
/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field;

  field = lookup_field (var, ctx);
  /* Reference FIELD inside the sender-side record object.  */
  return build3 (COMPONENT_REF, TREE_TYPE (field), ctx->sender_decl,
		 field, NULL);
}
/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  If
   BY_REF, the field holds a pointer to VAR's type rather than the type
   itself.  */

static void
install_var_field (tree var, bool by_ref, omp_context *ctx)
{
  tree field, type;

  /* Each variable gets at most one field.  */
  gcc_assert (!splay_tree_lookup (ctx->field_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);

  field = build_decl (FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  insert_field_into_struct (ctx->record_type, field);

  splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		     (splay_tree_value) field);
}
static tree
install_var_local (tree var, omp_context *ctx)
{
  /* Make a private copy of VAR and record the VAR -> copy mapping in
     CTX so later references are remapped to it.  */
  tree local = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, local);
  return local;
}
/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  /* The type may reference other remapped decls (e.g. array bounds).  */
  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      /* Remap the value expression into the new context as well.  */
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      /* Variable-sized decl: remap both size expressions, falling back
	 to the (already remapped) type's sizes on error.  */
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}
/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  /* CB is really the omp_context (see the comment on the cb field).  */
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      /* Labels always get a fresh copy in the new context.  */
      new_var = create_artificial_label ();
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  /* Walk outward until we hit a parallel context, looking for an
     existing mapping of VAR along the way.  If we run out of contexts
     without crossing a parallel, VAR is usable as-is.  */
  while (!is_parallel_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  /* Globals and decls from enclosing functions cross the parallel
     boundary unchanged.  */
  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  /* Anything else would have to have been mapped already.  */
  return error_mark_node;
}
/* Return the parallel region associated with STMT. */
/* Debugging dumps for parallel regions. */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);
/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  /* Print this region's entry block, then recurse into children and
     siblings (children are indented four extra columns).  */
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   tree_code_name[region->type]);

  if (region->inner != NULL)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont != NULL)
    fprintf (file, "%*sbb %d: OMP_CONTINUE\n", indent, "",
	     region->cont->index);

  if (region->exit != NULL)
    fprintf (file, "%*sbb %d: OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next != NULL)
    dump_omp_region (file, region->next, indent);
}
void
debug_omp_region (struct omp_region *region)
{
  /* Convenience entry point for use from the debugger.  */
  dump_omp_region (stderr, region, 0);
}
void
debug_all_omp_regions (void)
{
  /* Dump the whole toplevel region list from the debugger.  */
  dump_omp_region (stderr, root_omp_region, 0);
}
/* Create a new parallel region starting at STMT inside region PARENT.  */

struct omp_region *
new_omp_region (basic_block bb, enum tree_code type, struct omp_region *parent)
{
  struct omp_region *region = xcalloc (1, sizeof (*region));
  struct omp_region **list_head;

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  /* Push the new region onto the head of its sibling list: either the
     parent's inner list for a nested region, or ROOT_OMP_REGION for a
     toplevel one.  */
  list_head = parent ? &parent->inner : &root_omp_region;
  region->next = *list_head;
  *list_head = region;

  return region;
}
/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *child = region->inner;

  /* Recursively release every inner region, then REGION itself.  Save
     the sibling link before freeing each child.  */
  while (child)
    {
      struct omp_region *next = child->next;
      free_omp_region_1 (child);
      child = next;
    }

  free (region);
}
/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r = root_omp_region;

  /* Free every toplevel region, saving the sibling link first.  */
  while (r)
    {
      struct omp_region *next = r->next;
      free_omp_region_1 (r);
      r = next;
    }

  root_omp_region = NULL;
}
/* Create a new context, with OUTER_CTX being the surrounding context.
   The new context is registered in ALL_CONTEXTS keyed by STMT.  */

static omp_context *
new_omp_context (tree stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      /* Inherit the copy_body_data from the enclosing context, but not
	 its block pointer.  */
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      /* Outermost context: initialize the copy_body_data from scratch,
	 copying within the current function.  */
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node (current_function_decl);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_region = -1;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  /* Each context gets its own decl map.  */
  ctx->cb.decl_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  return ctx;
}
/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  splay_tree_delete (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  XDELETE (ctx);
}
/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;

  if (f)
    {
      /* At least one field needs remapping: build a fresh record type
	 with remapped copies of every field.  */
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  TREE_CHAIN (new_f) = new_fields;
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      /* Fields were consed in reverse; restore source order.  */
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}
/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  Runs two passes over the clause chain: the
   first installs fields and local copies, the second fixes up the
   remapped decls (types, sizes, value exprs).  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  /* First pass: create receiver-record fields and local replacement
     decls for each clause.  */
  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  /* Variable-sized privates are handled in the second pass.  */
	  if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  gcc_assert (is_parallel_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  gcc_assert (!is_variable_sized (decl));
	  by_ref = use_pointer_for_field (decl, true);
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  if (! TREE_READONLY (decl)
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || is_reference (decl))
	    {
	      install_var_field (decl, by_ref, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  Demote the
	     clause to firstprivate and share the do_private path.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if (is_variable_sized (decl))
	    break;
	  else if (is_parallel_ctx (ctx)
		   && ! is_global_var (maybe_lookup_decl_in_outer_ctx (decl,
								       ctx)))
	    {
	      /* The original value must travel through the record.  */
	      by_ref = use_pointer_for_field (decl, false);
	      install_var_field (decl, by_ref, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	  if (ctx->outer)
	    scan_omp (&OMP_CLAUSE_DECL (c), ctx->outer);
	  /* FALLTHRU */

	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, false);
	  install_var_field (decl, by_ref, ctx);
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	  /* The clause operand is evaluated in the enclosing context.  */
	  if (ctx->outer)
	    scan_omp (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* Second pass: fix up the replacement decls now that all mappings
     are in place.  */
  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_SHARED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    fixup_remapped_decl (decl, ctx, false);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* Reduction init/merge sequences may reference the remapped decls,
     so scan them only after everything is installed.  */
  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  scan_omp (&OMP_CLAUSE_REDUCTION_INIT (c), ctx);
	  scan_omp (&OMP_CLAUSE_REDUCTION_MERGE (c), ctx);
	}
}
/* Create a new name for omp child function.  Returns an identifier.  */

static GTY(()) unsigned int tmp_ompfn_id_num;

static tree
create_omp_child_function_name (void)
{
  tree name = DECL_ASSEMBLER_NAME (current_function_decl);
  size_t len = IDENTIFIER_LENGTH (name);
  char *tmp_name, *prefix;

  /* Build "<parent>_omp_fn", then overwrite the '_' separator with '.'
     or '$' where the assembler allows it.  */
  prefix = alloca (len + sizeof ("_omp_fn"));
  memcpy (prefix, IDENTIFIER_POINTER (name), len);
  strcpy (prefix + len, "_omp_fn");
#ifndef NO_DOT_IN_LABEL
  prefix[len] = '.';
#elif !defined NO_DOLLAR_IN_LABEL
  prefix[len] = '$';
#endif
  /* Append a unique sequence number to keep names distinct.  */
  ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++);
  return get_identifier (tmp_name);
}
/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  Records the decl in CTX->cb.dst_fn and the
   ".omp_data_i" parameter in CTX->receiver_decl.  */

static void
create_omp_child_function (omp_context *ctx)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name ();
  /* The child has signature void fn (void *.omp_data_i).  */
  type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (FUNCTION_DECL, name, type);
  decl = lang_hooks.decls.pushdecl (decl);

  ctx->cb.dst_fn = decl;

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  /* Void result decl.  */
  t = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_RESULT (decl) = t;

  /* The single ".omp_data_i" parameter doubles as the receiver decl.  */
  t = build_decl (PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  ctx->receiver_decl = t;

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  allocate_struct_function (decl);
  DECL_SOURCE_LOCATION (decl) = EXPR_LOCATION (ctx->stmt);
  cfun->function_end_locus = EXPR_LOCATION (ctx->stmt);
  cfun = ctx->cb.src_cfun;
}
/* Scan an OpenMP parallel directive.  */

static void
scan_omp_parallel (tree *stmt_p, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (OMP_PARALLEL_BODY (*stmt_p))
      && find_omp_clause (OMP_CLAUSES (*stmt_p), OMP_CLAUSE_COPYIN) == NULL)
    {
      *stmt_p = build_empty_stmt ();
      return;
    }

  ctx = new_omp_context (*stmt_p, outer_ctx);
  if (parallel_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;

  /* Create the ".omp_data_s" record type used to communicate variables
     between parent and child, and the child function itself.  */
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx);
  OMP_PARALLEL_FN (*stmt_p) = ctx->cb.dst_fn;

  scan_sharing_clauses (OMP_PARALLEL_CLAUSES (*stmt_p), ctx);
  scan_omp (&OMP_PARALLEL_BODY (*stmt_p), ctx);

  /* If no variable needed to travel through the record, drop it.  */
  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
}
/* Scan an OpenMP loop directive.  */

static void
scan_omp_for (tree *stmt_p, omp_context *outer_ctx)
{
  tree for_stmt = *stmt_p;
  omp_context *ctx = new_omp_context (for_stmt, outer_ctx);

  scan_sharing_clauses (OMP_FOR_CLAUSES (for_stmt), ctx);

  /* Scan each component of the loop in source order.  */
  scan_omp (&OMP_FOR_PRE_BODY (for_stmt), ctx);
  scan_omp (&OMP_FOR_INIT (for_stmt), ctx);
  scan_omp (&OMP_FOR_COND (for_stmt), ctx);
  scan_omp (&OMP_FOR_INCR (for_stmt), ctx);
  scan_omp (&OMP_FOR_BODY (for_stmt), ctx);
}
/* Scan an OpenMP sections directive.  */

static void
scan_omp_sections (tree *stmt_p, omp_context *outer_ctx)
{
  tree sections_stmt = *stmt_p;
  omp_context *ctx = new_omp_context (sections_stmt, outer_ctx);

  /* Clauses first, then the body, in the new context.  */
  scan_sharing_clauses (OMP_SECTIONS_CLAUSES (sections_stmt), ctx);
  scan_omp (&OMP_SECTIONS_BODY (sections_stmt), ctx);
}
/* Scan an OpenMP single directive.  */

static void
scan_omp_single (tree *stmt_p, omp_context *outer_ctx)
{
  tree stmt = *stmt_p;
  omp_context *ctx;
  tree name;

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  /* Create the ".omp_copy_s" record used by copyprivate broadcasting.  */
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_copy_s");
  name = build_decl (TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;

  scan_sharing_clauses (OMP_SINGLE_CLAUSES (stmt), ctx);
  scan_omp (&OMP_SINGLE_BODY (stmt), ctx);

  /* Drop the record if no copyprivate variable ended up in it.  */
  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = NULL;
  else
    layout_type (ctx->record_type);
}
/* Check OpenMP nesting restrictions for statement T inside context CTX.
   Emits warnings (not errors) for constructs that may not be closely
   nested inside each other.  */

static void
check_omp_nesting_restrictions (tree t, omp_context *ctx)
{
  switch (TREE_CODE (t))
    {
    case OMP_FOR:
    case OMP_SECTIONS:
    case OMP_SINGLE:
      /* Work-sharing constructs: walk outward until we hit either an
	 offending construct or an enclosing parallel (which resets the
	 nesting rules).  */
      for (; ctx != NULL; ctx = ctx->outer)
	switch (TREE_CODE (ctx->stmt))
	  {
	  case OMP_FOR:
	  case OMP_SECTIONS:
	  case OMP_SINGLE:
	  case OMP_ORDERED:
	  case OMP_MASTER:
	    warning (0, "work-sharing region may not be closely nested inside "
			"of work-sharing, critical, ordered or master region");
	    return;
	  case OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case OMP_MASTER:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (TREE_CODE (ctx->stmt))
	  {
	  case OMP_FOR:
	  case OMP_SECTIONS:
	  case OMP_SINGLE:
	    warning (0, "master region may not be closely nested inside "
			"of work-sharing region");
	    return;
	  case OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case OMP_ORDERED:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (TREE_CODE (ctx->stmt))
	  {
	  case OMP_CRITICAL:
	    warning (0, "ordered region may not be closely nested inside "
			"of critical region");
	    return;
	  case OMP_FOR:
	    /* The enclosing loop must carry an ordered clause.  */
	    if (find_omp_clause (OMP_CLAUSES (ctx->stmt),
				 OMP_CLAUSE_ORDERED) == NULL)
	      warning (0, "ordered region must be closely nested inside "
			  "a loop region with an ordered clause");
	    return;
	  case OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case OMP_CRITICAL:
      /* Critical regions with the same name may not nest, at any depth.  */
      for (; ctx != NULL; ctx = ctx->outer)
	if (TREE_CODE (ctx->stmt) == OMP_CRITICAL
	    && OMP_CRITICAL_NAME (t) == OMP_CRITICAL_NAME (ctx->stmt))
	  {
	    warning (0, "critical region may not be nested inside a critical "
			"region with the same name");
	    return;
	  }
      break;
    default:
      break;
    }
}
/* Callback for walk_stmts used to scan for OpenMP directives at TP.
   DATA is the walk_stmt_info whose info field carries the current
   omp_context (or NULL outside any construct).  */

static tree
scan_omp_1 (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = data;
  omp_context *ctx = wi->info;
  tree t = *tp;

  if (EXPR_HAS_LOCATION (t))
    input_location = EXPR_LOCATION (t);

  /* Check the OpenMP nesting restrictions.  */
  if (OMP_DIRECTIVE_P (t) && ctx != NULL)
    check_omp_nesting_restrictions (t, ctx);

  /* Default to not walking subtrees; the cases below re-enable it or
     walk them explicitly via scan_omp.  */
  *walk_subtrees = 0;
  switch (TREE_CODE (t))
    {
    case OMP_PARALLEL:
      parallel_nesting_level++;
      scan_omp_parallel (tp, ctx);
      parallel_nesting_level--;
      break;

    case OMP_FOR:
      scan_omp_for (tp, ctx);
      break;

    case OMP_SECTIONS:
      scan_omp_sections (tp, ctx);
      break;

    case OMP_SINGLE:
      scan_omp_single (tp, ctx);
      break;

    case OMP_SECTION:
    case OMP_MASTER:
    case OMP_ORDERED:
    case OMP_CRITICAL:
      /* These need a context of their own but no special handling.  */
      ctx = new_omp_context (*tp, ctx);
      scan_omp (&OMP_BODY (*tp), ctx);
      break;

    case BIND_EXPR:
      {
	tree var;
	*walk_subtrees = 1;

	/* Map bind-local vars to themselves so remap_decl finds them.  */
	for (var = BIND_EXPR_VARS (t); var ; var = TREE_CHAIN (var))
	  insert_decl_map (&ctx->cb, var, var);
      }
      break;

    case VAR_DECL:
    case PARM_DECL:
    case LABEL_DECL:
    case RESULT_DECL:
      if (ctx)
	*tp = remap_decl (t, &ctx->cb);
      break;

    default:
      if (ctx && TYPE_P (t))
	*tp = remap_type (t, &ctx->cb);
      else if (!DECL_P (t))
	*walk_subtrees = 1;
      break;
    }

  return NULL_TREE;
}
/* Scan all the statements starting at STMT_P for OpenMP directives and
   clauses, recording what is found in CTX (which may be NULL at the
   outermost level).  input_location is preserved across the walk.  */
static void
scan_omp (tree *stmt_p, omp_context *ctx)
{
  struct walk_stmt_info wi;
  location_t old_loc = input_location;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.callback = scan_omp_1;
  wi.want_locations = true;
  /* Only descend into BIND_EXPRs once we are inside a directive, so
     that their locals can be entered into the context's decl map.  */
  wi.want_bind_expr = (ctx != NULL);

  walk_stmts (&wi, stmt_p);
  input_location = old_loc;
}
/* Re-gimplification and code generation routines. */
/* Append a call to GOMP_barrier to *STMT_LIST.  */
static void
build_omp_barrier (tree *stmt_list)
{
  tree call
    = build_function_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], NULL);
  gimplify_and_add (call, stmt_list);
}
/* If a context was created for STMT when it was scanned, return it;
   otherwise return NULL.  */
static omp_context *
maybe_lookup_ctx (tree stmt)
{
  splay_tree_node node
    = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
  if (node == NULL)
    return NULL;
  return (omp_context *) node->value;
}
/* Find the mapping for DECL in the context enclosing CTX, walking
   outwards to the first enclosing context that has one.

   This matters for nested parallel directives.  With nesting such as
   (variable UIDs shown for clarity):

     iD.1562 = 0;
     #omp parallel shared(iD.1562)		-> outer parallel
       iD.1562 = iD.1562 + 1;
       #omp parallel shared(iD.1562)		-> inner parallel
	 iD.1562 = iD.1562 - 1;

   each parallel creates its own .omp_data_s structure for copying
   iD.1562 in and out, and the copy-out for the inner directive is
   emitted inside the body of the outer one:

     iD.1562 = 0;
     .omp_data_o.1.i = iD.1562;
     #omp parallel shared(iD.1562)		-> outer parallel
       .omp_data_i.1 = &.omp_data_o.1
       .omp_data_i.1->i = .omp_data_i.1->i + 1;
       .omp_data_o.2.i = iD.1562;		-> **
       #omp parallel shared(iD.1562)		-> inner parallel
	 .omp_data_i.2 = &.omp_data_o.2
	 .omp_data_i.2->i = .omp_data_i.2->i - 1;

   ** The symbol iD.1562 cannot be referenced inside the body of the
   outer parallel region, so the CTX structure of the outer parallel
   must be consulted for the correct mapping:

     .omp_data_o.2.i = .omp_data_i.1->i

   There may be other workshare or parallel directives between the two,
   hence the walk up the parent chain; this is cheap in practice since
   nested parallelism is rare.  */
static tree
lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  omp_context *up;
  tree mapping = NULL;

  gcc_assert (ctx->is_nested);
  for (up = ctx->outer; up; up = up->outer)
    {
      mapping = maybe_lookup_decl (decl, up);
      if (mapping)
	break;
    }
  /* Only globals may legitimately be unmapped in every enclosing
     context; anything else indicates a scan bug.  */
  gcc_assert (mapping || is_global_var (decl));
  return mapping ? mapping : decl;
}
/* Like lookup_decl_in_outer_ctx, but simply return DECL itself when no
   enclosing context maps it (and when CTX is not nested at all).  */
static tree
maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree mapping = NULL;
  omp_context *up;

  if (ctx->is_nested)
    for (up = ctx->outer; up && mapping == NULL; up = up->outer)
      mapping = maybe_lookup_decl (decl, up);

  return mapping ? mapping : decl;
}
/* Construct the initialization value for reduction CLAUSE, converted to
   TYPE: the identity element of the clause's reduction operator.  */
tree
omp_reduction_init (tree clause, tree type)
{
  enum tree_code reduction_code = OMP_CLAUSE_REDUCTION_CODE (clause);

  switch (reduction_code)
    {
    /* Additive and disjunctive operators: identity is zero/false.  */
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
    case TRUTH_XOR_EXPR:
    case NE_EXPR:
      return fold_convert (type, integer_zero_node);

    /* Multiplicative and conjunctive operators: identity is one/true.  */
    case MULT_EXPR:
    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
    case EQ_EXPR:
      return fold_convert (type, integer_one_node);

    /* Bitwise AND: identity is all-ones.  */
    case BIT_AND_EXPR:
      return fold_convert (type, integer_minus_one_node);

    case MAX_EXPR:
      /* Identity for MAX is the most negative representable value:
	 -inf when the mode honors infinities, else the negated
	 largest finite value / TYPE_MIN_VALUE.  */
      if (!SCALAR_FLOAT_TYPE_P (type))
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MIN_VALUE (type);
	}
      {
	REAL_VALUE_TYPE init;
	if (HONOR_INFINITIES (TYPE_MODE (type)))
	  {
	    REAL_VALUE_TYPE inf;
	    real_inf (&inf);
	    real_arithmetic (&init, NEGATE_EXPR, &inf, NULL);
	  }
	else
	  real_maxval (&init, 1, TYPE_MODE (type));
	return build_real (type, init);
      }

    case MIN_EXPR:
      /* Identity for MIN is the largest representable value.  */
      if (!SCALAR_FLOAT_TYPE_P (type))
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MAX_VALUE (type);
	}
      {
	REAL_VALUE_TYPE init;
	if (HONOR_INFINITIES (TYPE_MODE (type)))
	  real_inf (&init);
	else
	  real_maxval (&init, 0, TYPE_MODE (type));
	return build_real (type, init);
      }

    default:
      gcc_unreachable ();
    }
}
/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
   from the receiver (aka child) side and initializers for REFERENCE_TYPE
   private variables.  Initialization statements go in ILIST, while calls
   to destructors go in DLIST.  CTX is the context of the directive whose
   CLAUSES are being lowered.

   Fix vs. previous revision: the copyin accumulation passed a mangled
   token where the address of COPYIN_SEQ was intended; restored
   &copyin_seq so copyin assignments are collected again.  */
static void
lower_rec_input_clauses (tree clauses, tree *ilist, tree *dlist,
			 omp_context *ctx)
{
  tree_stmt_iterator diter;
  tree c, dtor, copyin_seq, x, args, ptr;
  bool copyin_by_ref = false;
  bool lastprivate_firstprivate = false;
  int pass;

  *dlist = alloc_stmt_list ();
  diter = tsi_start (*dlist);
  copyin_seq = NULL;

  /* Do all the fixed sized types in the first pass, and the variable sized
     types in the second pass.  This makes sure that the scalar arguments to
     the variable sized types are processed before we use them in the
     variable sized operations.  */
  for (pass = 0; pass < 2; ++pass)
    {
      for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
	{
	  enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
	  tree var, new_var;
	  bool by_ref;

	  switch (c_kind)
	    {
	    case OMP_CLAUSE_PRIVATE:
	      if (OMP_CLAUSE_PRIVATE_DEBUG (c))
		continue;
	      break;
	    case OMP_CLAUSE_SHARED:
	      /* Globals that were never remapped need no receiver code.  */
	      if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
		{
		  gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
		  continue;
		}
	      /* FALLTHRU */
	    case OMP_CLAUSE_FIRSTPRIVATE:
	    case OMP_CLAUSE_COPYIN:
	    case OMP_CLAUSE_REDUCTION:
	      break;
	    case OMP_CLAUSE_LASTPRIVATE:
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		{
		  lastprivate_firstprivate = true;
		  if (pass != 0)
		    continue;
		}
	      break;
	    default:
	      continue;
	    }

	  new_var = var = OMP_CLAUSE_DECL (c);
	  if (c_kind != OMP_CLAUSE_COPYIN)
	    new_var = lookup_decl (var, ctx);

	  if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
	    {
	      if (pass != 0)
		continue;
	    }
	  else if (is_variable_sized (var))
	    {
	      /* For variable sized types, we need to allocate the
		 actual storage here.  Call alloca and store the
		 result in the pointer decl that we created elsewhere.  */
	      if (pass == 0)
		continue;
	      ptr = DECL_VALUE_EXPR (new_var);
	      gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
	      ptr = TREE_OPERAND (ptr, 0);
	      gcc_assert (DECL_P (ptr));
	      x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
	      args = tree_cons (NULL, x, NULL);
	      x = built_in_decls[BUILT_IN_ALLOCA];
	      x = build_function_call_expr (x, args);
	      x = fold_convert (TREE_TYPE (ptr), x);
	      x = build2 (MODIFY_EXPR, void_type_node, ptr, x);
	      gimplify_and_add (x, ilist);
	    }
	  else if (is_reference (var))
	    {
	      /* For references that are being privatized for Fortran,
		 allocate new backing storage for the new pointer
		 variable.  This allows us to avoid changing all the
		 code that expects a pointer to something that expects
		 a direct variable.  Note that this doesn't apply to
		 C++, since reference types are disallowed in data
		 sharing clauses there, except for NRV optimized
		 return values.  */
	      if (pass == 0)
		continue;
	      x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
	      if (TREE_CONSTANT (x))
		{
		  /* Constant size: a named temporary suffices.  */
		  const char *name = NULL;
		  if (DECL_NAME (var))
		    name = IDENTIFIER_POINTER (DECL_NAME (new_var));
		  x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
					  name);
		  gimple_add_tmp_var (x);
		  x = build_fold_addr_expr_with_type (x, TREE_TYPE (new_var));
		}
	      else
		{
		  /* Dynamic size: fall back to alloca.  */
		  args = tree_cons (NULL, x, NULL);
		  x = built_in_decls[BUILT_IN_ALLOCA];
		  x = build_function_call_expr (x, args);
		  x = fold_convert (TREE_TYPE (new_var), x);
		}
	      x = build2 (MODIFY_EXPR, void_type_node, new_var, x);
	      gimplify_and_add (x, ilist);
	      new_var = build_fold_indirect_ref (new_var);
	    }
	  else if (c_kind == OMP_CLAUSE_REDUCTION
		   && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    {
	      if (pass == 0)
		continue;
	    }
	  else if (pass != 0)
	    continue;

	  switch (OMP_CLAUSE_CODE (c))
	    {
	    case OMP_CLAUSE_SHARED:
	      /* Shared global vars are just accessed directly.  */
	      if (is_global_var (new_var))
		break;
	      /* Set up the DECL_VALUE_EXPR for shared variables now.  This
		 needs to be delayed until after fixup_child_record_type so
		 that we get the correct type during the dereference.  */
	      by_ref = use_pointer_for_field (var, true);
	      x = build_receiver_ref (var, by_ref, ctx);
	      SET_DECL_VALUE_EXPR (new_var, x);
	      DECL_HAS_VALUE_EXPR_P (new_var) = 1;
	      /* ??? If VAR is not passed by reference, and the variable
		 hasn't been initialized yet, then we'll get a warning for
		 the store into the omp_data_s structure.  Ideally, we'd be
		 able to notice this and not store anything at all, but
		 we're generating code too early.  Suppress the warning.  */
	      if (!by_ref)
		TREE_NO_WARNING (var) = 1;
	      break;

	    case OMP_CLAUSE_LASTPRIVATE:
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		break;
	      /* FALLTHRU */

	    case OMP_CLAUSE_PRIVATE:
	      x = lang_hooks.decls.omp_clause_default_ctor (c, new_var);
	      if (x)
		gimplify_and_add (x, ilist);
	      /* FALLTHRU */

	    do_dtor:
	      x = lang_hooks.decls.omp_clause_dtor (c, new_var);
	      if (x)
		{
		  dtor = x;
		  gimplify_stmt (&dtor);
		  tsi_link_before (&diter, dtor, TSI_SAME_STMT);
		}
	      break;

	    case OMP_CLAUSE_FIRSTPRIVATE:
	      x = build_outer_var_ref (var, ctx);
	      x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
	      gimplify_and_add (x, ilist);
	      goto do_dtor;

	    case OMP_CLAUSE_COPYIN:
	      by_ref = use_pointer_for_field (var, false);
	      x = build_receiver_ref (var, by_ref, ctx);
	      x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
	      append_to_statement_list (x, &copyin_seq);
	      copyin_by_ref |= by_ref;
	      break;

	    case OMP_CLAUSE_REDUCTION:
	      if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
		{
		  gimplify_and_add (OMP_CLAUSE_REDUCTION_INIT (c), ilist);
		  OMP_CLAUSE_REDUCTION_INIT (c) = NULL;
		}
	      else
		{
		  x = omp_reduction_init (c, TREE_TYPE (new_var));
		  gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
		  x = build2 (MODIFY_EXPR, void_type_node, new_var, x);
		  gimplify_and_add (x, ilist);
		}
	      break;

	    default:
	      gcc_unreachable ();
	    }
	}
    }

  /* The copyin sequence is not to be executed by the main thread, since
     that would result in self-copies.  Perhaps not visible to scalars,
     but it certainly is to C++ operator=.  */
  if (copyin_seq)
    {
      x = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM];
      x = build_function_call_expr (x, NULL);
      x = build2 (NE_EXPR, boolean_type_node, x,
		  build_int_cst (TREE_TYPE (x), 0));
      x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
      gimplify_and_add (x, ilist);
    }

  /* If any copyin variable is passed by reference, we must ensure the
     master thread doesn't modify it before it is copied over in all
     threads.  Similarly for variables in both firstprivate and
     lastprivate clauses we need to ensure the lastprivate copying
     happens after firstprivate copying in all threads.  */
  if (copyin_by_ref || lastprivate_firstprivate)
    build_omp_barrier (ilist);
}
/* Generate code to implement the LASTPRIVATE clauses.  This is used for
   both parallel and workshare constructs.  PREDICATE may be NULL if it's
   always true.  The generated assignments (guarded by PREDICATE when
   given) are appended to *STMT_LIST.  */
static void
lower_lastprivate_clauses (tree clauses, tree predicate, tree *stmt_list,
			   omp_context *ctx)
{
  tree sub_list, x, c;
  /* Early exit if there are no lastprivate clauses.  */
  clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
  if (clauses == NULL)
    {
      /* If this was a workshare clause, see if it had been combined
	 with its parallel.  In that case, look for the clauses on the
	 parallel statement itself.  */
      if (is_parallel_ctx (ctx))
	return;
      ctx = ctx->outer;
      if (ctx == NULL || !is_parallel_ctx (ctx))
	return;
      clauses = find_omp_clause (OMP_PARALLEL_CLAUSES (ctx->stmt),
				 OMP_CLAUSE_LASTPRIVATE);
      if (clauses == NULL)
	return;
    }
  /* Build the list of copy-out assignments, one per lastprivate decl.  */
  sub_list = alloc_stmt_list ();
  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, new_var;
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LASTPRIVATE)
	continue;
      var = OMP_CLAUSE_DECL (c);
      new_var = lookup_decl (var, ctx);
      x = build_outer_var_ref (var, ctx);
      /* Privatized references hold a pointer; copy what it points to.  */
      if (is_reference (var))
	new_var = build_fold_indirect_ref (new_var);
      x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
      append_to_statement_list (x, &sub_list);
    }
  /* Only the thread that executed the last iteration/section performs
     the copy-out, hence the predicate guard when one is supplied.  */
  if (predicate)
    x = build3 (COND_EXPR, void_type_node, predicate, sub_list, NULL);
  else
    x = sub_list;
  gimplify_and_add (x, stmt_list);
}
/* Generate code to implement the REDUCTION clauses, appending the
   merge of each thread's partial result into the shared variable to
   *STMT_LIST.  A single scalar reduction uses an OMP_ATOMIC update;
   anything else is wrapped in GOMP_atomic_start/GOMP_atomic_end.  */
static void
lower_reduction_clauses (tree clauses, tree *stmt_list, omp_context *ctx)
{
  tree sub_list = NULL, x, c;
  int count = 0;
  /* First see if there is exactly one reduction clause.  Use OMP_ATOMIC
     update in that case, otherwise use a lock.  */
  for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
      {
	if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	  {
	    /* Never use OMP_ATOMIC for array reductions.  */
	    count = -1;
	    break;
	  }
	count++;
      }
  if (count == 0)
    return;
  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, ref, new_var;
      enum tree_code code;
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
	continue;
      var = OMP_CLAUSE_DECL (c);
      new_var = lookup_decl (var, ctx);
      /* Privatized references hold a pointer; merge what it points to.  */
      if (is_reference (var))
	new_var = build_fold_indirect_ref (new_var);
      ref = build_outer_var_ref (var, ctx);
      code = OMP_CLAUSE_REDUCTION_CODE (c);
      /* reduction(-:var) sums up the partial results, so it acts
	 identically to reduction(+:var).  */
      if (code == MINUS_EXPR)
	code = PLUS_EXPR;
      if (count == 1)
	{
	  /* Exactly one scalar reduction: emit a single atomic update
	     "*addr = *addr <op> new_var" and we are done.  */
	  tree addr = build_fold_addr_expr (ref);
	  addr = save_expr (addr);
	  ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
	  x = fold_build2 (code, TREE_TYPE (ref), ref, new_var);
	  x = build2 (OMP_ATOMIC, void_type_node, addr, x);
	  gimplify_and_add (x, stmt_list);
	  return;
	}
      if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  /* Language-specific merge code was pre-built against the
	     placeholder decl; point the placeholder at the shared var
	     and splice the merge into the locked sequence.  */
	  tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
	  if (is_reference (var))
	    ref = build_fold_addr_expr (ref);
	  SET_DECL_VALUE_EXPR (placeholder, ref);
	  DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
	  gimplify_and_add (OMP_CLAUSE_REDUCTION_MERGE (c), &sub_list);
	  OMP_CLAUSE_REDUCTION_MERGE (c) = NULL;
	  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
	}
      else
	{
	  /* Plain scalar merge: ref = ref <op> new_var.  */
	  x = build2 (code, TREE_TYPE (ref), ref, new_var);
	  ref = build_outer_var_ref (var, ctx);
	  x = build2 (MODIFY_EXPR, void_type_node, ref, x);
	  append_to_statement_list (x, &sub_list);
	}
    }
  /* Protect the whole merge sequence with the global atomic lock.  */
  x = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
  x = build_function_call_expr (x, NULL);
  gimplify_and_add (x, stmt_list);
  gimplify_and_add (sub_list, stmt_list);
  x = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
  x = build_function_call_expr (x, NULL);
  gimplify_and_add (x, stmt_list);
}
/* Generate code to implement the COPYPRIVATE clauses.  The executing
   thread's store of each value (or its address) into the communication
   record goes in *SLIST; the corresponding load in every thread goes
   in *RLIST.  */
static void
lower_copyprivate_clauses (tree clauses, tree *slist, tree *rlist,
			   omp_context *ctx)
{
  tree c;
  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, ref, x;
      bool by_ref;
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
	continue;
      var = OMP_CLAUSE_DECL (c);
      by_ref = use_pointer_for_field (var, false);
      /* Sender side: store the value (or its address when passed by
	 reference) into the communication record.  In a nested context
	 the decl must be resolved in the enclosing contexts.  */
      ref = build_sender_ref (var, ctx);
      x = (ctx->is_nested) ? lookup_decl_in_outer_ctx (var, ctx) : var;
      x = by_ref ? build_fold_addr_expr (x) : x;
      x = build2 (MODIFY_EXPR, void_type_node, ref, x);
      gimplify_and_add (x, slist);
      /* Receiver side: every thread copies the broadcast value back
	 into its own variable.  */
      ref = build_receiver_ref (var, by_ref, ctx);
      if (is_reference (var))
	{
	  ref = build_fold_indirect_ref (ref);
	  var = build_fold_indirect_ref (var);
	}
      x = lang_hooks.decls.omp_clause_assign_op (c, var, ref);
      gimplify_and_add (x, rlist);
    }
}
/* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
   and REDUCTION from the sender (aka parent) side.  Copy-in stores into
   the communication record go in *ILIST (before the region); copy-out
   loads from it go in *OLIST (after the region).  */
static void
lower_send_clauses (tree clauses, tree *ilist, tree *olist, omp_context *ctx)
{
  tree c;
  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree val, ref, x, var;
      bool by_ref, do_in = false, do_out = false;
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_LASTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  break;
	default:
	  continue;
	}
      /* VAL is the decl as named on the clause; VAR is how the parent
	 function actually refers to it (which differs in nested
	 contexts).  */
      var = val = OMP_CLAUSE_DECL (c);
      if (ctx->is_nested)
	var = lookup_decl_in_outer_ctx (val, ctx);
      /* Globals are accessed directly by the child (copyin excepted),
	 and variable sized decls are handled by their pointer decl.  */
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
	  && is_global_var (var))
	continue;
      if (is_variable_sized (val))
	continue;
      by_ref = use_pointer_for_field (val, false);
      /* Decide the copy direction(s) per clause kind.  */
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  do_in = true;
	  break;
	case OMP_CLAUSE_LASTPRIVATE:
	  if (by_ref || is_reference (val))
	    {
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		continue;
	      do_in = true;
	    }
	  else
	    do_out = true;
	  break;
	case OMP_CLAUSE_REDUCTION:
	  do_in = true;
	  /* When passed by reference the child updates the original in
	     place, so no copy-out is needed.  */
	  do_out = !(by_ref || is_reference (val));
	  break;
	default:
	  gcc_unreachable ();
	}
      if (do_in)
	{
	  ref = build_sender_ref (val, ctx);
	  x = by_ref ? build_fold_addr_expr (var) : var;
	  x = build2 (MODIFY_EXPR, void_type_node, ref, x);
	  gimplify_and_add (x, ilist);
	}
      if (do_out)
	{
	  ref = build_sender_ref (val, ctx);
	  x = build2 (MODIFY_EXPR, void_type_node, var, ref);
	  gimplify_and_add (x, olist);
	}
    }
}
/* Generate code to implement SHARED from the sender (aka parent) side.
   This is trickier, since OMP_PARALLEL_CLAUSES doesn't list things that
   got automatically shared; instead we walk the fields of the record
   type built for the communication structure.  Stores go in *ILIST,
   copy-back loads in *OLIST.  */
static void
lower_send_shared_vars (tree *ilist, tree *olist, omp_context *ctx)
{
  tree var, ovar, nvar, f, x;
  /* No record type means nothing was shared through the structure.  */
  if (ctx->record_type == NULL)
    return;
  for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
    {
      /* Each field's abstract origin is the original user decl.  */
      ovar = DECL_ABSTRACT_ORIGIN (f);
      nvar = maybe_lookup_decl (ovar, ctx);
      /* Skip decls whose child-side copy was never wired up to the
	 receiver structure.  */
      if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
	continue;
      var = ovar;
      /* If CTX is a nested parallel directive.  Find the immediately
	 enclosing parallel or workshare construct that contains a
	 mapping for OVAR.  */
      if (ctx->is_nested)
	var = lookup_decl_in_outer_ctx (ovar, ctx);
      if (use_pointer_for_field (ovar, true))
	{
	  /* Passed by reference: store the address once, no copy-back
	     needed since the child works on the original.  */
	  x = build_sender_ref (ovar, ctx);
	  var = build_fold_addr_expr (var);
	  x = build2 (MODIFY_EXPR, void_type_node, x, var);
	  gimplify_and_add (x, ilist);
	}
      else
	{
	  /* Passed by value: copy in before the region and copy the
	     possibly-modified value back out after it.  */
	  x = build_sender_ref (ovar, ctx);
	  x = build2 (MODIFY_EXPR, void_type_node, x, var);
	  gimplify_and_add (x, ilist);
	  x = build_sender_ref (ovar, ctx);
	  x = build2 (MODIFY_EXPR, void_type_node, var, x);
	  gimplify_and_add (x, olist);
	}
    }
}
/* Build the function calls to GOMP_parallel_start etc to actually
   generate the parallel operation.  REGION is the parallel region
   being expanded.  BB is the block where to insert the code.
   ENTRY_STMT is the OMP_PARALLEL statement being expanded.  WS_ARGS
   will be set if this is a call to a combined parallel+workshare
   construct, it contains the list of additional arguments needed by
   the workshare construct.  */
static void
expand_parallel_call (struct omp_region *region, basic_block bb,
		      tree entry_stmt, tree ws_args)
{
  tree t, args, val, cond, c, list, clauses;
  block_stmt_iterator si;
  int start_ix;
  clauses = OMP_PARALLEL_CLAUSES (entry_stmt);
  push_gimplify_context ();
  /* Determine what flavor of GOMP_parallel_start we will be
     emitting.  Combined parallel+workshare constructs use the fused
     library entry points so the workshare is set up in the same call.  */
  start_ix = BUILT_IN_GOMP_PARALLEL_START;
  if (is_combined_parallel (region))
    {
      switch (region->inner->type)
	{
	case OMP_FOR:
	  /* The loop-start builtins are laid out consecutively by
	     schedule kind, so the kind can be used as an offset.  */
	  start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
		     + region->inner->sched_kind;
	  break;
	case OMP_SECTIONS:
	  start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  /* By default, the value of NUM_THREADS is zero (selected at run time)
     and there is no conditional.  */
  cond = NULL_TREE;
  val = build_int_cst (unsigned_type_node, 0);
  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = OMP_CLAUSE_IF_EXPR (c);
  c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
  if (c)
    val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
  /* Ensure 'val' is of the correct type.  */
  val = fold_convert (unsigned_type_node, val);
  /* If we found the clause 'if (cond)', build either
     (cond != 0) or (cond ? val : 1u).  */
  if (cond)
    {
      block_stmt_iterator si;
      cond = gimple_boolify (cond);
      if (integer_zerop (val))
	/* num_threads is run-time selected: pass (cond == 0), which the
	   library interprets as "serialize when nonzero... actually as
	   the thread-count argument; a false condition yields 1 thread.
	   NOTE(review): relies on libgomp treating argument 1 as
	   serial execution -- confirm against libgomp docs.  */
	val = build2 (EQ_EXPR, unsigned_type_node, cond,
		      build_int_cst (TREE_TYPE (cond), 0));
      else
	{
	  /* An explicit num_threads with an if-clause needs real control
	     flow: tmp = cond ? val : 1.  Build the diamond by hand since
	     we are already past gimplification into CFG form.  */
	  basic_block cond_bb, then_bb, else_bb;
	  edge e;
	  tree t, then_lab, else_lab, tmp;
	  tmp = create_tmp_var (TREE_TYPE (val), NULL);
	  e = split_block (bb, NULL);
	  cond_bb = e->src;
	  bb = e->dest;
	  remove_edge (e);
	  then_bb = create_empty_bb (cond_bb);
	  else_bb = create_empty_bb (then_bb);
	  then_lab = create_artificial_label ();
	  else_lab = create_artificial_label ();
	  t = build3 (COND_EXPR, void_type_node,
		      cond,
		      build_and_jump (&then_lab),
		      build_and_jump (&else_lab));
	  si = bsi_start (cond_bb);
	  bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
	  si = bsi_start (then_bb);
	  t = build1 (LABEL_EXPR, void_type_node, then_lab);
	  bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
	  t = build2 (MODIFY_EXPR, void_type_node, tmp, val);
	  bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
	  si = bsi_start (else_bb);
	  t = build1 (LABEL_EXPR, void_type_node, else_lab);
	  bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
	  t = build2 (MODIFY_EXPR, void_type_node, tmp,
		      build_int_cst (unsigned_type_node, 1));
	  bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
	  make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
	  make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
	  make_edge (then_bb, bb, EDGE_FALLTHRU);
	  make_edge (else_bb, bb, EDGE_FALLTHRU);
	  val = tmp;
	}
      list = NULL_TREE;
      val = get_formal_tmp_var (val, &list);
      si = bsi_start (bb);
      bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
    }
  /* Build GOMP_parallel_start (child_fn, &data, num_threads[, ws_args]).
     tree_cons prepends, so arguments are consed in reverse order.  */
  list = NULL_TREE;
  args = tree_cons (NULL, val, NULL);
  t = OMP_PARALLEL_DATA_ARG (entry_stmt);
  if (t == NULL)
    t = null_pointer_node;
  else
    t = build_fold_addr_expr (t);
  args = tree_cons (NULL, t, args);
  t = build_fold_addr_expr (OMP_PARALLEL_FN (entry_stmt));
  args = tree_cons (NULL, t, args);
  if (ws_args)
    args = chainon (args, ws_args);
  t = built_in_decls[start_ix];
  t = build_function_call_expr (t, args);
  gimplify_and_add (t, &list);
  /* The spawning thread participates too: call the child function
     directly with the data argument.  */
  t = OMP_PARALLEL_DATA_ARG (entry_stmt);
  if (t == NULL)
    t = null_pointer_node;
  else
    t = build_fold_addr_expr (t);
  args = tree_cons (NULL, t, NULL);
  t = build_function_call_expr (OMP_PARALLEL_FN (entry_stmt), args);
  gimplify_and_add (t, &list);
  /* Finally, wait for the team and tear it down.  */
  t = built_in_decls[BUILT_IN_GOMP_PARALLEL_END];
  t = build_function_call_expr (t, NULL);
  gimplify_and_add (t, &list);
  si = bsi_last (bb);
  bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
  pop_gimplify_context (NULL_TREE);
}
/* If exceptions are enabled, wrap *STMT_P in a MUST_NOT_THROW catch
   handler.  This prevents programs from violating the structured
   block semantics with throws.  The failure action is either the
   language's cleanup hook or a call to __builtin_trap.  */
static void
maybe_catch_exception (tree *stmt_p)
{
  tree f, t;
  if (!flag_exceptions)
    return;
  /* Prefer the frontend's own "exception escaped a cleanup" action
     (e.g. std::terminate) when one is provided.  */
  if (lang_protect_cleanup_actions)
    t = lang_protect_cleanup_actions ();
  else
    {
      t = built_in_decls[BUILT_IN_TRAP];
      t = build_function_call_expr (t, NULL);
    }
  /* Build an EH filter that matches nothing, so any throw reaching it
     runs the failure action instead of propagating.  */
  f = build2 (EH_FILTER_EXPR, void_type_node, NULL, NULL);
  EH_FILTER_MUST_NOT_THROW (f) = 1;
  gimplify_and_add (t, &EH_FILTER_FAILURE (f));
  t = build2 (TRY_CATCH_EXPR, void_type_node, *stmt_p, NULL);
  append_to_statement_list (f, &TREE_OPERAND (t, 1));
  *stmt_p = NULL;
  append_to_statement_list (t, stmt_p);
}
/* Convert a TREE_LIST of DECLs into a chain: link each list element's
   decl to the next element's decl via TREE_CHAIN, and return the first
   decl (or NULL_TREE for an empty list).  */
static tree
list2chain (tree list)
{
  tree cell;

  if (list == NULL_TREE)
    return NULL_TREE;

  for (cell = list; cell; cell = TREE_CHAIN (cell))
    {
      tree next = TREE_CHAIN (cell);
      /* The final decl's chain is explicitly cleared, since the decl
	 may still carry a stale link from elsewhere.  */
      TREE_CHAIN (TREE_VALUE (cell))
	= next ? TREE_VALUE (next) : NULL_TREE;
    }

  return TREE_VALUE (list);
}
/* Remove barriers in REGION->EXIT's block.  Note that this is only
   valid for OMP_PARALLEL regions.  Since the end of a parallel region
   is an implicit barrier, any workshare inside the OMP_PARALLEL that
   left a barrier at the end of the OMP_PARALLEL region can now be
   removed (by setting OMP_RETURN_NOWAIT on it).  */
static void
remove_exit_barrier (struct omp_region *region)
{
  block_stmt_iterator si;
  basic_block exit_bb;
  edge_iterator ei;
  edge e;
  tree t;
  exit_bb = region->exit;
  /* If the parallel region doesn't return, we don't have REGION->EXIT
     block at all.  */
  if (! exit_bb)
    return;
  /* The last insn in the block will be the parallel's OMP_RETURN.  The
     workshare's OMP_RETURN will be in a preceding block.  The kinds of
     statements that can appear in between are extremely limited -- no
     memory operations at all.  Here, we allow nothing at all, so the
     only thing we allow to precede this OMP_RETURN is a label.  */
  si = bsi_last (exit_bb);
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
  bsi_prev (&si);
  if (!bsi_end_p (si) && TREE_CODE (bsi_stmt (si)) != LABEL_EXPR)
    return;
  /* Mark every workshare OMP_RETURN feeding this exit as nowait; its
     barrier is subsumed by the parallel region's implicit one.  */
  FOR_EACH_EDGE (e, ei, exit_bb->preds)
    {
      si = bsi_last (e->src);
      if (bsi_end_p (si))
	continue;
      t = bsi_stmt (si);
      if (TREE_CODE (t) == OMP_RETURN)
	OMP_RETURN_NOWAIT (t) = 1;
    }
}
/* Recursively apply remove_exit_barrier to REGION and every region
   nested inside it.  */
static void
remove_exit_barriers (struct omp_region *region)
{
  struct omp_region *inner;

  if (region->type == OMP_PARALLEL)
    remove_exit_barrier (region);

  /* Visit all children; each child recurses into its own subtree.  */
  for (inner = region->inner; inner; inner = inner->next)
    remove_exit_barriers (inner);
}
/* Expand the OpenMP parallel directive starting at REGION: outline the
   region's body into its child function, rewire the CFG around the
   removed OMP_PARALLEL/OMP_RETURN statements, and emit the runtime
   calls that launch the team.  */
static void
expand_omp_parallel (struct omp_region *region)
{
  basic_block entry_bb, exit_bb, new_bb;
  struct function *child_cfun, *saved_cfun;
  tree child_fn, block, t, ws_args;
  block_stmt_iterator si;
  tree entry_stmt;
  edge e;
  bool do_cleanup_cfg = false;
  entry_stmt = last_stmt (region->entry);
  child_fn = OMP_PARALLEL_FN (entry_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  saved_cfun = cfun;
  entry_bb = region->entry;
  exit_bb = region->exit;
  if (is_combined_parallel (region))
    ws_args = region->ws_args;
  else
    ws_args = NULL_TREE;
  if (child_cfun->cfg)
    {
      /* Due to inlining, it may happen that we have already outlined
	 the region, in which case all we need to do is make the
	 sub-graph unreachable and emit the parallel call.  */
      edge entry_succ_e, exit_succ_e;
      block_stmt_iterator si;
      entry_succ_e = single_succ_edge (entry_bb);
      si = bsi_last (entry_bb);
      gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_PARALLEL);
      bsi_remove (&si, true);
      new_bb = entry_bb;
      remove_edge (entry_succ_e);
      if (exit_bb)
	{
	  exit_succ_e = single_succ_edge (exit_bb);
	  make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
	}
      do_cleanup_cfg = true;
    }
  else
    {
      /* If the parallel region needs data sent from the parent
	 function, then the very first statement (except possible
	 tree profile counter updates) of the parallel body
	 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O.  Since
	 &.OMP_DATA_O is passed as an argument to the child function,
	 we need to replace it with the argument as seen by the child
	 function.

	 In most cases, this will end up being the identity assignment
	 .OMP_DATA_I = .OMP_DATA_I.  However, if the parallel body had
	 a function call that has been inlined, the original PARM_DECL
	 .OMP_DATA_I may have been converted into a different local
	 variable.  In which case, we need to keep the assignment.  */
      if (OMP_PARALLEL_DATA_ARG (entry_stmt))
	{
	  basic_block entry_succ_bb = single_succ (entry_bb);
	  block_stmt_iterator si;
	  for (si = bsi_start (entry_succ_bb); ; bsi_next (&si))
	    {
	      tree stmt, arg;
	      /* The copy assignment must exist; running off the end of
		 the block would be a scan/lowering bug.  */
	      gcc_assert (!bsi_end_p (si));
	      stmt = bsi_stmt (si);
	      if (TREE_CODE (stmt) != MODIFY_EXPR)
		continue;
	      arg = TREE_OPERAND (stmt, 1);
	      STRIP_NOPS (arg);
	      if (TREE_CODE (arg) == ADDR_EXPR
		  && TREE_OPERAND (arg, 0)
		     == OMP_PARALLEL_DATA_ARG (entry_stmt))
		{
		  /* Identity assignment: delete it.  Otherwise rewrite
		     the RHS to the child's incoming parameter.  */
		  if (TREE_OPERAND (stmt, 0) == DECL_ARGUMENTS (child_fn))
		    bsi_remove (&si, true);
		  else
		    TREE_OPERAND (stmt, 1) = DECL_ARGUMENTS (child_fn);
		  break;
		}
	    }
	}
      /* Declare local variables needed in CHILD_CFUN.  */
      block = DECL_INITIAL (child_fn);
      BLOCK_VARS (block) = list2chain (child_cfun->unexpanded_var_list);
      DECL_SAVED_TREE (child_fn) = single_succ (entry_bb)->stmt_list;
      /* Reset DECL_CONTEXT on locals and function arguments.  */
      for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t))
	DECL_CONTEXT (t) = child_fn;
      for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
	DECL_CONTEXT (t) = child_fn;
      /* Split ENTRY_BB at OMP_PARALLEL so that it can be moved to the
	 child function.  */
      si = bsi_last (entry_bb);
      t = bsi_stmt (si);
      gcc_assert (t && TREE_CODE (t) == OMP_PARALLEL);
      bsi_remove (&si, true);
      e = split_block (entry_bb, t);
      entry_bb = e->dest;
      single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
      /* Move the parallel region into CHILD_CFUN.  We need to reset
	 dominance information because the expansion of the inner
	 regions has invalidated it.  */
      free_dominance_info (CDI_DOMINATORS);
      new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb);
      if (exit_bb)
	single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
      cgraph_add_new_function (child_fn);
      /* Convert OMP_RETURN into a RETURN_EXPR.  */
      if (exit_bb)
	{
	  si = bsi_last (exit_bb);
	  gcc_assert (!bsi_end_p (si)
		      && TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
	  t = build1 (RETURN_EXPR, void_type_node, NULL);
	  bsi_insert_after (&si, t, BSI_SAME_STMT);
	  bsi_remove (&si, true);
	}
    }
  /* Emit a library call to launch the children threads.  */
  expand_parallel_call (region, new_bb, entry_stmt, ws_args);
  if (do_cleanup_cfg)
    {
      /* Clean up the unreachable sub-graph we created above.  */
      free_dominance_info (CDI_DOMINATORS);
      free_dominance_info (CDI_POST_DOMINATORS);
      cleanup_tree_cfg ();
    }
}
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with any schedule.  Given parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
	if (more) goto L0; else goto L3;
    L0:
	V = istart0;
	iend = iend0;
    L1:
	BODY;
	V += STEP;
	if (V cond iend) goto L1; else goto L2;
    L2:
	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:

    If this is a combined omp parallel loop, instead of the call to
    GOMP_loop_foo_start, we emit 'goto L3'.  */

static void
expand_omp_for_generic (struct omp_region *region,
			struct omp_for_data *fd,
			enum built_in_function start_fn,
			enum built_in_function next_fn)
{
  tree l0, l1, l2 = NULL, l3 = NULL;
  tree type, istart0, iend0, iend;
  tree t, args, list;
  basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb;
  basic_block l2_bb = NULL, l3_bb = NULL;
  block_stmt_iterator si;
  bool in_combined_parallel = is_combined_parallel (region);

  type = TREE_TYPE (fd->v);

  /* The GOMP_loop_* runtime functions take long* out-arguments for the
     chunk bounds, so ISTART0/IEND0 must be longs and addressable.  */
  istart0 = create_tmp_var (long_integer_type_node, ".istart0");
  iend0 = create_tmp_var (long_integer_type_node, ".iend0");
  iend = create_tmp_var (type, NULL);
  TREE_ADDRESSABLE (istart0) = 1;
  TREE_ADDRESSABLE (iend0) = 1;

  /* A continue block and an exit block are either both present or
     both absent (the latter when BODY never returns).  */
  gcc_assert ((region->cont != NULL) ^ (region->exit == NULL));

  entry_bb = region->entry;
  l0_bb = create_empty_bb (entry_bb);
  l1_bb = single_succ (entry_bb);

  l0 = tree_block_label (l0_bb);
  l1 = tree_block_label (l1_bb);

  cont_bb = region->cont;
  exit_bb = region->exit;
  if (cont_bb)
    {
      /* L2 (next-chunk code) and L3 (loop exit) only exist when the
	 body can actually fall through to the continue point.  */
      l2_bb = create_empty_bb (cont_bb);
      l3_bb = single_succ (cont_bb);

      l2 = tree_block_label (l2_bb);
      l3 = tree_block_label (l3_bb);
    }

  si = bsi_last (entry_bb);
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
  if (!in_combined_parallel)
    {
      /* If this is not a combined parallel loop, emit a call to
	 GOMP_loop_foo_start in ENTRY_BB.  Arguments are consed in
	 reverse, yielding the call order (N1, N2, STEP, [CHUNK],
	 &istart0, &iend0).  */
      list = alloc_stmt_list ();
      t = build_fold_addr_expr (iend0);
      args = tree_cons (NULL, t, NULL);
      t = build_fold_addr_expr (istart0);
      args = tree_cons (NULL, t, args);
      if (fd->chunk_size)
	{
	  /* CHUNK is only passed for the chunked schedules.  */
	  t = fold_convert (long_integer_type_node, fd->chunk_size);
	  args = tree_cons (NULL, t, args);
	}
      t = fold_convert (long_integer_type_node, fd->step);
      args = tree_cons (NULL, t, args);
      t = fold_convert (long_integer_type_node, fd->n2);
      args = tree_cons (NULL, t, args);
      t = fold_convert (long_integer_type_node, fd->n1);
      args = tree_cons (NULL, t, args);
      t = build_function_call_expr (built_in_decls[start_fn], args);
      t = get_formal_tmp_var (t, &list);
      if (cont_bb)
	{
	  /* if (more) goto L0; else goto L3;  */
	  t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l0),
		      build_and_jump (&l3));
	  append_to_statement_list (t, &list);
	}
      bsi_insert_after (&si, list, BSI_SAME_STMT);
    }
  /* The OMP_FOR marker itself is always removed.  */
  bsi_remove (&si, true);

  /* Iteration setup for sequential loop goes in L0_BB:
     V = istart0; iend = iend0;  */
  list = alloc_stmt_list ();
  t = fold_convert (type, istart0);
  t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
  gimplify_and_add (t, &list);

  t = fold_convert (type, iend0);
  t = build2 (MODIFY_EXPR, void_type_node, iend, t);
  gimplify_and_add (t, &list);

  si = bsi_start (l0_bb);
  bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);

  /* Handle the rare case where BODY doesn't ever return: only the
     entry->L0->L1 path is needed; there is no back edge or exit.  */
  if (cont_bb == NULL)
    {
      remove_edge (single_succ_edge (entry_bb));
      make_edge (entry_bb, l0_bb, EDGE_FALLTHRU);
      make_edge (l0_bb, l1_bb, EDGE_FALLTHRU);
      return;
    }

  /* Code to control the increment and predicate for the sequential
     loop goes in the first half of EXIT_BB (we split EXIT_BB so
     that we can inherit all the edges going out of the loop
     body).  */
  list = alloc_stmt_list ();

  t = build2 (PLUS_EXPR, type, fd->v, fd->step);
  t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
  gimplify_and_add (t, &list);

  /* if (V cond iend) goto L1; else goto L2;  */
  t = build2 (fd->cond_code, boolean_type_node, fd->v, iend);
  t = get_formal_tmp_var (t, &list);
  t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l1),
	      build_and_jump (&l2));
  append_to_statement_list (t, &list);

  si = bsi_last (cont_bb);
  bsi_insert_after (&si, list, BSI_SAME_STMT);
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE);
  bsi_remove (&si, true);

  /* Emit code to get the next parallel iteration in L2_BB.  */
  list = alloc_stmt_list ();

  t = build_fold_addr_expr (iend0);
  args = tree_cons (NULL, t, NULL);
  t = build_fold_addr_expr (istart0);
  args = tree_cons (NULL, t, args);
  t = build_function_call_expr (built_in_decls[next_fn], args);
  t = get_formal_tmp_var (t, &list);
  /* if (GOMP_loop_foo_next (...)) goto L0; else goto L3;  */
  t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l0),
	      build_and_jump (&l3));
  append_to_statement_list (t, &list);

  si = bsi_start (l2_bb);
  bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);

  /* Add the loop cleanup function, replacing the OMP_RETURN marker.  */
  si = bsi_last (exit_bb);
  if (OMP_RETURN_NOWAIT (bsi_stmt (si)))
    t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
  else
    t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
  t = build_function_call_expr (t, NULL);
  bsi_insert_after (&si, t, BSI_SAME_STMT);
  bsi_remove (&si, true);

  /* Connect the new blocks.  */
  remove_edge (single_succ_edge (entry_bb));
  if (in_combined_parallel)
    /* Combined parallel loops skip the start call and go straight to
       the next-chunk code.  */
    make_edge (entry_bb, l2_bb, EDGE_FALLTHRU);
  else
    {
      make_edge (entry_bb, l0_bb, EDGE_TRUE_VALUE);
      make_edge (entry_bb, l3_bb, EDGE_FALSE_VALUE);
    }

  make_edge (l0_bb, l1_bb, EDGE_FALLTHRU);

  remove_edge (single_succ_edge (cont_bb));
  make_edge (cont_bb, l1_bb, EDGE_TRUE_VALUE);
  make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);

  make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
  make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
}
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and no specified chunk size.  Given
   parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	if (cond is <)
	  adj = STEP - 1;
	else
	  adj = STEP + 1;
	n = (adj + N2 - N1) / STEP;
	q = n / nthreads;
	q += (q * nthreads != n);
	s0 = q * threadid;
	e0 = min(s0 + q, n);
	if (s0 >= e0) goto L2; else goto L0;
    L0:
	V = s0 * STEP + N1;
	e = e0 * STEP + N1;
    L1:
	BODY;
	V += STEP;
	if (V cond e) goto L1;
    L2:
*/

static void
expand_omp_for_static_nochunk (struct omp_region *region,
			       struct omp_for_data *fd)
{
  tree l0, l1, l2, n, q, s0, e0, e, t, nthreads, threadid;
  tree type, list;
  basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
  basic_block fin_bb;
  block_stmt_iterator si;

  type = TREE_TYPE (fd->v);

  /* Identify the relevant blocks: entry holds the partitioning code,
     SEQ_START_BB the per-thread loop setup, FIN_BB the join point.  */
  entry_bb = region->entry;
  seq_start_bb = create_empty_bb (entry_bb);
  body_bb = single_succ (entry_bb);
  cont_bb = region->cont;
  fin_bb = single_succ (cont_bb);
  exit_bb = region->exit;

  l0 = tree_block_label (seq_start_bb);
  l1 = tree_block_label (body_bb);
  l2 = tree_block_label (fin_bb);

  /* Iteration space partitioning goes in ENTRY_BB.  */
  list = alloc_stmt_list ();

  t = built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS];
  t = build_function_call_expr (t, NULL);
  t = fold_convert (type, t);
  nthreads = get_formal_tmp_var (t, &list);

  t = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM];
  t = build_function_call_expr (t, NULL);
  t = fold_convert (type, t);
  threadid = get_formal_tmp_var (t, &list);

  /* Force N1, N2 and STEP into gimple values of the iteration type.  */
  fd->n1 = fold_convert (type, fd->n1);
  if (!is_gimple_val (fd->n1))
    fd->n1 = get_formal_tmp_var (fd->n1, &list);

  fd->n2 = fold_convert (type, fd->n2);
  if (!is_gimple_val (fd->n2))
    fd->n2 = get_formal_tmp_var (fd->n2, &list);

  fd->step = fold_convert (type, fd->step);
  if (!is_gimple_val (fd->step))
    fd->step = get_formal_tmp_var (fd->step, &list);

  /* n = (adj + N2 - N1) / STEP, the total trip count; the -1/+1
     adjustment makes the truncating division round in the direction
     of the loop.  */
  t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, type, fd->step, t);
  t = fold_build2 (PLUS_EXPR, type, t, fd->n2);
  t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
  t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
  t = fold_convert (type, t);
  if (is_gimple_val (t))
    n = t;
  else
    n = get_formal_tmp_var (t, &list);

  /* q = ceil (n / nthreads): iterations per thread.  The NE_EXPR
     contributes the +1 exactly when the division truncated.  */
  t = build2 (TRUNC_DIV_EXPR, type, n, nthreads);
  q = get_formal_tmp_var (t, &list);

  t = build2 (MULT_EXPR, type, q, nthreads);
  t = build2 (NE_EXPR, type, t, n);
  t = build2 (PLUS_EXPR, type, q, t);
  q = get_formal_tmp_var (t, &list);

  /* [s0, e0) is this thread's slice of the iteration space.  */
  t = build2 (MULT_EXPR, type, q, threadid);
  s0 = get_formal_tmp_var (t, &list);

  t = build2 (PLUS_EXPR, type, s0, q);
  t = build2 (MIN_EXPR, type, t, n);
  e0 = get_formal_tmp_var (t, &list);

  /* Empty slice: skip straight to the join point.  */
  t = build2 (GE_EXPR, boolean_type_node, s0, e0);
  t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l2),
	      build_and_jump (&l0));
  append_to_statement_list (t, &list);

  si = bsi_last (entry_bb);
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
  bsi_insert_after (&si, list, BSI_SAME_STMT);
  bsi_remove (&si, true);

  /* Setup code for sequential iteration goes in SEQ_START_BB:
     V = s0 * STEP + N1; e = e0 * STEP + N1;  */
  list = alloc_stmt_list ();

  t = fold_convert (type, s0);
  t = build2 (MULT_EXPR, type, t, fd->step);
  t = build2 (PLUS_EXPR, type, t, fd->n1);
  t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
  /* LLVM LOCAL begin 7387470 */
  /* Decorate this MODIFY_EXPR with the correct lexical block.  */
  {
    extern tree debug_find_var_in_block_tree (tree, tree);
    TREE_BLOCK(t) =
      debug_find_var_in_block_tree(fd->v,
                                   DECL_INITIAL(current_function_decl));
  }
  /* LLVM LOCAL end 7387470 */
  gimplify_and_add (t, &list);

  t = fold_convert (type, e0);
  t = build2 (MULT_EXPR, type, t, fd->step);
  t = build2 (PLUS_EXPR, type, t, fd->n1);
  e = get_formal_tmp_var (t, &list);

  si = bsi_start (seq_start_bb);
  bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);

  /* The code controlling the sequential loop replaces the OMP_CONTINUE:
     V += STEP; if (V cond e) goto L1; else goto L2;  */
  list = alloc_stmt_list ();

  t = build2 (PLUS_EXPR, type, fd->v, fd->step);
  t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
  gimplify_and_add (t, &list);

  t = build2 (fd->cond_code, boolean_type_node, fd->v, e);
  t = get_formal_tmp_var (t, &list);
  t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l1),
	      build_and_jump (&l2));
  append_to_statement_list (t, &list);

  si = bsi_last (cont_bb);
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE);
  bsi_insert_after (&si, list, BSI_SAME_STMT);
  bsi_remove (&si, true);

  /* Replace the OMP_RETURN with a barrier, or nothing ('nowait').  */
  si = bsi_last (exit_bb);
  if (!OMP_RETURN_NOWAIT (bsi_stmt (si)))
    {
      list = alloc_stmt_list ();
      build_omp_barrier (&list);
      bsi_insert_after (&si, list, BSI_SAME_STMT);
    }
  bsi_remove (&si, true);

  /* Connect all the blocks.  */
  make_edge (seq_start_bb, body_bb, EDGE_FALLTHRU);

  remove_edge (single_succ_edge (entry_bb));
  make_edge (entry_bb, fin_bb, EDGE_TRUE_VALUE);
  make_edge (entry_bb, seq_start_bb, EDGE_FALSE_VALUE);

  make_edge (cont_bb, body_bb, EDGE_TRUE_VALUE);
  find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
}
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and a specified chunk size.  Given
   parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	if (cond is <)
	  adj = STEP - 1;
	else
	  adj = STEP + 1;
	n = (adj + N2 - N1) / STEP;
	trip = 0;
    L0:
	s0 = (trip * nthreads + threadid) * CHUNK;
	e0 = min(s0 + CHUNK, n);
	if (s0 < n) goto L1; else goto L4;
    L1:
	V = s0 * STEP + N1;
	e = e0 * STEP + N1;
    L2:
	BODY;
	V += STEP;
	if (V cond e) goto L2; else goto L3;
    L3:
	trip += 1;
	goto L0;
    L4:
*/

static void
expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
{
  tree l0, l1, l2, l3, l4, n, s0, e0, e, t;
  tree trip, nthreads, threadid;
  tree type;
  basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
  basic_block trip_update_bb, cont_bb, fin_bb;
  tree list;
  block_stmt_iterator si;

  type = TREE_TYPE (fd->v);

  /* Lay out the new blocks around the existing loop body: ITER_PART_BB
     computes each chunk (L0), SEQ_START_BB sets up the sequential loop
     (L1), TRIP_UPDATE_BB advances the trip count (L3).  */
  entry_bb = region->entry;
  iter_part_bb = create_empty_bb (entry_bb);
  seq_start_bb = create_empty_bb (iter_part_bb);
  body_bb = single_succ (entry_bb);
  cont_bb = region->cont;
  trip_update_bb = create_empty_bb (cont_bb);
  fin_bb = single_succ (cont_bb);
  exit_bb = region->exit;

  l0 = tree_block_label (iter_part_bb);
  l1 = tree_block_label (seq_start_bb);
  l2 = tree_block_label (body_bb);
  l3 = tree_block_label (trip_update_bb);
  l4 = tree_block_label (fin_bb);

  /* Trip and adjustment setup goes in ENTRY_BB.  */
  list = alloc_stmt_list ();

  t = built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS];
  t = build_function_call_expr (t, NULL);
  t = fold_convert (type, t);
  nthreads = get_formal_tmp_var (t, &list);

  t = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM];
  t = build_function_call_expr (t, NULL);
  t = fold_convert (type, t);
  threadid = get_formal_tmp_var (t, &list);

  /* Force the bounds, step and chunk size into gimple values.  */
  fd->n1 = fold_convert (type, fd->n1);
  if (!is_gimple_val (fd->n1))
    fd->n1 = get_formal_tmp_var (fd->n1, &list);

  fd->n2 = fold_convert (type, fd->n2);
  if (!is_gimple_val (fd->n2))
    fd->n2 = get_formal_tmp_var (fd->n2, &list);

  fd->step = fold_convert (type, fd->step);
  if (!is_gimple_val (fd->step))
    fd->step = get_formal_tmp_var (fd->step, &list);

  fd->chunk_size = fold_convert (type, fd->chunk_size);
  if (!is_gimple_val (fd->chunk_size))
    fd->chunk_size = get_formal_tmp_var (fd->chunk_size, &list);

  /* n = (adj + N2 - N1) / STEP, the total trip count; the -1/+1
     adjustment makes the truncating division round toward the loop
     direction.  */
  t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, type, fd->step, t);
  t = fold_build2 (PLUS_EXPR, type, t, fd->n2);
  t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
  t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
  t = fold_convert (type, t);
  if (is_gimple_val (t))
    n = t;
  else
    n = get_formal_tmp_var (t, &list);

  /* trip = 0;  */
  t = build_int_cst (type, 0);
  trip = get_initialized_tmp_var (t, &list, NULL);

  si = bsi_last (entry_bb);
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
  bsi_insert_after (&si, list, BSI_SAME_STMT);
  bsi_remove (&si, true);

  /* Iteration space partitioning goes in ITER_PART_BB:
     s0 = (trip * nthreads + threadid) * CHUNK; e0 = min (s0 + CHUNK, n);  */
  list = alloc_stmt_list ();

  t = build2 (MULT_EXPR, type, trip, nthreads);
  t = build2 (PLUS_EXPR, type, t, threadid);
  t = build2 (MULT_EXPR, type, t, fd->chunk_size);
  s0 = get_formal_tmp_var (t, &list);

  t = build2 (PLUS_EXPR, type, s0, fd->chunk_size);
  t = build2 (MIN_EXPR, type, t, n);
  e0 = get_formal_tmp_var (t, &list);

  /* No chunk left for this thread: exit the loop.  */
  t = build2 (LT_EXPR, boolean_type_node, s0, n);
  t = build3 (COND_EXPR, void_type_node, t,
	      build_and_jump (&l1), build_and_jump (&l4));
  append_to_statement_list (t, &list);

  si = bsi_start (iter_part_bb);
  bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);

  /* Setup code for sequential iteration goes in SEQ_START_BB:
     V = s0 * STEP + N1; e = e0 * STEP + N1;  */
  list = alloc_stmt_list ();

  t = fold_convert (type, s0);
  t = build2 (MULT_EXPR, type, t, fd->step);
  t = build2 (PLUS_EXPR, type, t, fd->n1);
  t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
  gimplify_and_add (t, &list);

  t = fold_convert (type, e0);
  t = build2 (MULT_EXPR, type, t, fd->step);
  t = build2 (PLUS_EXPR, type, t, fd->n1);
  e = get_formal_tmp_var (t, &list);

  si = bsi_start (seq_start_bb);
  bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);

  /* The code controlling the sequential loop goes in CONT_BB,
     replacing the OMP_CONTINUE:
     V += STEP; if (V cond e) goto L2; else goto L3;  */
  list = alloc_stmt_list ();

  t = build2 (PLUS_EXPR, type, fd->v, fd->step);
  t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
  gimplify_and_add (t, &list);

  t = build2 (fd->cond_code, boolean_type_node, fd->v, e);
  t = get_formal_tmp_var (t, &list);
  t = build3 (COND_EXPR, void_type_node, t,
	      build_and_jump (&l2), build_and_jump (&l3));
  append_to_statement_list (t, &list);

  si = bsi_last (cont_bb);
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE);
  bsi_insert_after (&si, list, BSI_SAME_STMT);
  bsi_remove (&si, true);

  /* Trip update code (trip += 1) goes into TRIP_UPDATE_BB.  */
  list = alloc_stmt_list ();

  t = build_int_cst (type, 1);
  t = build2 (PLUS_EXPR, type, trip, t);
  t = build2 (MODIFY_EXPR, void_type_node, trip, t);
  gimplify_and_add (t, &list);

  si = bsi_start (trip_update_bb);
  bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);

  /* Replace the OMP_RETURN with a barrier, or nothing ('nowait').  */
  si = bsi_last (exit_bb);
  if (!OMP_RETURN_NOWAIT (bsi_stmt (si)))
    {
      list = alloc_stmt_list ();
      build_omp_barrier (&list);
      bsi_insert_after (&si, list, BSI_SAME_STMT);
    }
  bsi_remove (&si, true);

  /* Connect the new blocks.  */
  remove_edge (single_succ_edge (entry_bb));
  make_edge (entry_bb, iter_part_bb, EDGE_FALLTHRU);

  make_edge (iter_part_bb, seq_start_bb, EDGE_TRUE_VALUE);
  make_edge (iter_part_bb, fin_bb, EDGE_FALSE_VALUE);

  make_edge (seq_start_bb, body_bb, EDGE_FALLTHRU);

  remove_edge (single_succ_edge (cont_bb));
  make_edge (cont_bb, body_bb, EDGE_TRUE_VALUE);
  make_edge (cont_bb, trip_update_bb, EDGE_FALSE_VALUE);

  make_edge (trip_update_bb, iter_part_bb, EDGE_FALLTHRU);
}
/* Expand the OpenMP loop defined by REGION. */
static void
expand_omp_for (struct omp_region *region)
{
struct omp_for_data fd;
push_gimplify_context ();
extract_omp_for_data (last_stmt (region->entry), &fd);
region->sched_kind = fd.sched_kind;
if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
&& !fd.have_ordered
&& region->cont
&& region->exit)
{
if (fd.chunk_size == NULL)
expand_omp_for_static_nochunk (region, &fd);
else
expand_omp_for_static_chunk (region, &fd);
}
else
{
int fn_index = fd.sched_kind + fd.have_ordered * 4;
int start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
int next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
expand_omp_for_generic (region, &fd, start_ix, next_ix);
}
pop_gimplify_context (NULL);
}
/* Expand code for an OpenMP sections directive.  In pseudo code, we generate

	v = GOMP_sections_start (n);
    L0:
	switch (v)
	  {
	  case 0:
	    goto L2;
	  case 1:
	    section 1;
	    goto L1;
	  case 2:
	    ...
	  case n:
	    ...
	  default:
	    abort ();
	  }
    L1:
	v = GOMP_sections_next ();
	goto L0;
    L2:
	reduction;

    If this is a combined parallel sections, replace the call to
    GOMP_sections_start with 'goto L1'.  */

static void
expand_omp_sections (struct omp_region *region)
{
  tree label_vec, l0, l1, l2, t, u, v, sections_stmt;
  unsigned i, len;
  basic_block entry_bb, exit_bb, l0_bb, l1_bb, l2_bb, default_bb;
  block_stmt_iterator si;
  struct omp_region *inner;
  edge e;

  entry_bb = region->entry;
  l0_bb = create_empty_bb (entry_bb);
  l0 = tree_block_label (l0_bb);

  /* A continue block and an exit block are either both present or
     both absent.  */
  gcc_assert ((region->cont != NULL) ^ (region->exit == NULL));
  l1_bb = region->cont;
  if (l1_bb)
    {
      l2_bb = single_succ (l1_bb);
      default_bb = create_empty_bb (l1_bb->prev_bb);

      l1 = tree_block_label (l1_bb);
    }
  else
    {
      /* No continue block: control never comes back for more work, so
	 the default (abort) case doubles as L2.  */
      l2_bb = create_empty_bb (l0_bb);
      default_bb = l2_bb;

      l1 = NULL;
    }
  l2 = tree_block_label (l2_bb);

  exit_bb = region->exit;

  v = create_tmp_var (unsigned_type_node, ".section");

  /* We will build a switch() with enough cases for all the
     OMP_SECTION regions, a '0' case to handle the end of more work
     and a default case to abort if something goes wrong.  */
  len = EDGE_COUNT (entry_bb->succs);
  label_vec = make_tree_vec (len + 2);

  /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
     OMP_SECTIONS statement.  */
  si = bsi_last (entry_bb);
  sections_stmt = bsi_stmt (si);
  gcc_assert (TREE_CODE (sections_stmt) == OMP_SECTIONS);
  if (!is_combined_parallel (region))
    {
      /* If we are not inside a combined parallel+sections region,
	 call GOMP_sections_start.  */
      t = build_int_cst (unsigned_type_node, len);
      t = tree_cons (NULL, t, NULL);
      u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
      t = build_function_call_expr (u, t);
      t = build2 (MODIFY_EXPR, void_type_node, v, t);
      bsi_insert_after (&si, t, BSI_SAME_STMT);
    }
  bsi_remove (&si, true);

  /* The switch() statement replacing OMP_SECTIONS goes in L0_BB.  */
  si = bsi_start (l0_bb);

  t = build3 (SWITCH_EXPR, void_type_node, v, NULL, label_vec);
  bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);

  /* Case 0: no more work; jump to L2.  */
  t = build3 (CASE_LABEL_EXPR, void_type_node,
	      build_int_cst (unsigned_type_node, 0), NULL, l2);
  TREE_VEC_ELT (label_vec, 0) = t;
  make_edge (l0_bb, l2_bb, 0);

  /* Convert each OMP_SECTION into a CASE_LABEL_EXPR.  */
  for (inner = region->inner, i = 1; inner; inner = inner->next, ++i)
    {
      basic_block s_entry_bb, s_exit_bb;

      s_entry_bb = inner->entry;
      s_exit_bb = inner->exit;

      t = tree_block_label (s_entry_bb);
      u = build_int_cst (unsigned_type_node, i);
      u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
      TREE_VEC_ELT (label_vec, i) = u;

      /* Drop the OMP_SECTION marker at the end of the section's entry
	 block.  */
      si = bsi_last (s_entry_bb);
      gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SECTION);
      gcc_assert (i < len || OMP_SECTION_LAST (bsi_stmt (si)));
      bsi_remove (&si, true);

      /* Rewire the section entry so the switch block reaches it.  */
      e = single_pred_edge (s_entry_bb);
      e->flags = 0;
      redirect_edge_pred (e, l0_bb);

      single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;

      if (s_exit_bb == NULL)
	continue;

      /* Drop the section's OMP_RETURN marker as well.  */
      si = bsi_last (s_exit_bb);
      gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
      bsi_remove (&si, true);

      single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
    }

  /* Error handling code goes in DEFAULT_BB.  */
  t = tree_block_label (default_bb);
  u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
  TREE_VEC_ELT (label_vec, len + 1) = u;
  make_edge (l0_bb, default_bb, 0);

  si = bsi_start (default_bb);
  t = built_in_decls[BUILT_IN_TRAP];
  t = build_function_call_expr (t, NULL);
  bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);

  /* Code to get the next section goes in L1_BB, replacing the
     OMP_CONTINUE marker.  */
  if (l1_bb)
    {
      si = bsi_last (l1_bb);
      gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE);

      t = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
      t = build_function_call_expr (t, NULL);
      t = build2 (MODIFY_EXPR, void_type_node, v, t);
      bsi_insert_after (&si, t, BSI_SAME_STMT);
      bsi_remove (&si, true);
    }

  /* Cleanup function replaces OMP_RETURN in EXIT_BB; the _NOWAIT
     variant skips the implicit barrier.  */
  if (exit_bb)
    {
      si = bsi_last (exit_bb);
      if (OMP_RETURN_NOWAIT (bsi_stmt (si)))
	t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
      else
	t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
      t = build_function_call_expr (t, NULL);
      bsi_insert_after (&si, t, BSI_SAME_STMT);
      bsi_remove (&si, true);
    }

  /* Connect the new blocks.  */
  if (is_combined_parallel (region))
    {
      /* If this was a combined parallel+sections region, we did not
	 emit a GOMP_sections_start in the entry block, so we just
	 need to jump to L1_BB to get the next section.  */
      make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
    }
  else
    make_edge (entry_bb, l0_bb, EDGE_FALLTHRU);

  if (l1_bb)
    {
      e = single_succ_edge (l1_bb);
      redirect_edge_succ (e, l0_bb);
      e->flags = EDGE_FALLTHRU;
    }
}
/* Expand code for an OpenMP single directive.  We've already expanded
   much of the code, here we simply place the GOMP_barrier call.  */

static void
expand_omp_single (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  block_stmt_iterator si;
  bool need_barrier = false;

  entry_bb = region->entry;
  exit_bb = region->exit;

  si = bsi_last (entry_bb);
  /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
     be removed.  We need to ensure that the thread that entered the single
     does not exit before the data is copied out by the other threads.  */
  if (find_omp_clause (OMP_SINGLE_CLAUSES (bsi_stmt (si)),
		       OMP_CLAUSE_COPYPRIVATE))
    need_barrier = true;

  /* Drop the OMP_SINGLE marker that ends the entry block.  */
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SINGLE);
  bsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  /* Replace the OMP_RETURN with a barrier unless 'nowait' was given
     and no copyprivate clause forces one.  */
  si = bsi_last (exit_bb);
  if (!OMP_RETURN_NOWAIT (bsi_stmt (si)) || need_barrier)
    {
      tree t = alloc_stmt_list ();
      build_omp_barrier (&t);
      bsi_insert_after (&si, t, BSI_SAME_STMT);
    }
  bsi_remove (&si, true);
  single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
}
/* Generic expansion for OpenMP synchronization directives: master,
   ordered and critical.  All we need to do here is remove the entry
   and exit markers for REGION.  */

static void
expand_omp_synch (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  block_stmt_iterator si;

  entry_bb = region->entry;
  exit_bb = region->exit;

  /* Remove the directive marker that ends the entry block.  */
  si = bsi_last (entry_bb);
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SINGLE
	      || TREE_CODE (bsi_stmt (si)) == OMP_MASTER
	      || TREE_CODE (bsi_stmt (si)) == OMP_ORDERED
	      || TREE_CODE (bsi_stmt (si)) == OMP_CRITICAL);
  bsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  /* And the matching OMP_RETURN, when the region has an exit block.  */
  if (exit_bb)
    {
      si = bsi_last (exit_bb);
      gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
      bsi_remove (&si, true);
      single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
    }
}
/* Expand the parallel region tree rooted at REGION.  Expansion
   proceeds in depth-first order.  Innermost regions are expanded
   first.  This way, parallel regions that require a new function to
   be created (e.g., OMP_PARALLEL) can be expanded without having any
   internal dependencies in their body.  */

static void
expand_omp (struct omp_region *region)
{
  for (; region; region = region->next)
    {
      /* Children first: an inner region must be fully expanded before
	 its enclosing region is.  */
      if (region->inner)
	expand_omp (region->inner);

      switch (region->type)
	{
	case OMP_PARALLEL:
	  expand_omp_parallel (region);
	  break;

	case OMP_FOR:
	  expand_omp_for (region);
	  break;

	case OMP_SECTIONS:
	  expand_omp_sections (region);
	  break;

	case OMP_SECTION:
	  /* Individual omp sections are handled together with their
	     parent OMP_SECTIONS region.  */
	  break;

	case OMP_SINGLE:
	  expand_omp_single (region);
	  break;

	case OMP_MASTER:
	case OMP_ORDERED:
	case OMP_CRITICAL:
	  expand_omp_synch (region);
	  break;

	default:
	  gcc_unreachable ();
	}
    }
}
/* Helper for build_omp_regions.  Scan the dominator tree starting at
   block BB.  PARENT is the region that contains BB.  */

static void
build_omp_regions_1 (basic_block bb, struct omp_region *parent)
{
  block_stmt_iterator si;
  tree stmt;
  basic_block son;

  /* OMP directives always terminate their basic block, so only the
     last statement needs checking.  */
  si = bsi_last (bb);
  if (!bsi_end_p (si) && OMP_DIRECTIVE_P (bsi_stmt (si)))
    {
      struct omp_region *region;
      enum tree_code code;

      stmt = bsi_stmt (si);
      code = TREE_CODE (stmt);

      if (code == OMP_RETURN)
	{
	  /* STMT is the return point out of region PARENT.  Mark it
	     as the exit point and make PARENT the immediately
	     enclosing region.  */
	  gcc_assert (parent);
	  region = parent;
	  region->exit = bb;
	  parent = parent->outer;

	  /* If REGION is a parallel region, determine whether it is
	     a combined parallel+workshare region.  */
	  if (region->type == OMP_PARALLEL)
	    determine_parallel_type (region);
	}
      else if (code == OMP_CONTINUE)
	{
	  /* Continue markers record the region's loop-back block.  */
	  gcc_assert (parent);
	  parent->cont = bb;
	}
      else
	{
	  /* Otherwise, this directive becomes the parent for a new
	     region.  */
	  region = new_omp_region (bb, code, parent);
	  parent = region;
	}
    }

  /* Recurse into the dominated blocks, each inheriting the PARENT
     computed above.  */
  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    build_omp_regions_1 (son, parent);
}
/* Scan the CFG and build a tree of OMP regions.  Return the root of
   the OMP region tree.  */

static void
build_omp_regions (void)
{
  /* Start from a clean slate; the walk below is driven by the
     dominator tree, so compute it first.  */
  gcc_assert (root_omp_region == NULL);
  calculate_dominance_info (CDI_DOMINATORS);
  build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL);
}
/* Main entry point for expanding OMP-GIMPLE into runtime calls.  */

static unsigned int
execute_expand_omp (void)
{
  build_omp_regions ();

  /* Nothing to do when the function contains no OMP directives.  */
  if (!root_omp_region)
    return 0;

  if (dump_file)
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);

  expand_omp (root_omp_region);

  /* Expansion rewrote the CFG wholesale; invalidate dominance data
     and clean up the resulting graph.  */
  free_dominance_info (CDI_DOMINATORS);
  free_dominance_info (CDI_POST_DOMINATORS);
  cleanup_tree_cfg ();

  free_omp_regions ();

  return 0;
}
static bool
gate_expand_omp (void)
{
return flag_openmp != 0 && errorcount == 0;
}
/* Pass descriptor for the "ompexp" pass, which expands OMP-GIMPLE
   into runtime calls via execute_expand_omp above.  */

struct tree_opt_pass pass_expand_omp =
{
  "ompexp",				/* name */
  gate_expand_omp,			/* gate */
  execute_expand_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  0,					/* tv_id */
  PROP_gimple_any,			/* properties_required */
  PROP_gimple_lomp,			/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func,			/* todo_flags_finish */
  0					/* letter */
};
/* Routines to lower OpenMP directives into OMP-GIMPLE.  */

/* Lower the OpenMP sections directive in *STMT_P.  */

static void
lower_omp_sections (tree *stmt_p, omp_context *ctx)
{
  tree new_stmt, stmt, body, bind, block, ilist, olist, new_body;
  tree t, dlist;
  tree_stmt_iterator tsi;
  unsigned i, len;

  stmt = *stmt_p;

  push_gimplify_context ();

  /* ILIST receives the data-sharing setup code, DLIST the matching
     teardown.  */
  dlist = NULL;
  ilist = NULL;
  lower_rec_input_clauses (OMP_SECTIONS_CLAUSES (stmt), &ilist, &dlist, ctx);

  /* Count the sections in the body.  */
  tsi = tsi_start (OMP_SECTIONS_BODY (stmt));
  for (len = 0; !tsi_end_p (tsi); len++, tsi_next (&tsi))
    continue;

  /* Lower each section in turn, appending it to BODY followed by an
     OMP_RETURN marker.  */
  tsi = tsi_start (OMP_SECTIONS_BODY (stmt));
  body = alloc_stmt_list ();
  for (i = 0; i < len; i++, tsi_next (&tsi))
    {
      omp_context *sctx;
      tree sec_start, sec_end;

      sec_start = tsi_stmt (tsi);
      sctx = maybe_lookup_ctx (sec_start);
      gcc_assert (sctx);

      append_to_statement_list (sec_start, &body);

      lower_omp (&OMP_SECTION_BODY (sec_start), sctx);
      append_to_statement_list (OMP_SECTION_BODY (sec_start), &body);
      OMP_SECTION_BODY (sec_start) = NULL;

      if (i == len - 1)
	{
	  /* Lastprivate copy-out belongs at the end of the last
	     section, which is also flagged as such for later passes.  */
	  tree l = alloc_stmt_list ();
	  lower_lastprivate_clauses (OMP_SECTIONS_CLAUSES (stmt), NULL,
				     &l, ctx);
	  append_to_statement_list (l, &body);
	  OMP_SECTION_LAST (sec_start) = 1;
	}

      sec_end = make_node (OMP_RETURN);
      append_to_statement_list (sec_end, &body);
    }

  block = make_node (BLOCK);
  bind = build3 (BIND_EXPR, void_type_node, NULL, body, block);

  olist = NULL_TREE;
  lower_reduction_clauses (OMP_SECTIONS_CLAUSES (stmt), &olist, ctx);

  pop_gimplify_context (NULL_TREE);
  record_vars_into (ctx->block_vars, ctx->cb.dst_fn);

  /* Wrap everything as: ilist; OMP_SECTIONS; bind; OMP_CONTINUE;
     reductions; dlist; OMP_RETURN.  */
  new_stmt = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  TREE_SIDE_EFFECTS (new_stmt) = 1;

  new_body = alloc_stmt_list ();
  append_to_statement_list (ilist, &new_body);
  append_to_statement_list (stmt, &new_body);
  append_to_statement_list (bind, &new_body);

  t = make_node (OMP_CONTINUE);
  append_to_statement_list (t, &new_body);

  append_to_statement_list (olist, &new_body);
  append_to_statement_list (dlist, &new_body);

  maybe_catch_exception (&new_body);

  t = make_node (OMP_RETURN);
  OMP_RETURN_NOWAIT (t) = !!find_omp_clause (OMP_SECTIONS_CLAUSES (stmt),
					     OMP_CLAUSE_NOWAIT);
  append_to_statement_list (t, &new_body);

  BIND_EXPR_BODY (new_stmt) = new_body;
  OMP_SECTIONS_BODY (stmt) = NULL;

  *stmt_p = new_stmt;
}
/* A subroutine of lower_omp_single.  Expand the simple form of
   an OMP_SINGLE, without a copyprivate clause:

	if (GOMP_single_start ())
	  BODY;
	[ GOMP_barrier (); ] -> unless 'nowait' is present.

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  to a synchronization analysis pass.  */

static void
lower_omp_single_simple (tree single_stmt, tree *pre_p)
{
  tree cond;

  /* Guard BODY with the result of GOMP_single_start (): only the
     thread for which it returns nonzero executes the body.  */
  cond = build_function_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_START],
				   NULL);
  cond = build3 (COND_EXPR, void_type_node, cond,
		 OMP_SINGLE_BODY (single_stmt), NULL);
  gimplify_and_add (cond, pre_p);
}
/* A subroutine of lower_omp_single.  Expand the simple form of
   an OMP_SINGLE, with a copyprivate clause:

	#pragma omp single copyprivate (a, b, c)

   Create a new structure to hold copies of 'a', 'b' and 'c' and emit:

      {
	if ((copyout_p = GOMP_single_copy_start ()) == NULL)
	  {
	    BODY;
	    copyout.a = a;
	    copyout.b = b;
	    copyout.c = c;
	    GOMP_single_copy_end (&copyout);
	  }
	else
	  {
	    a = copyout_p->a;
	    b = copyout_p->b;
	    c = copyout_p->c;
	  }
	GOMP_barrier ();
      }

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  to a synchronization analysis pass.  */

static void
lower_omp_single_copy (tree single_stmt, tree *pre_p, omp_context *ctx)
{
  tree ptr_type, t, args, l0, l1, l2, copyin_seq;

  /* SENDER_DECL is the copy-out record the winning thread fills in;
     RECEIVER_DECL is the pointer through which the other threads read
     it back.  */
  ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");

  ptr_type = build_pointer_type (ctx->record_type);
  ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");

  l0 = create_artificial_label ();
  l1 = create_artificial_label ();
  l2 = create_artificial_label ();

  /* receiver = GOMP_single_copy_start (); a NULL result selects this
     thread to execute BODY and publish the copy-out record.  */
  t = built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START];
  t = build_function_call_expr (t, NULL);
  t = fold_convert (ptr_type, t);
  t = build2 (MODIFY_EXPR, void_type_node, ctx->receiver_decl, t);
  gimplify_and_add (t, pre_p);

  t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
	      build_int_cst (ptr_type, 0));
  t = build3 (COND_EXPR, void_type_node, t,
	      build_and_jump (&l0), build_and_jump (&l1));
  gimplify_and_add (t, pre_p);

  /* L0: the selected thread runs BODY, fills SENDER_DECL (via the
     copyprivate lowering below) and hands it to the runtime.  */
  t = build1 (LABEL_EXPR, void_type_node, l0);
  gimplify_and_add (t, pre_p);

  append_to_statement_list (OMP_SINGLE_BODY (single_stmt), pre_p);

  /* COPYIN_SEQ collects the statements the other threads use to copy
     the values back in at L1.  (This call previously read "&copy;in_seq"
     due to an HTML-entity corruption of "&copyin_seq".)  */
  copyin_seq = NULL;
  lower_copyprivate_clauses (OMP_SINGLE_CLAUSES (single_stmt), pre_p,
			     &copyin_seq, ctx);

  t = build_fold_addr_expr (ctx->sender_decl);
  args = tree_cons (NULL, t, NULL);
  t = built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END];
  t = build_function_call_expr (t, args);
  gimplify_and_add (t, pre_p);

  t = build_and_jump (&l2);
  gimplify_and_add (t, pre_p);

  /* L1: the remaining threads copy the published values in.  */
  t = build1 (LABEL_EXPR, void_type_node, l1);
  gimplify_and_add (t, pre_p);

  append_to_statement_list (copyin_seq, pre_p);

  t = build1 (LABEL_EXPR, void_type_node, l2);
  gimplify_and_add (t, pre_p);
}
/* Expand code for an OpenMP single directive.  */

static void
lower_omp_single (tree *stmt_p, omp_context *ctx)
{
  tree t, bind, block, single_stmt = *stmt_p, dlist;

  push_gimplify_context ();

  /* Replace the directive with a BIND_EXPR that wraps the lowered
     form.  */
  block = make_node (BLOCK);
  *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
  TREE_SIDE_EFFECTS (bind) = 1;

  lower_rec_input_clauses (OMP_SINGLE_CLAUSES (single_stmt),
			   &BIND_EXPR_BODY (bind), &dlist, ctx);
  lower_omp (&OMP_SINGLE_BODY (single_stmt), ctx);

  append_to_statement_list (single_stmt, &BIND_EXPR_BODY (bind));

  /* A non-null record_type selects the copy-out (copyprivate) form;
     otherwise the simple GOMP_single_start form suffices.  */
  if (ctx->record_type)
    lower_omp_single_copy (single_stmt, &BIND_EXPR_BODY (bind), ctx);
  else
    lower_omp_single_simple (single_stmt, &BIND_EXPR_BODY (bind));

  OMP_SINGLE_BODY (single_stmt) = NULL;

  append_to_statement_list (dlist, &BIND_EXPR_BODY (bind));

  maybe_catch_exception (&BIND_EXPR_BODY (bind));

  t = make_node (OMP_RETURN);
  OMP_RETURN_NOWAIT (t) = !!find_omp_clause (OMP_SINGLE_CLAUSES (single_stmt),
					     OMP_CLAUSE_NOWAIT);
  append_to_statement_list (t, &BIND_EXPR_BODY (bind));

  pop_gimplify_context (bind);

  BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
  BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
}
/* Expand code for an OpenMP master directive.  */

static void
lower_omp_master (tree *stmt_p, omp_context *ctx)
{
  tree bind, block, stmt = *stmt_p, lab = NULL, x;

  push_gimplify_context ();

  /* Replace the directive with a BIND_EXPR wrapping the lowered form.  */
  block = make_node (BLOCK);
  *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
  TREE_SIDE_EFFECTS (bind) = 1;

  append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));

  /* if (omp_get_thread_num () != 0) skip past the body to LAB.  */
  x = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM];
  x = build_function_call_expr (x, NULL);
  x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
  x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
  gimplify_and_add (x, &BIND_EXPR_BODY (bind));

  lower_omp (&OMP_MASTER_BODY (stmt), ctx);
  maybe_catch_exception (&OMP_MASTER_BODY (stmt));
  append_to_statement_list (OMP_MASTER_BODY (stmt), &BIND_EXPR_BODY (bind));
  OMP_MASTER_BODY (stmt) = NULL;

  x = build1 (LABEL_EXPR, void_type_node, lab);
  gimplify_and_add (x, &BIND_EXPR_BODY (bind));

  /* OMP_RETURN with NOWAIT set: no barrier at the end of a master
     region.  */
  x = make_node (OMP_RETURN);
  OMP_RETURN_NOWAIT (x) = 1;
  append_to_statement_list (x, &BIND_EXPR_BODY (bind));

  pop_gimplify_context (bind);

  BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
  BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
}
/* Expand code for an OpenMP ordered directive.  The lowered body is
   bracketed by calls to GOMP_ordered_start and GOMP_ordered_end.  */
static void
lower_omp_ordered (tree *stmt_p, omp_context *ctx)
{
tree bind, block, stmt = *stmt_p, x;
push_gimplify_context ();
block = make_node (BLOCK);
*stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
TREE_SIDE_EFFECTS (bind) = 1;
append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
x = built_in_decls[BUILT_IN_GOMP_ORDERED_START];
x = build_function_call_expr (x, NULL);
gimplify_and_add (x, &BIND_EXPR_BODY (bind));
lower_omp (&OMP_ORDERED_BODY (stmt), ctx);
maybe_catch_exception (&OMP_ORDERED_BODY (stmt));
append_to_statement_list (OMP_ORDERED_BODY (stmt), &BIND_EXPR_BODY (bind));
OMP_ORDERED_BODY (stmt) = NULL;
x = built_in_decls[BUILT_IN_GOMP_ORDERED_END];
x = build_function_call_expr (x, NULL);
gimplify_and_add (x, &BIND_EXPR_BODY (bind));
/* Ordering is handled by the runtime calls above; no barrier here.  */
x = make_node (OMP_RETURN);
OMP_RETURN_NOWAIT (x) = 1;
append_to_statement_list (x, &BIND_EXPR_BODY (bind));
pop_gimplify_context (bind);
BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
}
/* Gimplify an OMP_CRITICAL statement.  This is a relatively simple
   substitution of a couple of function calls.  But in the NAMED case,
   requires that languages coordinate a symbol name.  It is therefore
   best put here in common code.  */
/* Cache mapping critical-section names to their lazily created mutex
   variables, so every "#pragma omp critical (name)" in the translation
   unit shares one lock.  */
static GTY((param1_is (tree), param2_is (tree)))
splay_tree critical_name_mutexes;
static void
lower_omp_critical (tree *stmt_p, omp_context *ctx)
{
tree bind, block, stmt = *stmt_p;
tree t, lock, unlock, name;
name = OMP_CRITICAL_NAME (stmt);
if (name)
{
tree decl, args;
splay_tree_node n;
if (!critical_name_mutexes)
critical_name_mutexes
= splay_tree_new_ggc (splay_tree_compare_pointers);
n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
if (n == NULL)
{
char *new_str;
/* First use of this name: emit a common, public mutex symbol
   ".gomp_critical_user_<name>" so all TUs agree on one lock.  */
decl = create_tmp_var_raw (ptr_type_node, NULL);
new_str = ACONCAT ((".gomp_critical_user_",
IDENTIFIER_POINTER (name), NULL));
DECL_NAME (decl) = get_identifier (new_str);
TREE_PUBLIC (decl) = 1;
TREE_STATIC (decl) = 1;
DECL_COMMON (decl) = 1;
DECL_ARTIFICIAL (decl) = 1;
DECL_IGNORED_P (decl) = 1;
cgraph_varpool_finalize_decl (decl);
splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
(splay_tree_value) decl);
}
else
decl = (tree) n->value;
args = tree_cons (NULL, build_fold_addr_expr (decl), NULL);
lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
lock = build_function_call_expr (lock, args);
args = tree_cons (NULL, build_fold_addr_expr (decl), NULL);
unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
unlock = build_function_call_expr (unlock, args);
}
else
{
/* Unnamed critical sections all share the runtime's default lock.  */
lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
lock = build_function_call_expr (lock, NULL);
unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
unlock = build_function_call_expr (unlock, NULL);
}
push_gimplify_context ();
block = make_node (BLOCK);
*stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
TREE_SIDE_EFFECTS (bind) = 1;
append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
gimplify_and_add (lock, &BIND_EXPR_BODY (bind));
lower_omp (&OMP_CRITICAL_BODY (stmt), ctx);
maybe_catch_exception (&OMP_CRITICAL_BODY (stmt));
append_to_statement_list (OMP_CRITICAL_BODY (stmt), &BIND_EXPR_BODY (bind));
OMP_CRITICAL_BODY (stmt) = NULL;
gimplify_and_add (unlock, &BIND_EXPR_BODY (bind));
t = make_node (OMP_RETURN);
OMP_RETURN_NOWAIT (t) = 1;
append_to_statement_list (t, &BIND_EXPR_BODY (bind));
pop_gimplify_context (bind);
BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
}
/* A subroutine of lower_omp_for.  Generate code to emit the predicate
   for a lastprivate clause.  Given a loop control predicate of (V
   cond N2), we gate the clause on (!(V cond N2)).  The lowered form
   is appended to *DLIST, iterator initialization is appended to
   *BODY_P.  */
static void
lower_omp_for_lastprivate (struct omp_for_data *fd, tree *body_p,
tree *dlist, struct omp_context *ctx)
{
tree clauses, cond, stmts, vinit, t;
enum tree_code cond_code;
/* Invert the loop condition: LT becomes GE, otherwise LE.  */
cond_code = fd->cond_code;
cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
/* When possible, use a strict equality expression.  This can let VRP
type optimizations deduce the value and remove a copy.  */
if (host_integerp (fd->step, 0))
{
HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->step);
if (step == 1 || step == -1)
cond_code = EQ_EXPR;
}
cond = build2 (cond_code, boolean_type_node, fd->v, fd->n2);
clauses = OMP_FOR_CLAUSES (fd->for_stmt);
stmts = NULL;
lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
if (stmts != NULL)
{
append_to_statement_list (stmts, dlist);
/* Optimize: v = 0; is usually cheaper than v = some_other_constant.  */
vinit = fd->n1;
if (cond_code == EQ_EXPR
&& host_integerp (fd->n2, 0)
&& ! integer_zerop (fd->n2))
vinit = build_int_cst (TREE_TYPE (fd->v), 0);
/* Initialize the iterator variable, so that threads that don't execute
any iterations don't execute the lastprivate clauses by accident.  */
t = build2 (MODIFY_EXPR, void_type_node, fd->v, vinit);
gimplify_and_add (t, body_p);
}
}
/* Lower code for an OpenMP loop directive.  Replaces *STMT_P with a new
   BIND_EXPR containing the pre-body, lowered clauses, the OMP_FOR itself,
   its body, an OMP_CONTINUE marker, exit clauses and an OMP_RETURN.  */
static void
lower_omp_for (tree *stmt_p, omp_context *ctx)
{
tree t, stmt, ilist, dlist, new_stmt, *body_p, *rhs_p;
struct omp_for_data fd;
stmt = *stmt_p;
push_gimplify_context ();
lower_omp (&OMP_FOR_PRE_BODY (stmt), ctx);
lower_omp (&OMP_FOR_BODY (stmt), ctx);
/* Move declaration of temporaries in the loop body before we make
it go away.  */
if (TREE_CODE (OMP_FOR_BODY (stmt)) == BIND_EXPR)
record_vars_into (BIND_EXPR_VARS (OMP_FOR_BODY (stmt)), ctx->cb.dst_fn);
new_stmt = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
TREE_SIDE_EFFECTS (new_stmt) = 1;
body_p = &BIND_EXPR_BODY (new_stmt);
/* The pre-body and input clauses go before the lowered OMP_FOR.  */
ilist = NULL;
dlist = NULL;
append_to_statement_list (OMP_FOR_PRE_BODY (stmt), body_p);
lower_rec_input_clauses (OMP_FOR_CLAUSES (stmt), body_p, &dlist, ctx);
/* Lower the header expressions.  At this point, we can assume that
the header is of the form:
#pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
We just need to make sure that VAL1, VAL2 and VAL3 are lowered
using the .omp_data_s mapping, if needed.  */
rhs_p = &TREE_OPERAND (OMP_FOR_INIT (stmt), 1);
if (!is_gimple_min_invariant (*rhs_p))
*rhs_p = get_formal_tmp_var (*rhs_p, body_p);
rhs_p = &TREE_OPERAND (OMP_FOR_COND (stmt), 1);
if (!is_gimple_min_invariant (*rhs_p))
*rhs_p = get_formal_tmp_var (*rhs_p, body_p);
/* The increment is V = V op VAL3; VAL3 is operand 1 of the inner op.  */
rhs_p = &TREE_OPERAND (TREE_OPERAND (OMP_FOR_INCR (stmt), 1), 1);
if (!is_gimple_min_invariant (*rhs_p))
*rhs_p = get_formal_tmp_var (*rhs_p, body_p);
/* Once lowered, extract the bounds and clauses.  */
extract_omp_for_data (stmt, &fd);
lower_omp_for_lastprivate (&fd, body_p, &dlist, ctx);
append_to_statement_list (stmt, body_p);
append_to_statement_list (OMP_FOR_BODY (stmt), body_p);
t = make_node (OMP_CONTINUE);
append_to_statement_list (t, body_p);
/* After the loop, add exit clauses.  */
lower_reduction_clauses (OMP_FOR_CLAUSES (stmt), body_p, ctx);
append_to_statement_list (dlist, body_p);
maybe_catch_exception (body_p);
/* Region exit marker goes at the end of the loop body.  */
t = make_node (OMP_RETURN);
OMP_RETURN_NOWAIT (t) = fd.have_nowait;
append_to_statement_list (t, body_p);
pop_gimplify_context (NULL_TREE);
record_vars_into (ctx->block_vars, ctx->cb.dst_fn);
OMP_FOR_BODY (stmt) = NULL_TREE;
OMP_FOR_PRE_BODY (stmt) = NULL_TREE;
*stmt_p = new_stmt;
}
/* Callback for walk_stmts.  Helper for detecting a combined parallel:
   *INFO ends up 1 iff the walked statement list contains exactly one
   OMP_FOR or OMP_SECTIONS and nothing else; any other statement, or a
   second workshare, forces it to -1.  (The old comment said OMP_FOR or
   OMP_PARALLEL; the code matches OMP_FOR/OMP_SECTIONS.)  */
static tree
check_combined_parallel (tree *tp, int *walk_subtrees, void *data)
{
struct walk_stmt_info *wi = data;
int *info = wi->info;
*walk_subtrees = 0;
switch (TREE_CODE (*tp))
{
case OMP_FOR:
case OMP_SECTIONS:
/* First workshare seen -> 1; a second one -> -1.  */
*info = *info == 0 ? 1 : -1;
break;
default:
*info = -1;
break;
}
return NULL;
}
/* Lower the OpenMP parallel directive in *STMT_P.  CTX holds context
   information for the directive.  Builds the sender/receiver data block,
   lowers clauses and body, and wraps everything in a new BIND_EXPR.  */
static void
lower_omp_parallel (tree *stmt_p, omp_context *ctx)
{
tree clauses, par_bind, par_body, new_body, bind;
tree olist, ilist, par_olist, par_ilist;
tree stmt, child_fn, t;
stmt = *stmt_p;
clauses = OMP_PARALLEL_CLAUSES (stmt);
par_bind = OMP_PARALLEL_BODY (stmt);
par_body = BIND_EXPR_BODY (par_bind);
child_fn = ctx->cb.dst_fn;
/* If not already marked, check whether this is a combined
   parallel-workshare (body is exactly one OMP_FOR/OMP_SECTIONS).  */
if (!OMP_PARALLEL_COMBINED (stmt))
{
struct walk_stmt_info wi;
int ws_num = 0;
memset (&wi, 0, sizeof (wi));
wi.callback = check_combined_parallel;
wi.info = &ws_num;
wi.val_only = true;
walk_stmts (&wi, &par_bind);
if (ws_num == 1)
OMP_PARALLEL_COMBINED (stmt) = 1;
}
push_gimplify_context ();
par_olist = NULL_TREE;
par_ilist = NULL_TREE;
lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
lower_omp (&par_body, ctx);
lower_reduction_clauses (clauses, &par_olist, ctx);
/* Declare all the variables created by mapping and the variables
declared in the scope of the parallel body.  */
record_vars_into (ctx->block_vars, child_fn);
record_vars_into (BIND_EXPR_VARS (par_bind), child_fn);
if (ctx->record_type)
{
/* .omp_data_o is the block of shared data sent to the child fn.  */
ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_data_o");
OMP_PARALLEL_DATA_ARG (stmt) = ctx->sender_decl;
}
olist = NULL_TREE;
ilist = NULL_TREE;
lower_send_clauses (clauses, &ilist, &olist, ctx);
lower_send_shared_vars (&ilist, &olist, ctx);
/* Once all the expansions are done, sequence all the different
fragments inside OMP_PARALLEL_BODY.  */
bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
append_to_statement_list (ilist, &BIND_EXPR_BODY (bind));
new_body = alloc_stmt_list ();
if (ctx->record_type)
{
t = build_fold_addr_expr (ctx->sender_decl);
/* fixup_child_record_type might have changed receiver_decl's type.  */
t = fold_convert (TREE_TYPE (ctx->receiver_decl), t);
t = build2 (MODIFY_EXPR, void_type_node, ctx->receiver_decl, t);
append_to_statement_list (t, &new_body);
}
append_to_statement_list (par_ilist, &new_body);
append_to_statement_list (par_body, &new_body);
append_to_statement_list (par_olist, &new_body);
maybe_catch_exception (&new_body);
t = make_node (OMP_RETURN);
append_to_statement_list (t, &new_body);
OMP_PARALLEL_BODY (stmt) = new_body;
append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
append_to_statement_list (olist, &BIND_EXPR_BODY (bind));
*stmt_p = bind;
pop_gimplify_context (NULL_TREE);
}
/* Pass *TP back through the gimplifier within the context determined by WI.
   This handles replacement of DECL_VALUE_EXPR, as well as adjusting the
   flags on ADDR_EXPR.  Any statements the gimplifier produces are linked
   in before the current statement iterator position.  */
static void
lower_regimplify (tree *tp, struct walk_stmt_info *wi)
{
enum gimplify_status gs;
tree pre = NULL;
/* Pick the gimple predicate matching how *TP is being used.  */
if (wi->is_lhs)
gs = gimplify_expr (tp, &pre, NULL, is_gimple_lvalue, fb_lvalue);
else if (wi->val_only)
gs = gimplify_expr (tp, &pre, NULL, is_gimple_val, fb_rvalue);
else
gs = gimplify_expr (tp, &pre, NULL, is_gimple_formal_tmp_var, fb_rvalue);
gcc_assert (gs == GS_ALL_DONE);
if (pre)
tsi_link_before (&wi->tsi, pre, TSI_SAME_STMT);
}
/* Copy EXP into a fresh temporary and return the temporary.  The
   initialization statement (tmp = EXP) is inserted before TSI, carrying
   the location of the statement at TSI.  */
static tree
init_tmp_var (tree exp, tree_stmt_iterator *tsi)
{
tree t, stmt;
t = create_tmp_var (TREE_TYPE (exp), NULL);
/* Complex temporaries can live in registers when marked as such.  */
if (TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE)
DECL_COMPLEX_GIMPLE_REG_P (t) = 1;
stmt = build2 (MODIFY_EXPR, TREE_TYPE (t), t, exp);
SET_EXPR_LOCUS (stmt, EXPR_LOCUS (tsi_stmt (*tsi)));
tsi_link_before (tsi, stmt, TSI_SAME_STMT);
return t;
}
/* Similarly, but for a store: return a fresh temporary to be assigned
   in place of EXP, and insert the copy-back (EXP = tmp) after TSI.  */
static tree
save_tmp_var (tree exp, tree_stmt_iterator *tsi)
{
tree t, stmt;
t = create_tmp_var (TREE_TYPE (exp), NULL);
if (TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE)
DECL_COMPLEX_GIMPLE_REG_P (t) = 1;
/* Note operand order: here EXP is the lhs, the temporary the rhs.  */
stmt = build2 (MODIFY_EXPR, TREE_TYPE (t), exp, t);
SET_EXPR_LOCUS (stmt, EXPR_LOCUS (tsi_stmt (*tsi)));
tsi_link_after (tsi, stmt, TSI_SAME_STMT);
return t;
}
/* Callback for walk_stmts.  Lower the OpenMP directive pointed by TP,
   dispatching to the per-directive lowering routine; for non-directive
   nodes, regimplify references that may involve DECL_VALUE_EXPR.  */
static tree
lower_omp_1 (tree *tp, int *walk_subtrees, void *data)
{
struct walk_stmt_info *wi = data;
omp_context *ctx = wi->info;
tree t = *tp;
/* If we have issued syntax errors, avoid doing any heavy lifting.
Just replace the OpenMP directives with a NOP to avoid
confusing RTL expansion.  */
if (errorcount && OMP_DIRECTIVE_P (*tp))
{
*tp = build_empty_stmt ();
return NULL_TREE;
}
*walk_subtrees = 0;
switch (TREE_CODE (*tp))
{
case OMP_PARALLEL:
ctx = maybe_lookup_ctx (t);
/* Assert for consistency with the other directive cases:
   lower_omp_parallel dereferences CTX unconditionally, so a
   missing context would crash there anyway; fail fast here.  */
gcc_assert (ctx);
lower_omp_parallel (tp, ctx);
break;
case OMP_FOR:
ctx = maybe_lookup_ctx (t);
gcc_assert (ctx);
lower_omp_for (tp, ctx);
break;
case OMP_SECTIONS:
ctx = maybe_lookup_ctx (t);
gcc_assert (ctx);
lower_omp_sections (tp, ctx);
break;
case OMP_SINGLE:
ctx = maybe_lookup_ctx (t);
gcc_assert (ctx);
lower_omp_single (tp, ctx);
break;
case OMP_MASTER:
ctx = maybe_lookup_ctx (t);
gcc_assert (ctx);
lower_omp_master (tp, ctx);
break;
case OMP_ORDERED:
ctx = maybe_lookup_ctx (t);
gcc_assert (ctx);
lower_omp_ordered (tp, ctx);
break;
case OMP_CRITICAL:
ctx = maybe_lookup_ctx (t);
gcc_assert (ctx);
lower_omp_critical (tp, ctx);
break;
case VAR_DECL:
/* Variables with a DECL_VALUE_EXPR (remapped into .omp_data_s)
   must be regimplified, possibly through a load/store temp.  */
if (ctx && DECL_HAS_VALUE_EXPR_P (t))
{
lower_regimplify (&t, wi);
if (wi->val_only)
{
if (wi->is_lhs)
t = save_tmp_var (t, &wi->tsi);
else
t = init_tmp_var (t, &wi->tsi);
}
*tp = t;
}
break;
case ADDR_EXPR:
if (ctx)
lower_regimplify (tp, wi);
break;
case ARRAY_REF:
case ARRAY_RANGE_REF:
case REALPART_EXPR:
case IMAGPART_EXPR:
case COMPONENT_REF:
case VIEW_CONVERT_EXPR:
if (ctx)
lower_regimplify (tp, wi);
break;
case INDIRECT_REF:
/* Only the address operand needs regimplifying, as an rvalue.  */
if (ctx)
{
wi->is_lhs = false;
wi->val_only = true;
lower_regimplify (&TREE_OPERAND (t, 0), wi);
}
break;
default:
if (!TYPE_P (t) && !DECL_P (t))
*walk_subtrees = 1;
break;
}
return NULL_TREE;
}
/* Walk the statement list at *STMT_P and lower every OpenMP construct
   found, using CTX as the enclosing omp context (NULL at top level).  */
static void
lower_omp (tree *stmt_p, omp_context *ctx)
{
struct walk_stmt_info wi;
memset (&wi, 0, sizeof (wi));
wi.callback = lower_omp_1;
wi.info = ctx;
wi.val_only = true;
wi.want_locations = true;
walk_stmts (&wi, stmt_p);
}
/* Main entry point of the omplower pass: scan the current function for
   OpenMP directives, then lower them if any contexts were created.  */
static unsigned int
execute_lower_omp (void)
{
all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
delete_omp_context);
scan_omp (&DECL_SAVED_TREE (current_function_decl), NULL);
gcc_assert (parallel_nesting_level == 0);
/* Only walk the function again if scan_omp recorded any contexts.  */
if (all_contexts->root)
lower_omp (&DECL_SAVED_TREE (current_function_decl), NULL);
if (all_contexts)
{
splay_tree_delete (all_contexts);
all_contexts = NULL;
}
return 0;
}
static bool
gate_lower_omp (void)
{
return flag_openmp != 0;
}
/* Pass descriptor for OpenMP lowering.  Runs on any gimple and provides
   PROP_gimple_lomp (lowered OpenMP form).  */
struct tree_opt_pass pass_lower_omp =
{
"omplower", /* name */
gate_lower_omp, /* gate */
execute_lower_omp, /* execute */
NULL, /* sub */
NULL, /* next */
0, /* static_pass_number */
0, /* tv_id */
PROP_gimple_any, /* properties_required */
PROP_gimple_lomp, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
TODO_dump_func, /* todo_flags_finish */
0 /* letter */
};
/* The following is a utility to diagnose OpenMP structured block violations.
It is not part of the "omplower" pass, as that's invoked too late.  It
should be invoked by the respective front ends after gimplification.  */
/* Map from LABEL_DECL to the TREE_LIST context chain it was found in
   (built by diagnose_sb_1, consumed by diagnose_sb_2).  */
static splay_tree all_labels;
/* Check for mismatched contexts and generate an error if needed.  Return
true if an error is detected.  BRANCH_CTX is the context of the branch
statement; LABEL_CTX the context chain of the destination label.  */
static bool
diagnose_sb_0 (tree *stmt_p, tree branch_ctx, tree label_ctx)
{
bool exit_p = true;
if ((label_ctx ? TREE_VALUE (label_ctx) : NULL) == branch_ctx)
return false;
/* Try to avoid confusing the user by producing an error message
with correct "exit" or "enter" verbiage.  We prefer "exit"
unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
if (branch_ctx == NULL)
exit_p = false;
else
{
while (label_ctx)
{
if (TREE_VALUE (label_ctx) == branch_ctx)
{
exit_p = false;
break;
}
label_ctx = TREE_CHAIN (label_ctx);
}
}
if (exit_p)
error ("invalid exit from OpenMP structured block");
else
error ("invalid entry to OpenMP structured block");
/* Replace the offending statement so later passes see valid code.  */
*stmt_p = build_empty_stmt ();
return true;
}
/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
where in the tree each label is found (into ALL_LABELS).  */
static tree
diagnose_sb_1 (tree *tp, int *walk_subtrees, void *data)
{
struct walk_stmt_info *wi = data;
tree context = (tree) wi->info;
tree inner_context;
tree t = *tp;
*walk_subtrees = 0;
switch (TREE_CODE (t))
{
case OMP_PARALLEL:
case OMP_SECTIONS:
case OMP_SINGLE:
walk_tree (&OMP_CLAUSES (t), diagnose_sb_1, wi, NULL);
/* FALLTHRU */
case OMP_SECTION:
case OMP_MASTER:
case OMP_ORDERED:
case OMP_CRITICAL:
/* The minimal context here is just a tree of statements.  */
inner_context = tree_cons (NULL, t, context);
wi->info = inner_context;
walk_stmts (wi, &OMP_BODY (t));
wi->info = context;
break;
case OMP_FOR:
walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_1, wi, NULL);
inner_context = tree_cons (NULL, t, context);
wi->info = inner_context;
/* The loop header expressions belong to the inner context too.  */
walk_tree (&OMP_FOR_INIT (t), diagnose_sb_1, wi, NULL);
walk_tree (&OMP_FOR_COND (t), diagnose_sb_1, wi, NULL);
walk_tree (&OMP_FOR_INCR (t), diagnose_sb_1, wi, NULL);
walk_stmts (wi, &OMP_FOR_PRE_BODY (t));
walk_stmts (wi, &OMP_FOR_BODY (t));
wi->info = context;
break;
case LABEL_EXPR:
/* Remember in which context chain each label lives.  */
splay_tree_insert (all_labels, (splay_tree_key) LABEL_EXPR_LABEL (t),
(splay_tree_value) context);
break;
default:
break;
}
return NULL_TREE;
}
/* Pass 2: Check each branch and see if its context differs from that of
the destination label's context.  */
static tree
diagnose_sb_2 (tree *tp, int *walk_subtrees, void *data)
{
struct walk_stmt_info *wi = data;
tree context = (tree) wi->info;
splay_tree_node n;
tree t = *tp;
*walk_subtrees = 0;
switch (TREE_CODE (t))
{
case OMP_PARALLEL:
case OMP_SECTIONS:
case OMP_SINGLE:
walk_tree (&OMP_CLAUSES (t), diagnose_sb_2, wi, NULL);
/* FALLTHRU */
case OMP_SECTION:
case OMP_MASTER:
case OMP_ORDERED:
case OMP_CRITICAL:
wi->info = t;
walk_stmts (wi, &OMP_BODY (t));
wi->info = context;
break;
case OMP_FOR:
walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_2, wi, NULL);
wi->info = t;
walk_tree (&OMP_FOR_INIT (t), diagnose_sb_2, wi, NULL);
walk_tree (&OMP_FOR_COND (t), diagnose_sb_2, wi, NULL);
walk_tree (&OMP_FOR_INCR (t), diagnose_sb_2, wi, NULL);
walk_stmts (wi, &OMP_FOR_PRE_BODY (t));
walk_stmts (wi, &OMP_FOR_BODY (t));
wi->info = context;
break;
case GOTO_EXPR:
{
tree lab = GOTO_DESTINATION (t);
/* Computed gotos have a non-LABEL_DECL destination; skip them.  */
if (TREE_CODE (lab) != LABEL_DECL)
break;
n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
diagnose_sb_0 (tp, context, n ? (tree) n->value : NULL_TREE);
}
break;
case SWITCH_EXPR:
{
tree vec = SWITCH_LABELS (t);
int i, len = TREE_VEC_LENGTH (vec);
for (i = 0; i < len; ++i)
{
tree lab = CASE_LABEL (TREE_VEC_ELT (vec, i));
n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
/* NOTE(review): unlike the GOTO_EXPR case above, N is
   dereferenced without a null check here — presumably case
   labels are always recorded by pass 1; verify.  */
if (diagnose_sb_0 (tp, context, (tree) n->value))
break;
}
}
break;
case RETURN_EXPR:
/* A return always exits every enclosing OpenMP region.  */
diagnose_sb_0 (tp, context, NULL_TREE);
break;
default:
break;
}
return NULL_TREE;
}
/* Diagnose invalid jumps into/out of OpenMP structured blocks in FNDECL.
   Runs the two walk passes above: pass 1 records label contexts, pass 2
   checks every branch against them.  */
void
diagnose_omp_structured_block_errors (tree fndecl)
{
tree save_current = current_function_decl;
struct walk_stmt_info wi;
current_function_decl = fndecl;
all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
memset (&wi, 0, sizeof (wi));
wi.callback = diagnose_sb_1;
walk_stmts (&wi, &DECL_SAVED_TREE (fndecl));
memset (&wi, 0, sizeof (wi));
wi.callback = diagnose_sb_2;
wi.want_locations = true;
wi.want_return_expr = true;
walk_stmts (&wi, &DECL_SAVED_TREE (fndecl));
splay_tree_delete (all_labels);
all_labels = NULL;
current_function_decl = save_current;
}
#include "gt-omp-low.h"
|
chain_access_omp.c | // RUN: ${CATO_ROOT}/src/scripts/cexecute_pass.py %s -o %t
// RUN: diff <(mpirun -np 1 %t) %s.reference_output
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
/* Store X through pointer P.  Kept out-of-line so the test below exercises
   a pointer access made from inside the parallel region via a call.  */
void use_pointer(int * p, int x)
{
p[0] = x;
}
/* Allocate one int on the heap, have every thread of the parallel region
   write 42 through the pointer via a helper call, then print the result.
   Expected output (matches the .reference_output): "p: 42".  */
int main()
{
int* ptr = (int*)malloc(sizeof(int));
*ptr = 0;
#pragma omp parallel
{
/* All threads store the same value, so the race is benign here.  */
use_pointer(ptr, 42);
}
printf("p: %d\n", *ptr);
free(ptr);
}
|
rt_dlange.c | #include "runtime.h"
/* Runtime dispatch for the dlange (matrix norm) kernel: enqueue through
   QUARK, or spawn an OmpSs task that computes the norm of the M-by-N
   matrix A (leading dimension LDA) into *RESULT.  NORM selects the norm
   type, SZEA/SZEW are the sizes of A and the workspace.  */
void RT_CORE_dlange(Quark *quark, Quark_Task_Flags *task_flags,
int norm, int M, int N,
const double *A, int LDA, int szeA,
int szeW, double *result)
{
plasma_context_t *plasma;
plasma = plasma_context_self();
if (plasma->runtime == PLASMA_QUARK) {
QUARK_CORE_dlange(
quark, task_flags,
norm, M, N,
A, LDA, szeA,
szeW, result);
}
else if (plasma->runtime == PLASMA_OMPSS) {
/* LAPACK requires a workspace of at least one element.  */
szeW = max(1, szeW);
/* NOTE(review): WORK is never freed — it must outlive the deferred
   task below, but nothing reclaims it afterwards; apparent leak.  */
double *work = malloc(szeW * sizeof(double));
#pragma omp register ([szeW]work)
#pragma omp target device (smp) no_copy_deps
#pragma omp task in([szeA]A) out([1]result)
*result = LAPACKE_dlange_work(LAPACK_COL_MAJOR,
lapack_const(norm), M, N, A, LDA, work);
}
}
/* As RT_CORE_dlange, but with an extra FAKE output of size SZEF used only
   for dependence tracking.  When RESULT and FAKE alias, the plain task is
   used; otherwise FAKE is declared as an additional task output so that
   consumers of FAKE wait for this norm computation.  */
void RT_CORE_dlange_f1(Quark *quark, Quark_Task_Flags *task_flags,
PLASMA_enum norm, int M, int N,
const double *A, int LDA, int szeA,
int szeW, double *result,
double *fake, int szeF)
{
plasma_context_t *plasma;
plasma = plasma_context_self();
if (plasma->runtime == PLASMA_QUARK) {
QUARK_CORE_dlange_f1(
quark, task_flags,
norm, M, N,
A, LDA, szeA,
szeW, result,
fake, szeF);
}
else if (plasma->runtime == PLASMA_OMPSS) {
szeW = max(1, szeW);
/* NOTE(review): WORK is never freed — see RT_CORE_dlange.  Also note
   the pragmas annotate the single statement of each unbraced if/else
   branch; keep it that way if editing.  */
double *work = malloc(szeW * sizeof(double));
if (result == fake)
#pragma omp target device (smp) no_copy_deps
#pragma omp task in([szeA]A) out([1]result)
*result = LAPACKE_dlange_work(LAPACK_COL_MAJOR,
lapack_const(norm), M, N, A, LDA, work);
else
#pragma omp target device (smp) no_copy_deps
#pragma omp task in([szeA]A) out([1]result) out([szeF]fake)
*result = LAPACKE_dlange_work(LAPACK_COL_MAJOR,
lapack_const(norm), M, N, A, LDA, work);
}
}
#if 0
void RT_CORE_dlansy(Quark *quark, Quark_Task_Flags *task_flags,
int norm, PLASMA_enum uplo, int N,
const double *A, int LDA, int szeA,
int szeW, double *result)
{
plasma_context_t *plasma;
plasma = plasma_context_self();
if (plasma->runtime == PLASMA_QUARK) {
QUARK_CORE_dlansy( quark, task_flags, norm, uplo, N,
A, LDA, szeA,
szeW, result);
} else if ( plasma->runtime == PLASMA_OMPSS ) {
szeW = max(1, szeW);
double *work = malloc(szeW * sizeof(double));
#pragma omp task in([szeA]A) out([1]result) no_copy_deps
*result = LAPACKE_dlansy_work(LAPACK_COL_MAJOR, lapack_const(norm),
lapack_const(uplo), N, A, LDA, work);
}
}
void RT_CORE_dlansy_f1(Quark *quark, Quark_Task_Flags *task_flags,
PLASMA_enum norm, PLASMA_enum uplo, int N,
const double *A, int LDA, int szeA,
int szeW, double *result,
double *fake, int szeF)
{
plasma_context_t *plasma;
plasma = plasma_context_self();
if (plasma->runtime == PLASMA_QUARK) {
QUARK_CORE_dlansy_f1( quark, task_flags, norm, uplo, N,
A, LDA, szeA,
szeW, result,
fake, szeF);
} else if ( plasma->runtime == PLASMA_OMPSS ) {
szeW = max(1, szeW);
double *work = malloc(szeW * sizeof(double));
if ( result == fake ) {
#pragma omp task in([szeA]A) out([1]result) no_copy_deps
*result = LAPACKE_dlansy_work(LAPACK_COL_MAJOR, lapack_const(norm),
lapack_const(uplo), N, A, LDA, work);
} else {
#pragma omp task in([szeA]A) out([1]result) fake([szeF]fake) no_copy_deps
*result = LAPACKE_dlansy_work(LAPACK_COL_MAJOR, lapack_const(norm),
lapack_const(uplo), N, A, LDA, work);
}
}
}
#endif
|
GB_binop__eq_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__eq_uint32)
// A.*B function (eWiseMult): GB (_AemultB_01__eq_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__eq_uint32)
// A.*B function (eWiseMult): GB (_AemultB_03__eq_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_uint32)
// A*D function (colscale): GB (_AxD__eq_uint32)
// D*A function (rowscale): GB (_DxB__eq_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__eq_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__eq_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_uint32)
// C=scalar+B GB (_bind1st__eq_uint32)
// C=scalar+B' GB (_bind1st_tran__eq_uint32)
// C=A+scalar GB (_bind2nd__eq_uint32)
// C=A'+scalar GB (_bind2nd_tran__eq_uint32)
// C type: bool
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EQ || GxB_NO_UINT32 || GxB_NO_EQ_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Applies GB_BINOP (cij = (aij == bij)) elementwise; all work is done by
// the included template, parameterized by the macros defined above.
GrB_Info GB (_Cdense_ewise3_noaccum__eq_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__eq_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// The template is compiled out (#if 0) for this operator — presumably
// because C (bool) and B (uint32_t) differ in type (GB_CTYPE_IS_BTYPE
// is 0); the stub is kept so the generated dispatch table links.
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__eq_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Compiled out (#if 0) for this operator, as in _Cdense_accumB above;
// the stub returns success without touching C.
#if 0
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Cx is typed as bool (the EQ result type); the template applies
// cij = (aij == djj) down each column of A.
GrB_Info GB (_AxD__eq_uint32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Mirror of _AxD above: cij = (dii == bij) across each row of B.
GrB_Info GB (_DxB__eq_uint32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Set-union elementwise apply of EQ; the workspace slicings declared here
// are allocated/used inside the template and released by GB_FREE_WORK.
GrB_Info GB (_AaddB__eq_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Set-intersection elementwise apply of EQ (general-case method 01).
GrB_Info GB (_AemultB_01__eq_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// For EQ, GB_BINOP_FLIP is 0 (the generator treats == as needing no
// flipped variant), so only the unflipped template path is compiled.
GrB_Info GB (_AemultB_02__eq_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// C<M> = A.*B (eWiseMult, method 03) where M is sparse/hypersparse and both
// A and B are bitmap/full, EQ (==) operator on uint32, boolean result.
// Auto-generated: the work is done by GB_emult_03_template.c.
GrB_Info GB (_AemultB_03__eq_uint32)
(
    GrB_Matrix C,                       // output matrix
    const GrB_Matrix M,                 // sparse/hypersparse mask
    const bool Mask_struct,             // if true, use the mask structurally
    const GrB_Matrix A,                 // bitmap/full input
    const GrB_Matrix B,                 // bitmap/full input
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    // kernel disabled at compile time; caller uses the generic method
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap, EQ (==) operator on
// uint32, boolean result.  Auto-generated: the work is done by
// GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__eq_uint32)
(
    GrB_Matrix C,                       // output matrix (bitmap)
    const int ewise_method,             // which eWiseMult variant to apply
    const GrB_Matrix M,                 // optional mask (may be NULL)
    const bool Mask_struct,             // if true, use the mask structurally
    const bool Mask_comp,               // if true, the mask is complemented
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    // kernel disabled at compile time; caller uses the generic method
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x == Bx [p]) for every entry present in B: apply the EQ binary
// operator with the scalar x bound as its first argument.
GrB_Info GB (_bind1st__eq_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B (NULL if B is full)
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap of B
        if (GBB (Bb, p))
        {
            Cx [p] = (x == GBX (Bx, p, false)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] == y) for every entry present in A: apply the EQ binary
// operator with the scalar y bound as its second argument.
GrB_Info GB (_bind2nd__eq_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A (NULL if A is full)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap of A
        if (GBB (Ab, p))
        {
            Cx [p] = (GBX (Ax, p, false) == y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is redefined so GB_unop_transpose.c computes (x == aij).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x == aij) ; \
}

// C = op (x, A'): transpose A and apply EQ with the scalar x bound first.
GrB_Info GB (_bind1st_tran__eq_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // one workspace per thread
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent code (auto-generated; here the
    // restored definition happens to be identical since both types are
    // uint32)
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is redefined so GB_unop_transpose.c computes (aij == y).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij == y) ; \
}

// C = op (A', y): transpose A and apply EQ with the scalar y bound second.
// Here A is the 1st operand, so GB_ATYPE needs no redefinition.
GrB_Info GB (_bind2nd_tran__eq_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,      // one workspace per thread
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
deconvolution_4x4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
// Transposed convolution (deconvolution), 4x4 kernel, stride 1.
// Each input pixel is multiplied by the full 4x4 kernel and accumulated into
// the 4x4 output window whose top-left corner is that pixel's position, so
// each input row contributes to 4 consecutive output rows.
// NOTE(review): assumes top_blob is sized for the 4x4 scatter
// (outw >= w+3 and at least h+3 rows per channel) and that channel rows are
// laid out contiguously — confirm against the caller that pads the output.
static void deconv4x4s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // one output channel per OpenMP task
    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        // seed the whole output channel with its bias (0 if no bias tensor)
        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);

        // accumulate the contribution of every input channel
        for (int q=0; q<inch; q++)
        {
            const float* img0 = bottom_blob.channel(q);

            // 16 weights for the (p,q) channel pair, stored as 4 rows of 4
            const float* kernel0 = kernel + p*inch*16 + q*16;

            const float* r0 = img0;

            const float* k0 = kernel0;          // kernel row 0
            const float* k1 = kernel0 + 4;      // kernel row 1
            const float* k2 = kernel0 + 8;      // kernel row 2
            const float* k3 = kernel0 + 12;     // kernel row 3

#if __ARM_NEON
            float32x4_t _k0 = vld1q_f32(k0);
            float32x4_t _k1 = vld1q_f32(k1);
            float32x4_t _k2 = vld1q_f32(k2);
            float32x4_t _k3 = vld1q_f32(k3);
#endif // __ARM_NEON

            for (int i = 0; i < h; i++)
            {
                // outptr0..3: the 4 output rows touched by input row i
                float* outptr = out.data + out.w * i;

                float* outptr0 = outptr;
                float* outptr1 = outptr0 + outw;
                float* outptr2 = outptr1 + outw;
                float* outptr3 = outptr2 + outw;

                int j = 0;
#if __ARM_NEON
                // vector path: 4 input pixels at a time.  For each kernel
                // row, 4 load/mla/store steps at column offsets 0..3; the
                // stores overlap, so the steps must stay sequential.
                for (; j+3<w; j+=4)
                {
                    float32x4_t _v = vld1q_f32(r0);

                    // output row 0, kernel row 0
                    float32x4_t _out00 = vld1q_f32(outptr0 + 0);
                    _out00 = vmlaq_lane_f32(_out00, _v, vget_low_f32(_k0), 0);
                    vst1q_f32(outptr0 + 0, _out00);

                    float32x4_t _out01 = vld1q_f32(outptr0 + 1);
                    _out01 = vmlaq_lane_f32(_out01, _v, vget_low_f32(_k0), 1);
                    vst1q_f32(outptr0 + 1, _out01);

                    float32x4_t _out02 = vld1q_f32(outptr0 + 2);
                    _out02 = vmlaq_lane_f32(_out02, _v, vget_high_f32(_k0), 0);
                    vst1q_f32(outptr0 + 2, _out02);

                    float32x4_t _out03 = vld1q_f32(outptr0 + 3);
                    _out03 = vmlaq_lane_f32(_out03, _v, vget_high_f32(_k0), 1);
                    vst1q_f32(outptr0 + 3, _out03);

                    // output row 1, kernel row 1
                    float32x4_t _out10 = vld1q_f32(outptr1 + 0);
                    _out10 = vmlaq_lane_f32(_out10, _v, vget_low_f32(_k1), 0);
                    vst1q_f32(outptr1 + 0, _out10);

                    float32x4_t _out11 = vld1q_f32(outptr1 + 1);
                    _out11 = vmlaq_lane_f32(_out11, _v, vget_low_f32(_k1), 1);
                    vst1q_f32(outptr1 + 1, _out11);

                    float32x4_t _out12 = vld1q_f32(outptr1 + 2);
                    _out12 = vmlaq_lane_f32(_out12, _v, vget_high_f32(_k1), 0);
                    vst1q_f32(outptr1 + 2, _out12);

                    float32x4_t _out13 = vld1q_f32(outptr1 + 3);
                    _out13 = vmlaq_lane_f32(_out13, _v, vget_high_f32(_k1), 1);
                    vst1q_f32(outptr1 + 3, _out13);

                    // output row 2, kernel row 2
                    float32x4_t _out20 = vld1q_f32(outptr2 + 0);
                    _out20 = vmlaq_lane_f32(_out20, _v, vget_low_f32(_k2), 0);
                    vst1q_f32(outptr2 + 0, _out20);

                    float32x4_t _out21 = vld1q_f32(outptr2 + 1);
                    _out21 = vmlaq_lane_f32(_out21, _v, vget_low_f32(_k2), 1);
                    vst1q_f32(outptr2 + 1, _out21);

                    float32x4_t _out22 = vld1q_f32(outptr2 + 2);
                    _out22 = vmlaq_lane_f32(_out22, _v, vget_high_f32(_k2), 0);
                    vst1q_f32(outptr2 + 2, _out22);

                    float32x4_t _out23 = vld1q_f32(outptr2 + 3);
                    _out23 = vmlaq_lane_f32(_out23, _v, vget_high_f32(_k2), 1);
                    vst1q_f32(outptr2 + 3, _out23);

                    // output row 3, kernel row 3
                    float32x4_t _out30 = vld1q_f32(outptr3 + 0);
                    _out30 = vmlaq_lane_f32(_out30, _v, vget_low_f32(_k3), 0);
                    vst1q_f32(outptr3 + 0, _out30);

                    float32x4_t _out31 = vld1q_f32(outptr3 + 1);
                    _out31 = vmlaq_lane_f32(_out31, _v, vget_low_f32(_k3), 1);
                    vst1q_f32(outptr3 + 1, _out31);

                    float32x4_t _out32 = vld1q_f32(outptr3 + 2);
                    _out32 = vmlaq_lane_f32(_out32, _v, vget_high_f32(_k3), 0);
                    vst1q_f32(outptr3 + 2, _out32);

                    float32x4_t _out33 = vld1q_f32(outptr3 + 3);
                    _out33 = vmlaq_lane_f32(_out33, _v, vget_high_f32(_k3), 1);
                    vst1q_f32(outptr3 + 3, _out33);

                    r0 += 4;
                    outptr0 += 4;
                    outptr1 += 4;
                    outptr2 += 4;
                    outptr3 += 4;
                }
#endif // __ARM_NEON
                // scalar tail: scatter one input pixel into its 4x4 window
                for (; j < w; j++)
                {
                    float val = r0[0];

                    outptr0[0] += val * k0[0];
                    outptr0[1] += val * k0[1];
                    outptr0[2] += val * k0[2];
                    outptr0[3] += val * k0[3];

                    outptr1[0] += val * k1[0];
                    outptr1[1] += val * k1[1];
                    outptr1[2] += val * k1[2];
                    outptr1[3] += val * k1[3];

                    outptr2[0] += val * k2[0];
                    outptr2[1] += val * k2[1];
                    outptr2[2] += val * k2[2];
                    outptr2[3] += val * k2[3];

                    outptr3[0] += val * k3[0];
                    outptr3[1] += val * k3[1];
                    outptr3[2] += val * k3[2];
                    outptr3[3] += val * k3[3];

                    r0++;
                    outptr0++;
                    outptr1++;
                    outptr2++;
                    outptr3++;
                }
            }
        }
    }
}
// Transposed convolution (deconvolution), 4x4 kernel, stride 2.
// Each input pixel (i,j) is multiplied by the full 4x4 kernel and
// accumulated into the 4x4 output window at (2*i, 2*j); adjacent input
// pixels land 2 output columns apart, so their windows overlap by 2.
// NOTE(review): assumes top_blob is sized for the stride-2 scatter and that
// channel rows are contiguous — confirm against the caller's output padding.
static void deconv4x4s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // one output channel per OpenMP task
    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        // seed the whole output channel with its bias (0 if no bias tensor)
        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);

        // accumulate the contribution of every input channel
        for (int q=0; q<inch; q++)
        {
            const float* img0 = bottom_blob.channel(q);

            // 16 weights for the (p,q) channel pair, stored as 4 rows of 4
            const float* kernel0 = kernel + p*inch*16 + q*16;

            const float* r0 = img0;

            const float* k0 = kernel0;          // kernel row 0
            const float* k1 = kernel0 + 4;      // kernel row 1
            const float* k2 = kernel0 + 8;      // kernel row 2
            const float* k3 = kernel0 + 12;     // kernel row 3

#if __ARM_NEON
            float32x4_t _k0 = vld1q_f32(k0);
            float32x4_t _k1 = vld1q_f32(k1);
            float32x4_t _k2 = vld1q_f32(k2);
            float32x4_t _k3 = vld1q_f32(k3);
#endif // __ARM_NEON

            for (int i = 0; i < h; i++)
            {
                // stride 2: input row i starts at output row 2*i
                float* outptr = out.data + out.w * i*2;

                float* outptr0 = outptr;
                float* outptr1 = outptr0 + outw;
                float* outptr2 = outptr1 + outw;
                float* outptr3 = outptr2 + outw;

                int j = 0;
#if __ARM_NEON
                // vector path: 4 input pixels cover 8 output columns.
                // vld2q/vst2q de-interleave the output into even and odd
                // columns; per kernel row, kernel columns 0/1 hit offsets
                // 0..7 and columns 2/3 hit offsets 2..9 (overlapping, so
                // the second load must follow the first store).
                for (; j+3<w; j+=4)
                {
                    float32x4_t _v = vld1q_f32(r0);

                    // row 0
                    float32x4x2_t _out0 = vld2q_f32(outptr0);
                    // even output columns 0,2,4,6 <- kernel col 0
                    _out0.val[0] = vmlaq_lane_f32(_out0.val[0], _v, vget_low_f32(_k0), 0);
                    // odd output columns 1,3,5,7 <- kernel col 1
                    _out0.val[1] = vmlaq_lane_f32(_out0.val[1], _v, vget_low_f32(_k0), 1);
                    vst2q_f32(outptr0, _out0);

                    _out0 = vld2q_f32(outptr0 + 2);
                    // columns 2,4,6,8 <- kernel col 2
                    _out0.val[0] = vmlaq_lane_f32(_out0.val[0], _v, vget_high_f32(_k0), 0);
                    // columns 3,5,7,9 <- kernel col 3
                    _out0.val[1] = vmlaq_lane_f32(_out0.val[1], _v, vget_high_f32(_k0), 1);
                    vst2q_f32(outptr0 + 2, _out0);

                    // row 1
                    float32x4x2_t _out1 = vld2q_f32(outptr1);
                    // columns 0,2,4,6
                    _out1.val[0] = vmlaq_lane_f32(_out1.val[0], _v, vget_low_f32(_k1), 0);
                    // columns 1,3,5,7
                    _out1.val[1] = vmlaq_lane_f32(_out1.val[1], _v, vget_low_f32(_k1), 1);
                    vst2q_f32(outptr1, _out1);

                    _out1 = vld2q_f32(outptr1 + 2);
                    // columns 2,4,6,8
                    _out1.val[0] = vmlaq_lane_f32(_out1.val[0], _v, vget_high_f32(_k1), 0);
                    // columns 3,5,7,9
                    _out1.val[1] = vmlaq_lane_f32(_out1.val[1], _v, vget_high_f32(_k1), 1);
                    vst2q_f32(outptr1 + 2, _out1);

                    // row 2
                    float32x4x2_t _out2 = vld2q_f32(outptr2);
                    _out2.val[0] = vmlaq_lane_f32(_out2.val[0], _v, vget_low_f32(_k2), 0);
                    _out2.val[1] = vmlaq_lane_f32(_out2.val[1], _v, vget_low_f32(_k2), 1);
                    vst2q_f32(outptr2, _out2);

                    _out2 = vld2q_f32(outptr2 + 2);
                    _out2.val[0] = vmlaq_lane_f32(_out2.val[0], _v, vget_high_f32(_k2), 0);
                    _out2.val[1] = vmlaq_lane_f32(_out2.val[1], _v, vget_high_f32(_k2), 1);
                    vst2q_f32(outptr2 + 2, _out2);

                    // row 3
                    float32x4x2_t _out3 = vld2q_f32(outptr3);
                    _out3.val[0] = vmlaq_lane_f32(_out3.val[0], _v, vget_low_f32(_k3), 0);
                    _out3.val[1] = vmlaq_lane_f32(_out3.val[1], _v, vget_low_f32(_k3), 1);
                    vst2q_f32(outptr3, _out3);

                    _out3 = vld2q_f32(outptr3 + 2);
                    _out3.val[0] = vmlaq_lane_f32(_out3.val[0], _v, vget_high_f32(_k3), 0);
                    _out3.val[1] = vmlaq_lane_f32(_out3.val[1], _v, vget_high_f32(_k3), 1);
                    vst2q_f32(outptr3 + 2, _out3);

                    r0 += 4;
                    outptr0 += 8;
                    outptr1 += 8;
                    outptr2 += 8;
                    outptr3 += 8;
                }
#endif // __ARM_NEON
                // scalar tail: scatter one input pixel into its 4x4 window,
                // then advance 2 output columns (stride 2)
                for (; j < w; j++)
                {
                    float val = r0[0];

                    outptr0[0] += val * k0[0];
                    outptr0[1] += val * k0[1];
                    outptr0[2] += val * k0[2];
                    outptr0[3] += val * k0[3];

                    outptr1[0] += val * k1[0];
                    outptr1[1] += val * k1[1];
                    outptr1[2] += val * k1[2];
                    outptr1[3] += val * k1[3];

                    outptr2[0] += val * k2[0];
                    outptr2[1] += val * k2[1];
                    outptr2[2] += val * k2[2];
                    outptr2[3] += val * k2[3];

                    outptr3[0] += val * k3[0];
                    outptr3[1] += val * k3[1];
                    outptr3[2] += val * k3[2];
                    outptr3[3] += val * k3[3];

                    r0++;
                    outptr0 += 2;
                    outptr1 += 2;
                    outptr2 += 2;
                    outptr3 += 2;
                }
            }
        }
    }
}
|
irbuilder_for_unsigned.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=45 -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// Worksharing loop with an UNSIGNED induction variable and a non-unit
// stride; exercises the OpenMPIRBuilder trip-count computation (see the
// generated __captured_stmt in the CHECK lines below).
extern "C" void workshareloop_unsigned(float *a, float *b, float *c, float *d) {
#pragma omp for
  for (unsigned i = 33; i < 32000000; i += 7) {
    a[i] = b[i] * c[i] * d[i];
  }
}
#endif // HEADER
// CHECK-LABEL: define {{[^@]+}}@workshareloop_unsigned
// CHECK-SAME: (float* noundef [[A:%.*]], float* noundef [[B:%.*]], float* noundef [[C:%.*]], float* noundef [[D:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK-NEXT: [[AGG_CAPTURED1:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 4
// CHECK-NEXT: [[DOTCOUNT_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[P_LASTITER:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[P_LOWERBOUND:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[P_UPPERBOUND:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[P_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK-NEXT: store i32 33, i32* [[I]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK-NEXT: store i32* [[I]], i32** [[TMP0]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[AGG_CAPTURED1]], i32 0, i32 0
// CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[I]], align 4
// CHECK-NEXT: store i32 [[TMP2]], i32* [[TMP1]], align 4
// CHECK-NEXT: call void @__captured_stmt(i32* [[DOTCOUNT_ADDR]], %struct.anon* [[AGG_CAPTURED]])
// CHECK-NEXT: [[DOTCOUNT:%.*]] = load i32, i32* [[DOTCOUNT_ADDR]], align 4
// CHECK-NEXT: br label [[OMP_LOOP_PREHEADER:%.*]]
// CHECK: omp_loop.preheader:
// CHECK-NEXT: store i32 0, i32* [[P_LOWERBOUND]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = sub i32 [[DOTCOUNT]], 1
// CHECK-NEXT: store i32 [[TMP3]], i32* [[P_UPPERBOUND]], align 4
// CHECK-NEXT: store i32 1, i32* [[P_STRIDE]], align 4
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 34, i32* [[P_LASTITER]], i32* [[P_LOWERBOUND]], i32* [[P_UPPERBOUND]], i32* [[P_STRIDE]], i32 1, i32 0)
// CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[P_LOWERBOUND]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[P_UPPERBOUND]], align 4
// CHECK-NEXT: [[TMP6:%.*]] = sub i32 [[TMP5]], [[TMP4]]
// CHECK-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], 1
// CHECK-NEXT: br label [[OMP_LOOP_HEADER:%.*]]
// CHECK: omp_loop.header:
// CHECK-NEXT: [[OMP_LOOP_IV:%.*]] = phi i32 [ 0, [[OMP_LOOP_PREHEADER]] ], [ [[OMP_LOOP_NEXT:%.*]], [[OMP_LOOP_INC:%.*]] ]
// CHECK-NEXT: br label [[OMP_LOOP_COND:%.*]]
// CHECK: omp_loop.cond:
// CHECK-NEXT: [[OMP_LOOP_CMP:%.*]] = icmp ult i32 [[OMP_LOOP_IV]], [[TMP7]]
// CHECK-NEXT: br i1 [[OMP_LOOP_CMP]], label [[OMP_LOOP_BODY:%.*]], label [[OMP_LOOP_EXIT:%.*]]
// CHECK: omp_loop.body:
// CHECK-NEXT: [[TMP8:%.*]] = add i32 [[OMP_LOOP_IV]], [[TMP4]]
// CHECK-NEXT: call void @__captured_stmt.1(i32* [[I]], i32 [[TMP8]], %struct.anon.0* [[AGG_CAPTURED1]])
// CHECK-NEXT: [[TMP9:%.*]] = load float*, float** [[B_ADDR]], align 8
// CHECK-NEXT: [[TMP10:%.*]] = load i32, i32* [[I]], align 4
// CHECK-NEXT: [[IDXPROM:%.*]] = zext i32 [[TMP10]] to i64
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP9]], i64 [[IDXPROM]]
// CHECK-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK-NEXT: [[TMP12:%.*]] = load float*, float** [[C_ADDR]], align 8
// CHECK-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4
// CHECK-NEXT: [[IDXPROM2:%.*]] = zext i32 [[TMP13]] to i64
// CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[TMP12]], i64 [[IDXPROM2]]
// CHECK-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX3]], align 4
// CHECK-NEXT: [[MUL:%.*]] = fmul float [[TMP11]], [[TMP14]]
// CHECK-NEXT: [[TMP15:%.*]] = load float*, float** [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4
// CHECK-NEXT: [[IDXPROM4:%.*]] = zext i32 [[TMP16]] to i64
// CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, float* [[TMP15]], i64 [[IDXPROM4]]
// CHECK-NEXT: [[TMP17:%.*]] = load float, float* [[ARRAYIDX5]], align 4
// CHECK-NEXT: [[MUL6:%.*]] = fmul float [[MUL]], [[TMP17]]
// CHECK-NEXT: [[TMP18:%.*]] = load float*, float** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP19:%.*]] = load i32, i32* [[I]], align 4
// CHECK-NEXT: [[IDXPROM7:%.*]] = zext i32 [[TMP19]] to i64
// CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, float* [[TMP18]], i64 [[IDXPROM7]]
// CHECK-NEXT: store float [[MUL6]], float* [[ARRAYIDX8]], align 4
// CHECK-NEXT: br label [[OMP_LOOP_INC]]
// CHECK: omp_loop.inc:
// CHECK-NEXT: [[OMP_LOOP_NEXT]] = add nuw i32 [[OMP_LOOP_IV]], 1
// CHECK-NEXT: br label [[OMP_LOOP_HEADER]]
// CHECK: omp_loop.exit:
// CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM]])
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM9:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM9]])
// CHECK-NEXT: br label [[OMP_LOOP_AFTER:%.*]]
// CHECK: omp_loop.after:
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@__captured_stmt
// CHECK-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[DISTANCE:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DISTANCE_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK-NEXT: [[DOTSTART:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTSTOP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTSTEP:%.*]] = alloca i32, align 4
// CHECK-NEXT: store i32* [[DISTANCE]], i32** [[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK-NEXT: store i32 [[TMP3]], i32* [[DOTSTART]], align 4
// CHECK-NEXT: store i32 32000000, i32* [[DOTSTOP]], align 4
// CHECK-NEXT: store i32 7, i32* [[DOTSTEP]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTSTART]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTSTOP]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP4]], [[TMP5]]
// CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK: cond.true:
// CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTSTOP]], align 4
// CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTSTART]], align 4
// CHECK-NEXT: [[SUB:%.*]] = sub i32 [[TMP6]], [[TMP7]]
// CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTSTEP]], align 4
// CHECK-NEXT: [[SUB1:%.*]] = sub i32 [[TMP8]], 1
// CHECK-NEXT: [[ADD:%.*]] = add i32 [[SUB]], [[SUB1]]
// CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTSTEP]], align 4
// CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], [[TMP9]]
// CHECK-NEXT: br label [[COND_END:%.*]]
// CHECK: cond.false:
// CHECK-NEXT: br label [[COND_END]]
// CHECK: cond.end:
// CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[DIV]], [[COND_TRUE]] ], [ 0, [[COND_FALSE]] ]
// CHECK-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store i32 [[COND]], i32* [[TMP10]], align 4
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@__captured_stmt.1
// CHECK-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[LOOPVAR:%.*]], i32 noundef [[LOGICAL:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[LOOPVAR_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[LOGICAL_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK-NEXT: store i32* [[LOOPVAR]], i32** [[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 [[LOGICAL]], i32* [[LOGICAL_ADDR]], align 4
// CHECK-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[LOGICAL_ADDR]], align 4
// CHECK-NEXT: [[MUL:%.*]] = mul i32 7, [[TMP3]]
// CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP2]], [[MUL]]
// CHECK-NEXT: [[TMP4:%.*]] = load i32*, i32** [[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 [[ADD]], i32* [[TMP4]], align 4
// CHECK-NEXT: ret void
//
|
loops.c | #include <stdio.h>
#include <math.h>
#define N 729
#define reps 1000
#include <omp.h>
double a[N][N], b[N][N], c[N];
int jmax[N];
void init1(void);
void init2(void);
void runloop(int);
void loop1chunk(int, int, int*);
void loop2chunk(int, int,int*);
void valid1(void);
void valid2(void);
// Benchmark driver: times `reps` repetitions of each of the two loops,
// validating the results after each timed section.
int main(int argc, char *argv[]) {

  double start1,start2,end1,end2;
  int r;

  init1();

  start1 = omp_get_wtime();
  for (r=0; r<reps; r++){
    runloop(1);
  }
  end1  = omp_get_wtime();

  valid1();

  // BUGFIX: the format string previously contained a stray ", calc" after
  // the newline, garbling the report line.
  printf("Total time for %d reps of loop 1 = %f\n", reps, (float)(end1-start1));

  init2();

  start2 = omp_get_wtime();
  for (r=0; r<reps; r++){
    runloop(2);
  }
  end2  = omp_get_wtime();

  valid2();

  printf("Total time for %d reps of loop 2 = %f\n", reps, (float)(end2-start2));

  return 0;
}
// Prepare loop-1 data: clear the accumulator a and fill b with 3.142*(i+j).
void init1(void){
  for (int row = 0; row < N; row++){
    for (int col = 0; col < N; col++){
      a[row][col] = 0.0;
      b[row][col] = 3.142*(row+col);
    }
  }
}
// Prepare loop-2 data: set the ragged inner-loop bounds jmax (N when
// i % (3*(i/30)+1) == 0, else 1), clear c, and fill b with (i*j+1)/N^2.
void init2(void){
  for (int i = 0; i < N; i++){
    int expr = i % (3*(i/30) + 1);
    jmax[i] = (expr == 0) ? N : 1;
    c[i] = 0.0;
  }

  for (int i = 0; i < N; i++){
    for (int j = 0; j < N; j++){
      b[i][j] = (double) (i*j+1) / (double) (N*N);
    }
  }
}
// Run the selected loop in parallel: each thread processes one contiguous
// chunk of ceil(N/nthreads) rows and reports its chunk size and work count.
void runloop(int loopid) {

#pragma omp parallel default(none) shared(loopid)
  {
    int tid  = omp_get_thread_num();
    int nthr = omp_get_num_threads();

    int chunk = (int) ceil((double)N / (double)nthr);
    int lo = tid * chunk;
    int hi = (tid + 1) * chunk;
    if (hi > N) hi = N;

    int cnt = 0;
    if (loopid == 1) {
      loop1chunk(lo, hi, &cnt);
    }
    else if (loopid == 2) {
      loop2chunk(lo, hi, &cnt);
    }

    printf("LOOP,%d,Thread,%d,SIZE,%d,CALCCNT,%d\n",loopid,tid,hi-lo,cnt);
  }
}
// Loop 1, rows [lo,hi): triangular update a[i][j] += cos(b[i][j]) for
// j > i, counting every update in *pcnt.
void loop1chunk(int lo, int hi , int *pcnt) {

  for (int i = lo; i < hi; i++){
    for (int j = N-1; j > i; j--){
      a[i][j] += cos(b[i][j]);
      ++(*pcnt);
    }
  }
}
// Loop 2, rows [lo,hi): ragged triple loop accumulating scaled logs of b
// into c[i]; the inner extent jmax[i] is set by init2.  Every innermost
// iteration is counted in *pcnt.
void loop2chunk(int lo, int hi,int *pcnt) {

  const double rN2 = 1.0 / (double) (N*N);

  for (int i = lo; i < hi; i++){
    for (int j = 0; j < jmax[i]; j++){
      for (int k = 0; k < j; k++){
        c[i] += (k+1) * log (b[i][j]) * rN2;
        ++(*pcnt);
      }
    }
  }
}
// Validation for loop 1: print the sum of every element of a.
void valid1(void) {

  double suma = 0.0;

  for (int i = 0; i < N; i++){
    for (int j = 0; j < N; j++){
      suma += a[i][j];
    }
  }
  printf("Loop 1 check: Sum of a is %lf\n", suma);
}
// Validation for loop 2: print the sum of every element of c.
void valid2(void) {

  double sumc = 0.0;

  for (int i = 0; i < N; i++){
    sumc += c[i];
  }
  printf("Loop 2 check: Sum of c is %f\n", sumc);
}
|
bshot_bits.h | #ifndef BSHOT_BITS_H
#define BSHOT_BITS_H
#include <bshot_headers_bits.h>
// Return the smallest of the first n elements of v (n must be positive).
// If ind is non-NULL, *ind receives the index of the first occurrence of
// that minimum.
template< typename T >
T minVect(const T *v, int n, int *ind=NULL)
{
    assert(n > 0);
    int best = 0;
    for (int i = 1; i < n; i++)
    {
        if (v[i] < v[best])
            best = i;
    }
    if (ind != NULL)
        *ind = best;
    return v[best];
}
// Binary SHOT (b-SHOT) descriptor: the 352-float SHOT descriptor quantized
// to one bit per value (see bshot::compute_bshot_from_SHOT).
class bshot_descriptor
{
public:
    std::bitset< 352 > bits;    // 88 groups of 4 bits, one per SHOT bin
};
// b-SHOT pipeline for a cloud pair: voxel-grid keypoints, normal
// estimation, SHOT description, and binarization into 352-bit descriptors.
// NOTE(review): the *2 members (cloud2, cloud2_normals, ...) are declared
// but every method only processes cloud1 — all cloud2 paths below are
// commented out.
class bshot
{

public :

    pcl::PointCloud<pcl::PointXYZ> cloud1, cloud2;          // input clouds
    pcl::PointCloud<pcl::Normal> cloud1_normals, cloud2_normals;
    pcl::PointCloud<pcl::PointXYZ> cloud1_keypoints, cloud2_keypoints;
    pcl::PointCloud<pcl::SHOT352> cloud1_shot, cloud2_shot; // raw SHOT
    std::vector<bshot_descriptor> cloud1_bshot, cloud2_bshot; // binarized

    // Estimate a surface normal at each cloud1 keypoint by plane-fitting
    // its radius-neighborhood in cloud1 (capped at 300 neighbors).
    // Keypoints with no neighbors (or non-finite coordinates) get NaN
    // normals and clear cloud1_normals.is_dense.
    void calculate_normals ( float radius )
    {
        // Estimate the normals.
        // NOTE(review): this estimator object is configured but never run;
        // the compute() calls are commented out below and the loop does the
        // work by hand instead.
        pcl::NormalEstimationOMP<pcl::PointXYZ, pcl::Normal> normalEstimation;
        // normalEstimation.setKSearch(radius);
        normalEstimation.setRadiusSearch(radius);
        normalEstimation.setNumberOfThreads(12);

        // pcl::search::KdTree<pcl::PointXYZ>::Ptr kdtree(new pcl::search::KdTree<pcl::PointXYZ>);
        // normalEstimation.setSearchMethod(kdtree);

        pcl::search::KdTree<pcl::PointXYZ> kdtree;
        kdtree.setInputCloud(cloud1.makeShared());

        std::vector<int> nn_indices;    // per-iteration scratch (private in the omp loop)
        std::vector<float> nn_dists;

        cloud1_normals.is_dense = true;
        // NOTE(review): sized to cloud1.size(), but the loop below writes
        // one normal per *keypoint* index; if there are fewer keypoints
        // than points, the tail stays uninitialized.  Confirm whether this
        // should be cloud1_keypoints.size().
        cloud1_normals.points.resize(cloud1.size());

#ifdef _OPENMP
#pragma omp parallel for private (nn_indices, nn_dists) num_threads(12)
#endif
        // Iterating over the entire index vector
        for (int idx = 0; idx < static_cast<int> (cloud1_keypoints.size ()); ++idx)
        {
            // no valid point or empty neighborhood: mark this normal NaN
            if (!pcl::isFinite(cloud1_keypoints[idx]) ||
                kdtree.radiusSearch(cloud1_keypoints[idx], radius, nn_indices, nn_dists, 300) == 0)
            {
                cloud1_normals.points[idx].normal[0] = cloud1_normals.points[idx].normal[1] = cloud1_normals.points[idx].normal[2] = cloud1_normals.points[idx].curvature = std::numeric_limits<float>::quiet_NaN ();

                cloud1_normals.is_dense = false;
                continue;
            }

            // plane fit over the neighborhood yields the normal + curvature
            Eigen::Vector4f n;
            pcl::computePointNormal<PointXYZ> (cloud1, nn_indices, n,
                                               cloud1_normals.points[idx].curvature);
            cloud1_normals.points[idx].normal_x = n[0];
            cloud1_normals.points[idx].normal_y = n[1];
            cloud1_normals.points[idx].normal_z = n[2];

            // orient all normals toward the (0,0,0) viewpoint for a
            // consistent sign
            pcl::flipNormalTowardsViewpoint (cloud1_keypoints[idx], 0, 0, 0,
                                             cloud1_normals.points[idx].normal[0],
                                             cloud1_normals.points[idx].normal[1],
                                             cloud1_normals.points[idx].normal[2]);
        }

        // normalEstimation.setInputCloud(cloud1.makeShared());
        // normalEstimation.compute(cloud1_normals);

        // normalEstimation.setInputCloud(cloud2.makeShared());
        // normalEstimation.compute(cloud2_normals);
    }

    // Downsample cloud1 with a voxel grid of the given leaf size; the
    // resulting centroids become the keypoints.
    void calculate_voxel_grid_keypoints ( float leaf_size )
    {
        // Find Keypoints on the input cloud
        pcl::VoxelGrid<pcl::PointXYZ> voxel_grid;
        voxel_grid.setLeafSize(leaf_size, leaf_size, leaf_size);

        voxel_grid.setInputCloud(cloud1.makeShared());
        voxel_grid.filter(cloud1_keypoints);

        // voxel_grid.setInputCloud(cloud2.makeShared());
        // voxel_grid.filter(cloud2_keypoints);
    }

    // Compute SHOT352 descriptors at the cloud1 keypoints, searching the
    // full cloud1 within `radius` and using cloud1_normals.
    void calculate_SHOT ( float radius )
    {
        // SHOT estimation object.
        pcl::SHOTEstimationOMP<pcl::PointXYZ, pcl::Normal, pcl::SHOT352> shot;
        shot.setRadiusSearch(radius);
        shot.setNumberOfThreads(12);

        pcl::search::KdTree<pcl::PointXYZ>::Ptr kdtree(new pcl::search::KdTree<pcl::PointXYZ>);
        shot.setSearchMethod(kdtree);

        shot.setInputCloud(cloud1_keypoints.makeShared());
        shot.setSearchSurface(cloud1.makeShared());
        shot.setInputNormals(cloud1_normals.makeShared());
        shot.compute(cloud1_shot);

        // shot.setInputCloud(cloud2_keypoints.makeShared());
        // shot.setSearchSurface(cloud2.makeShared());
        // shot.setInputNormals(cloud2_normals.makeShared());
        // shot.compute(cloud2_shot);
    }

    // Binarize the SHOT descriptors computed by calculate_SHOT.
    void compute_bshot()
    {
        compute_bshot_from_SHOT( cloud1_shot, cloud1_bshot);
        // compute_bshot_from_SHOT( cloud2_shot, cloud2_bshot);
    }

    // Quantize each 352-float SHOT descriptor into 352 bits.  The 352
    // values are taken in 88 groups of 4; within a group, the smallest
    // subset of values (singleton, then pair, then triple, tested in the
    // fixed order below) whose sum exceeds 90% of the group sum has its
    // bits set.  An all-zero group stays 0000; if no subset qualifies, all
    // four bits are set.
    void compute_bshot_from_SHOT(pcl::PointCloud<pcl::SHOT352>& shot_descriptors_here, std::vector<bshot_descriptor>& bshot_descriptors)
    {
        bshot_descriptors.resize(shot_descriptors_here.size());

        for (int i = 0; i < (int)shot_descriptors_here.size(); i++)
        {
            std::bitset < 352 > temp;
            temp.reset();

            for (int j = 0 ; j < 88 ; j++)
            {
                // copy the j-th group of 4 SHOT values
                float vec[4] = { 0 };
                for (int k = 0 ; k < 4 ; k++)
                {
                    vec[k] = shot_descriptors_here[i].descriptor[ j*4 + k ];
                }

                std::bitset< 4 > bit;
                bit.reset();

                float sum = vec[0]+vec[1]+vec[2]+vec[3];

                if (vec[0] == 0 and vec [1] == 0 and vec[2] == 0 and vec[3] == 0)
                {
                    //bin[0] = bin[1] = bin[2] = bin[3] = 0;
                    // by default , they are all ZEROS
                }
                // singletons: one value dominates (> 90% of the group sum)
                else if ( vec[0] > (0.9 * (sum) ) )
                {
                    bit.set(0);
                }
                else if ( vec[1] > (0.9 * (sum) ) )
                {
                    bit.set(1);
                }
                else if ( vec[2] > (0.9 * (sum) ) )
                {
                    bit.set(2);
                }
                else if ( vec[3] > (0.9 * (sum) ) )
                {
                    bit.set(3);
                }
                // pairs: two values together dominate
                else if ( (vec[0]+vec[1]) > (0.9 * (sum)) )
                {
                    bit.set(0);
                    bit.set(1);
                }
                else if ( (vec[1]+vec[2]) > (0.9 * (sum)) )
                {
                    bit.set(1);
                    bit.set(2);
                }
                else if ( (vec[2]+vec[3]) > (0.9 * (sum)) )
                {
                    ;
                    bit.set(2);
                    bit.set(3);
                }
                else if ( (vec[0]+vec[3]) > (0.9 * (sum)) )
                {
                    bit.set(0);
                    bit.set(3);
                }
                else if ( (vec[1]+vec[3]) > (0.9 * (sum)) )
                {
                    bit.set(1);
                    bit.set(3);
                }
                else if ( (vec[0]+vec[2]) > (0.9 * (sum)) )
                {
                    bit.set(0);
                    bit.set(2);
                }
                // triples: three values together dominate
                else if ( (vec[0]+ vec[1] +vec[2]) > (0.9 * (sum)) )
                {
                    bit.set(0);
                    bit.set(1);
                    bit.set(2);
                }
                else if ( (vec[1]+ vec[2] +vec[3]) > (0.9 * (sum)) )
                {
                    bit.set(1);
                    bit.set(2);
                    bit.set(3);
                }
                else if ( (vec[0]+ vec[2] +vec[3]) > (0.9 * (sum)) )
                {
                    bit.set(0);
                    bit.set(2);
                    bit.set(3);
                }
                else if ( (vec[0]+ vec[1] +vec[3]) > (0.9 * (sum)) )
                {
                    bit.set(0);
                    bit.set(1);
                    bit.set(3);
                }
                else
                {
                    // no smaller subset qualifies: mark all four
                    bit.set(0);
                    bit.set(1);
                    bit.set(2);
                    bit.set(3);
                }

                // splice the 4-bit code into the 352-bit descriptor
                if (bit.test(0))
                    temp.set(j*4);
                if (bit.test(1))
                    temp.set(j*4 + 1);
                if (bit.test(2))
                    temp.set(j*4 + 2);
                if (bit.test(3))
                    temp.set(j*4 + 3);
            }
            bshot_descriptors[i].bits = temp;
        }
    }

};
#endif
|
ast-dump-openmp-ordered.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one() { // bare "omp ordered" region around a null statement
#pragma omp ordered
  ;
}
void test_two(int x) { // "omp for" carrying a bare ordered clause
#pragma omp for ordered
  for (int i = 0; i < x; i++)
    ;
}
void test_three(int x) { // ordered(1) doacross loop with a source dependency
#pragma omp for ordered(1)
  for (int i = 0; i < x; i++) {
#pragma omp ordered depend(source)
  }
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-ordered.c:3:1, line:6:1> line:3:6 test_one 'void ()'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:17, line:6:1>
// CHECK-NEXT: | `-OMPOrderedDirective {{.*}} <line:4:1, col:20>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3>
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | |-NullStmt {{.*}} <col:3>
// CHECK-NEXT: | `-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-ordered.c:4:1) *const restrict'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:8:1, line:12:1> line:8:6 test_two 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:12:1>
// CHECK-NEXT: | `-OMPForDirective {{.*}} <line:9:1, col:24>
// CHECK-NEXT: | |-OMPOrderedClause {{.*}} <col:17, col:24>
// CHECK-NEXT: | | `-<<<NULL>>>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:10:3, line:11:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:10:3, line:11:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:10:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:11:5>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:9:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-ordered.c:9:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:10:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:14:1, line:19:1> line:14:6 test_three 'void (int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:24, line:19:1>
// CHECK-NEXT: `-OMPForDirective {{.*}} <line:15:1, col:27>
// CHECK-NEXT: |-OMPOrderedClause {{.*}} <col:17, col:26>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:25> 'int'
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:25> 'int' 1
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:16:3, line:18:3>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | |-ForStmt {{.*}} <line:16:3, line:18:3>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:16:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-CompoundStmt {{.*}} <col:31, line:18:3>
// CHECK-NEXT: | | `-OMPOrderedDirective {{.*}} <line:17:1, col:35> openmp_standalone_directive
// CHECK-NEXT: | | `-OMPDependClause {{.*}} <col:21, <invalid sloc>>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:15:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-ordered.c:15:1) *const restrict'
// CHECK-NEXT: | `-VarDecl {{.*}} <line:16:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
|
simd-12.c | /* { dg-do run } */
/* { dg-additional-options "-msse2" { target sse2_runtime } } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */
/* Runtime check for "omp for simd" combining linear(k : 3), reduction(+: s)
   and schedule(static, 16): k advances by exactly 3 per iteration and s
   accumulates the running k, so after 128 iterations s must equal
   3 * (1 + 2 + ... + 128) = 128 * 129 / 2 * 3 regardless of chunking. */
int
main ()
{
  int k = 0, i, s = 0;
#pragma omp parallel
#pragma omp for simd linear(k : 3) reduction(+: s) schedule (static, 16)
  for (i = 0; i < 128; i++)
    {
      k = k + 3;
      s = s + k;
    }
  if (s != 128 * 129 / 2 * 3) __builtin_abort (); /* wrong sum => test FAIL */
  return 0;
}
|
CB_PackingVectorNode.h | #ifndef _CB_PACK_V_NODE_H_
#define _CB_PACK_V_NODE_H_
/*
###################################################################################
#
# CBrick
#
# Copyright (c) 2017-2020 Research Institute for Information Technology(RIIT),
# Kyushu University. All rights reserved.
#
####################################################################################
*/
/*
* @file CB_PackingVectorNode.h
* @brief BrickComm class
*/
// #########################################################
/*
* @brief pack send data for I direction
* @param [in] array source array
* @param [in] gc number of guide cell layer to be sent
* @param [out] sendm send buffer of I- direction
* @param [out] sendp send buffer of I+ direction
* @param [in] nIDm Rank number of I- direction
* @param [in] nIDp Rank number of I+ direction
*/
// Pack the I- and I+ face layers of a node-centered 3-component vector field
// into contiguous send buffers (one buffer per face, only when a neighbor
// rank exists, i.e. nIDm/nIDp >= 0).
template <class T>
void BrickComm::pack_VXnode(const T *array,
                            const int gc,
                            T *sendm,
                            T *sendp,
                            const int nIDm,
                            const int nIDp)
{
  int NI = size[0];     // sub-domain extents (size[] member)
  int NJ = size[1];
  int NK = size[2];
  int VC = halo_width;  // guide-cell width the array is allocated with

  /* Node-sharing layout at an I face: the face node is owned by both ranks.
         <--gc-->
     rankA [NI-3] [NI-2] [NI-1] [NI] [NI+1]
     -----+------+------|------+------+-------> i
     rankB  [-2]   [-1]   [0]    [1]   [2]
         <--gc-->
  */

  // I- face: send the gc node layers starting at i=1 (i=0 is the shared
  // face node per the diagram above — TODO confirm against _IDX_V3D layout).
  if( nIDm >= 0 )
  {
#pragma omp parallel for collapse(3)
    for (int l=0; l<3; l++) {          // l: the 3 vector components
      for( int k=0; k<NK; k++ ){
        for( int j=0; j<NJ; j++ ){
#pragma novector
          for( int i=0; i<gc; i++ ){
            sendm[_IDX_VI(i,j,k,l,NJ,NK,gc)] = array[_IDX_V3D(i+1,j,k,l,NI,NJ,NK,VC)];
          }
        }
      }
    }
  }

  // I+ face: send gc layers starting at i = NI-2, i.e. ending one node
  // before the shared face node (rankA's NI-1 maps to rankB's 0).
  if( nIDp >= 0 )
  {
#pragma omp parallel for collapse(3)
    for (int l=0; l<3; l++) {
      for( int k=0; k<NK; k++ ){
        for( int j=0; j<NJ; j++ ){
#pragma novector
          for( int i=0; i<gc; i++ ){
            sendp[_IDX_VI(i,j,k,l,NJ,NK,gc)] = array[_IDX_V3D(NI-2+i,j,k,l,NI,NJ,NK,VC)];
          }
        }
      }
    }
  }
}
// #########################################################
/*
* @brief unpack send data for I direction
* @param [out] array dest array
* @param [in] gc number of guide cell layer to be sent
* @param [in] recvm recv buffer of I- direction
* @param [in] recvp recv buffer of I+ direction
* @param [in] nIDm Rank number of I- direction
* @param [in] nIDp Rank number of I+ direction
*/
// Unpack received I-direction halo layers into the ghost nodes of a
// node-centered 3-component vector field (counterpart of pack_VXnode).
template <class T>
void BrickComm::unpack_VXnode(T *array,
                              const int gc,
                              const T *recvm,
                              const T *recvp,
                              const int nIDm,
                              const int nIDp)
{
  int NI = size[0];     // sub-domain extents (size[] member)
  int NJ = size[1];
  int NK = size[2];
  int VC = halo_width;  // guide-cell width the array is allocated with

  // I- side: the neighbor packed its i = NI-2 .. NI-2+gc-1 layers, which map
  // to our ghost indices i-1 = -1 .. gc-2 under the shared-face-node layout.
  if( nIDm >= 0 )
  {
#pragma omp parallel for collapse(3)
    for (int l=0; l<3; l++) {          // l: the 3 vector components
      for( int k=0; k<NK; k++ ){
        for( int j=0; j<NJ; j++ ){
#pragma novector
          for( int i=0; i<gc; i++ ){
            array[_IDX_V3D(i-1,j,k,l,NI,NJ,NK,VC)] = recvm[_IDX_VI(i,j,k,l,NJ,NK,gc)];
          }
        }
      }
    }
  }

  // I+ side: the neighbor's i = 1 .. gc layers land at our i = NI .. NI+gc-1.
  if( nIDp >= 0 )
  {
#pragma omp parallel for collapse(3)
    for (int l=0; l<3; l++) {
      for( int k=0; k<NK; k++ ){
        for( int j=0; j<NJ; j++ ){
#pragma novector
          for( int i=0; i<gc; i++ ){
            array[_IDX_V3D(NI+i,j,k,l,NI,NJ,NK,VC)] = recvp[_IDX_VI(i,j,k,l,NJ,NK,gc)];
          }
        }
      }
    }
  }
}
// #########################################################
/*
* @brief pack send data for J direction
* @param [in] array source array
* @param [in] gc number of guide cell layer to be sent
* @param [out] sendm send buffer of J- direction
* @param [out] sendp send buffer of J+ direction
* @param [in] nIDm Rank number of J- direction
* @param [in] nIDp Rank number of J+ direction
*/
// Pack the J- and J+ face layers of a node-centered 3-component vector field
// into send buffers; same shared-face-node convention as pack_VXnode, applied
// along the J axis.
template <class T>
void BrickComm::pack_VYnode(const T *array,
                            const int gc,
                            T *sendm,
                            T *sendp,
                            const int nIDm,
                            const int nIDp)
{
  int NI = size[0];     // sub-domain extents (size[] member)
  int NJ = size[1];
  int NK = size[2];
  int VC = halo_width;  // guide-cell width the array is allocated with

  // J- face: send the gc layers starting at j=1 (j=0 is the shared face node).
  if( nIDm >= 0 )
  {
#pragma omp parallel for collapse(3)
    for (int l=0; l<3; l++) {          // l: the 3 vector components
      for( int k=0; k<NK; k++ ){
        for( int j=0; j<gc; j++ ){
#pragma novector
          for( int i=0; i<NI; i++ ){
            sendm[_IDX_VJ(i,j,k,l,NI,NK,gc)] = array[_IDX_V3D(i,j+1,k,l,NI,NJ,NK,VC)];
          }
        }
      }
    }
  }

  // J+ face: send gc layers starting at j = NJ-2 (one before the shared node).
  if( nIDp >= 0 )
  {
#pragma omp parallel for collapse(3)
    for (int l=0; l<3; l++) {
      for( int k=0; k<NK; k++ ){
        for( int j=0; j<gc; j++ ){
#pragma novector
          for( int i=0; i<NI; i++ ){
            sendp[_IDX_VJ(i,j,k,l,NI,NK,gc)] = array[_IDX_V3D(i,NJ-2+j,k,l,NI,NJ,NK,VC)];
          }
        }
      }
    }
  }
}
// #########################################################
/*
* @brief unpack send data for J direction
* @param [out] array dest array
* @param [in] gc number of guide cell layer to be sent
* @param [in] recvm recv buffer of J- direction
* @param [in] recvp recv buffer of J+ direction
* @param [in] nIDm Rank number of J- direction
* @param [in] nIDp Rank number of J+ direction
*/
// Unpack received J-direction halo layers into the ghost nodes
// (counterpart of pack_VYnode).
template <class T>
void BrickComm::unpack_VYnode(T *array,
                              const int gc,
                              const T *recvm,
                              const T *recvp,
                              const int nIDm,
                              const int nIDp)
{
  int NI = size[0];     // sub-domain extents (size[] member)
  int NJ = size[1];
  int NK = size[2];
  int VC = halo_width;  // guide-cell width the array is allocated with

  // J- side: neighbor data lands at ghost indices j-1 = -1 .. gc-2.
  if( nIDm >= 0 )
  {
#pragma omp parallel for collapse(3)
    for (int l=0; l<3; l++) {          // l: the 3 vector components
      for( int k=0; k<NK; k++ ){
        for( int j=0; j<gc; j++ ){
#pragma novector
          for( int i=0; i<NI; i++ ){
            array[_IDX_V3D(i,j-1,k,l,NI,NJ,NK,VC)] = recvm[_IDX_VJ(i,j,k,l,NI,NK,gc)];
          }
        }
      }
    }
  }

  // J+ side: neighbor data lands at j = NJ .. NJ+gc-1.
  if( nIDp >= 0 )
  {
#pragma omp parallel for collapse(3)
    for (int l=0; l<3; l++) {
      for( int k=0; k<NK; k++ ){
        for( int j=0; j<gc; j++ ){
#pragma novector
          for( int i=0; i<NI; i++ ){
            array[_IDX_V3D(i,NJ+j,k,l,NI,NJ,NK,VC)] = recvp[_IDX_VJ(i,j,k,l,NI,NK,gc)];
          }
        }
      }
    }
  }
}
// #########################################################
/*
* @brief pack send data for K direction
* @param [in] array source array
* @param [in] gc number of guide cell layer actually to be sent
* @param [out] sendm send buffer of K- direction
* @param [out] sendp send buffer of K+ direction
* @param [in] nIDm Rank number of K- direction
* @param [in] nIDp Rank number of K+ direction
*/
// Pack the K- and K+ face layers of a node-centered 3-component vector field
// into send buffers; same shared-face-node convention as pack_VXnode, applied
// along the K axis.
template <class T>
void BrickComm::pack_VZnode(const T *array,
                            const int gc,
                            T *sendm,
                            T *sendp,
                            const int nIDm,
                            const int nIDp)
{
  int NI = size[0];     // sub-domain extents (size[] member)
  int NJ = size[1];
  int NK = size[2];
  int VC = halo_width;  // guide-cell width the array is allocated with

  // K- face: send the gc layers starting at k=1 (k=0 is the shared face node).
  if( nIDm >= 0 )
  {
#pragma omp parallel for collapse(3)
    for (int l=0; l<3; l++) {          // l: the 3 vector components
      for( int k=0; k<gc; k++ ){
        for( int j=0; j<NJ; j++ ){
#pragma novector
          for( int i=0; i<NI; i++ ){
            sendm[_IDX_VK(i,j,k,l,NI,NJ,gc)] = array[_IDX_V3D(i,j,k+1,l,NI,NJ,NK,VC)];
          }
        }
      }
    }
  }

  // K+ face: send gc layers starting at k = NK-2 (one before the shared node).
  if( nIDp >= 0 )
  {
#pragma omp parallel for collapse(3)
    for (int l=0; l<3; l++) {
      for( int k=0; k<gc; k++ ){
        for( int j=0; j<NJ; j++ ){
#pragma novector
          for( int i=0; i<NI; i++ ){
            sendp[_IDX_VK(i,j,k,l,NI,NJ,gc)] = array[_IDX_V3D(i,j,NK-2+k,l,NI,NJ,NK,VC)];
          }
        }
      }
    }
  }
}
// #########################################################
/*
* @brief unpack send data for K direction
* @param [out] array dest array
* @param [in] gc number of guide cell layer to be sent
* @param [in] recvm recv buffer of K- direction
* @param [in] recvp recv buffer of K+ direction
* @param [in] nIDm Rank number of K- direction
* @param [in] nIDp Rank number of K+ direction
*/
// Unpack received K-direction halo layers into the ghost nodes
// (counterpart of pack_VZnode).
template <class T>
void BrickComm::unpack_VZnode(T *array,
                              const int gc,
                              const T *recvm,
                              const T *recvp,
                              const int nIDm,
                              const int nIDp)
{
  int NI = size[0];     // sub-domain extents (size[] member)
  int NJ = size[1];
  int NK = size[2];
  int VC = halo_width;  // guide-cell width the array is allocated with

  // K- side: neighbor data lands at ghost indices k-1 = -1 .. gc-2.
  if( nIDm >= 0 )
  {
#pragma omp parallel for collapse(3)
    for (int l=0; l<3; l++) {          // l: the 3 vector components
      for( int k=0; k<gc; k++ ){
        for( int j=0; j<NJ; j++ ){
#pragma novector
          for( int i=0; i<NI; i++ ){
            array[_IDX_V3D(i,j,k-1,l,NI,NJ,NK,VC)] = recvm[_IDX_VK(i,j,k,l,NI,NJ,gc)];
          }
        }
      }
    }
  }

  // K+ side: neighbor data lands at k = NK .. NK+gc-1.
  if( nIDp >= 0 )
  {
#pragma omp parallel for collapse(3)
    for (int l=0; l<3; l++) {
      for( int k=0; k<gc; k++ ){
        for( int j=0; j<NJ; j++ ){
#pragma novector
          for( int i=0; i<NI; i++ ){
            array[_IDX_V3D(i,j,NK+k,l,NI,NJ,NK,VC)] = recvp[_IDX_VK(i,j,k,l,NI,NJ,gc)];
          }
        }
      }
    }
  }
}
#ifdef _DIAGONAL_COMM
// #########################################################
/*
* @brief pack send data for diagonal edge
* @param [in] array source array
* @param [in] gc number of guide cell layer to be sent
* @param [out] sendbuf send buffer
* @param [out] recvbuf recv buffer
* @param [out] req Array of MPI request
* @retval true-success, false-fail
*/
// Pack and initiate the 12 diagonal-edge exchanges (4 X-edges, 4 Y-edges,
// 4 Z-edges) of a node-centered vector field.  For each edge that has a
// neighbor (comm_tbl[dir] >= 0): post the nonblocking receive, pack the
// gc x gc strip of interior nodes along the edge, post the nonblocking send,
// then advance the shared buffer offset.  req holds {recv, send} request
// pairs indexed by 2*dir.  Returns false as soon as any Irecv/Isend fails.
template <class T>
bool BrickComm::pack_VEnode(T *array,
                            const int gc,
                            T *sendbuf,
                            T *recvbuf,
                            MPI_Request *req)
{
  int NI = size[0];     // sub-domain extents (size[] member)
  int NJ = size[1];
  int NK = size[2];
  int VC = halo_width;  // guide-cell width the array is allocated with
  int tag = 0;          // only referenced by the commented-out raw MPI path below
  size_t ptr = 0;       // running offset into sendbuf/recvbuf (edges packed back to back)

  //// X edge ////
  for( int dir=int(E_mYmZ);dir<=int(E_pYpZ);dir++ )
  {
    if( comm_tbl[dir] >= 0 )
    {
      T *sendptr = &sendbuf[ptr];
      T *recvptr = &recvbuf[ptr];
      // X-edge strip: (NI-1) interior I-nodes x gc x gc, 3 components
      size_t sz = (NI-1) * gc * gc * 3;
      /* recv
      if ( MPI_SUCCESS != MPI_Irecv(recvptr,
                                    sz,
                                    dtype,
                                    comm_tbl[dir],
                                    tag,
                                    MPI_COMM_WORLD,
                                    &req[dir*2]) ) return false;
      */
      if ( !IrecvData(recvptr,
                      sz,
                      comm_tbl[dir],
                      &req[dir*2]) ) return false;

      // pack — interior ranges start at 1 (index 0 is the shared face node)
      switch(dir)
      {
        case int(E_mYmZ): // (-Y,-Z) edge: j,k in [1,gc]
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=1; k<=gc; k++ ){
              for( int j=1; j<=gc; j++ ){
                for( int i=1; i<NI; i++ ){
                  sendptr[_IDX_V3D(i-1,j-1,k-1,l,NI-1,gc,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
                }
              }
            }
          }
          break;

        case int(E_pYmZ): // (+Y,-Z) edge: j in [NJ-gc,NJ-1], k in [1,gc]
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=1; k<=gc; k++ ){
              for( int j=NJ-gc; j<NJ; j++ ){
                for( int i=1; i<NI; i++ ){
                  sendptr[_IDX_V3D(i-1,j-(NJ-gc),k-1,l,NI-1,gc,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
                }
              }
            }
          }
          break;

        case int(E_mYpZ): // (-Y,+Z) edge: j in [1,gc], k in [NK-gc,NK-1]
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=NK-gc; k<NK; k++ ){
              for( int j=1; j<=gc; j++ ){
                for( int i=1; i<NI; i++ ){
                  sendptr[_IDX_V3D(i-1,j-1,k-(NK-gc),l,NI-1,gc,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
                }
              }
            }
          }
          break;

        case int(E_pYpZ): // (+Y,+Z) edge: j in [NJ-gc,NJ-1], k in [NK-gc,NK-1]
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=NK-gc; k<NK; k++ ){
              for( int j=NJ-gc; j<NJ; j++ ){
                for( int i=1; i<NI; i++ ){
                  sendptr[_IDX_V3D(i-1,j-(NJ-gc),k-(NK-gc),l,NI-1,gc,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
                }
              }
            }
          }
          break;
      }
      /* send
      if ( MPI_SUCCESS != MPI_Isend(sendptr,
                                    sz,
                                    dtype,
                                    comm_tbl[dir],
                                    tag,
                                    MPI_COMM_WORLD,
                                    &req[dir*2+1]) ) return false;
      */
      if ( !IsendData(sendptr,
                      sz,
                      comm_tbl[dir],
                      &req[dir*2+1]) ) return false;
      // pointer
      ptr += sz;
    }
  }

  //// Y edge ////
  for( int dir=int(E_mXmZ);dir<=int(E_pXpZ);dir++ )
  {
    if( comm_tbl[dir] >= 0 )
    {
      T *sendptr = &sendbuf[ptr];
      T *recvptr = &recvbuf[ptr];
      // Y-edge strip: gc x (NJ-1) interior J-nodes x gc, 3 components
      size_t sz = gc * (NJ-1) * gc * 3;
      /* recv
      if ( MPI_SUCCESS != MPI_Irecv(recvptr,
                                    sz,
                                    dtype,
                                    comm_tbl[dir],
                                    tag,
                                    MPI_COMM_WORLD,
                                    &req[dir*2]) ) return false;
      */
      if ( !IrecvData(recvptr,
                      sz,
                      comm_tbl[dir],
                      &req[dir*2]) ) return false;

      // pack
      switch(dir)
      {
        case int(E_mXmZ): // (-X,-Z) edge: i,k in [1,gc]
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=1; k<=gc; k++ ){
              for( int j=1; j<NJ; j++ ){
                for( int i=1; i<=gc; i++ ){
                  sendptr[_IDX_V3D(i-1,j-1,k-1,l,gc,NJ-1,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
                }
              }
            }
          }
          break;

        case int(E_pXmZ): // (+X,-Z) edge: i in [NI-gc,NI-1], k in [1,gc]
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=1; k<=gc; k++ ){
              for( int j=1; j<NJ; j++ ){
                for( int i=NI-gc; i<NI; i++ ){
                  sendptr[_IDX_V3D(i-(NI-gc),j-1,k-1,l,gc,NJ-1,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
                }
              }
            }
          }
          break;

        case int(E_mXpZ): // (-X,+Z) edge: i in [1,gc], k in [NK-gc,NK-1]
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=NK-gc; k<NK; k++ ){
              for( int j=1; j<NJ; j++ ){
                for( int i=1; i<=gc; i++ ){
                  sendptr[_IDX_V3D(i-1,j-1,k-(NK-gc),l,gc,NJ-1,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
                }
              }
            }
          }
          break;

        case int(E_pXpZ): // (+X,+Z) edge: i in [NI-gc,NI-1], k in [NK-gc,NK-1]
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=NK-gc; k<NK; k++ ){
              for( int j=1; j<NJ; j++ ){
                for( int i=NI-gc; i<NI; i++ ){
                  sendptr[_IDX_V3D(i-(NI-gc),j-1,k-(NK-gc),l,gc,NJ-1,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
                }
              }
            }
          }
          break;
      }
      /* send
      if ( MPI_SUCCESS != MPI_Isend(sendptr,
                                    sz,
                                    dtype,
                                    comm_tbl[dir],
                                    tag,
                                    MPI_COMM_WORLD,
                                    &req[dir*2+1]) ) return false;
      */
      if ( !IsendData(sendptr,
                      sz,
                      comm_tbl[dir],
                      &req[dir*2+1]) ) return false;
      // pointer
      ptr += sz;
    }
  }

  //// Z edge ////
  for( int dir=int(E_mXmY);dir<=int(E_pXpY);dir++ )
  {
    if( comm_tbl[dir] >= 0 )
    {
      T *sendptr = &sendbuf[ptr];
      T *recvptr = &recvbuf[ptr];
      // Z-edge strip: gc x gc x (NK-1) interior K-nodes, 3 components
      size_t sz = gc * gc * (NK-1) * 3;
      /* recv
      if ( MPI_SUCCESS != MPI_Irecv(recvptr,
                                    sz,
                                    dtype,
                                    comm_tbl[dir],
                                    tag,
                                    MPI_COMM_WORLD,
                                    &req[dir*2]) ) return false;
      */
      if ( !IrecvData(recvptr,
                      sz,
                      comm_tbl[dir],
                      &req[dir*2]) ) return false;

      // pack
      switch(dir)
      {
        case int(E_mXmY): // (-X,-Y) edge: i,j in [1,gc]
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=1; k<NK; k++ ){
              for( int j=1; j<=gc; j++ ){
                for( int i=1; i<=gc; i++ ){
                  sendptr[_IDX_V3D(i-1,j-1,k-1,l,gc,gc,NK-1,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
                }
              }
            }
          }
          break;

        case int(E_pXmY): // (+X,-Y) edge: i in [NI-gc,NI-1], j in [1,gc]
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=1; k<NK; k++ ){
              for( int j=1; j<=gc; j++ ){
                for( int i=NI-gc; i<NI; i++ ){
                  sendptr[_IDX_V3D(i-(NI-gc),j-1,k-1,l,gc,gc,NK-1,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
                }
              }
            }
          }
          break;

        case int(E_mXpY): // (-X,+Y) edge: i in [1,gc], j in [NJ-gc,NJ-1]
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=1; k<NK; k++ ){
              for( int j=NJ-gc; j<NJ; j++ ){
                for( int i=1; i<=gc; i++ ){
                  sendptr[_IDX_V3D(i-1,j-(NJ-gc),k-1,l,gc,gc,NK-1,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
                }
              }
            }
          }
          break;

        case int(E_pXpY): // (+X,+Y) edge: i in [NI-gc,NI-1], j in [NJ-gc,NJ-1]
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=1; k<NK; k++ ){
              for( int j=NJ-gc; j<NJ; j++ ){
                for( int i=NI-gc; i<NI; i++ ){
                  sendptr[_IDX_V3D(i-(NI-gc),j-(NJ-gc),k-1,l,gc,gc,NK-1,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
                }
              }
            }
          }
          break;
      }
      /* send
      if ( MPI_SUCCESS != MPI_Isend(sendptr,
                                    sz,
                                    dtype,
                                    comm_tbl[dir],
                                    tag,
                                    MPI_COMM_WORLD,
                                    &req[dir*2+1]) ) return false;
      */
      if ( !IsendData(sendptr,
                      sz,
                      comm_tbl[dir],
                      &req[dir*2+1]) ) return false;
      // pointer
      ptr += sz;
    }
  }

  return true;
}
// #########################################################
/*
* @brief unpack send data for diagonal edge
* @param [out] array dest array
* @param [in] gc number of guide cell layer to be sent
* @param [in] recvbuf recv buffer
*/
// Unpack the 12 diagonal-edge receive buffers into the ghost-node regions of
// a node-centered vector field.  Must walk the directions in the same order
// as pack_VEnode so the running offset `ptr` addresses the matching strip.
// Callers are expected to have completed the requests first (e.g. Waitall) —
// TODO confirm against the call site.
template <class T>
void BrickComm::unpack_VEnode(T *array,
                              const int gc,
                              const T *recvbuf)
{
  int NI = size[0];     // sub-domain extents (size[] member)
  int NJ = size[1];
  int NK = size[2];
  int VC = halo_width;  // guide-cell width the array is allocated with
  size_t ptr = 0;       // running offset into recvbuf (same layout as pack_VEnode)

  //// X edge ////
  for( int dir=int(E_mYmZ);dir<=int(E_pYpZ);dir++ )
  {
    if( comm_tbl[dir] >= 0 )
    {
      const T *recvptr = &recvbuf[ptr];
      size_t sz = (NI-1) * gc * gc * 3;
      // unpack — ghost ranges: [1-gc,0] on minus sides, [N,N+gc-1] on plus sides
      switch(dir)
      {
        case int(E_mYmZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=1-gc; k<=0; k++ ){
              for( int j=1-gc; j<=0; j++ ){
                for( int i=1; i<NI; i++ ){
                  array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-1,j-(1-gc),k-(1-gc),l,NI-1,gc,gc,0)];
                }
              }
            }
          }
          break;

        case int(E_pYmZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=1-gc; k<=0; k++ ){
              for( int j=NJ; j<NJ+gc; j++ ){
                for( int i=1; i<NI; i++ ){
                  array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-1,j-(NJ),k-(1-gc),l,NI-1,gc,gc,0)];
                }
              }
            }
          }
          break;

        case int(E_mYpZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=NK; k<NK+gc; k++ ){
              for( int j=1-gc; j<=0; j++ ){
                for( int i=1; i<NI; i++ ){
                  array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-1,j-(1-gc),k-(NK),l,NI-1,gc,gc,0)];
                }
              }
            }
          }
          break;

        case int(E_pYpZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=NK; k<NK+gc; k++ ){
              for( int j=NJ; j<NJ+gc; j++ ){
                for( int i=1; i<NI; i++ ){
                  array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-1,j-(NJ),k-(NK),l,NI-1,gc,gc,0)];
                }
              }
            }
          }
          break;
      }
      ptr += sz;
    }
  }

  //// Y edge ////
  for( int dir=int(E_mXmZ);dir<=int(E_pXpZ);dir++ )
  {
    if( comm_tbl[dir] >= 0 )
    {
      const T *recvptr = &recvbuf[ptr];
      size_t sz = gc * (NJ-1) * gc * 3;
      // unpack
      switch(dir)
      {
        case int(E_mXmZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=1-gc; k<=0; k++ ){
              for( int j=1; j<NJ; j++ ){
                for( int i=1-gc; i<=0; i++ ){
                  array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(1-gc),j-1,k-(1-gc),l,gc,NJ-1,gc,0)];
                }
              }
            }
          }
          break;

        case int(E_pXmZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=1-gc; k<=0; k++ ){
              for( int j=1; j<NJ; j++ ){
                for( int i=NI; i<NI+gc; i++ ){
                  array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(NI),j-1,k-(1-gc),l,gc,NJ-1,gc,0)];
                }
              }
            }
          }
          break;

        case int(E_mXpZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=NK; k<NK+gc; k++ ){
              for( int j=1; j<NJ; j++ ){
                for( int i=1-gc; i<=0; i++ ){
                  array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(1-gc),j-1,k-(NK),l,gc,NJ-1,gc,0)];
                }
              }
            }
          }
          break;

        case int(E_pXpZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=NK; k<NK+gc; k++ ){
              for( int j=1; j<NJ; j++ ){
                for( int i=NI; i<NI+gc; i++ ){
                  array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(NI),j-1,k-(NK),l,gc,NJ-1,gc,0)];
                }
              }
            }
          }
          break;
      }
      ptr += sz;
    }
  }

  //// Z edge ////
  for( int dir=int(E_mXmY);dir<=int(E_pXpY);dir++ )
  {
    if( comm_tbl[dir] >= 0 )
    {
      const T *recvptr = &recvbuf[ptr];
      size_t sz = gc * gc * (NK-1) * 3;
      // unpack
      switch(dir)
      {
        case int(E_mXmY):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=1; k<NK; k++ ){
              for( int j=1-gc; j<=0; j++ ){
                for( int i=1-gc; i<=0; i++ ){
                  array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(1-gc),j-(1-gc),k-1,l,gc,gc,NK-1,0)];
                }
              }
            }
          }
          break;

        case int(E_pXmY):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=1; k<NK; k++ ){
              for( int j=1-gc; j<=0; j++ ){
                for( int i=NI; i<NI+gc; i++ ){
                  array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(NI),j-(1-gc),k-1,l,gc,gc,NK-1,0)];
                }
              }
            }
          }
          break;

        case int(E_mXpY):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=1; k<NK; k++ ){
              for( int j=NJ; j<NJ+gc; j++ ){
                for( int i=1-gc; i<=0; i++ ){
                  array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(1-gc),j-(NJ),k-1,l,gc,gc,NK-1,0)];
                }
              }
            }
          }
          break;

        case int(E_pXpY):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=1; k<NK; k++ ){
              for( int j=NJ; j<NJ+gc; j++ ){
                for( int i=NI; i<NI+gc; i++ ){
                  array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(NI),j-(NJ),k-1,l,gc,gc,NK-1,0)];
                }
              }
            }
          }
          break;
      }
      ptr += sz;
    }
  }
}
// #########################################################
/*
* @brief pack send data for diagonal corner
* @param [in] array source array
* @param [in] gc number of guide cell layer to be sent
* @param [out] sendbuf send buffer
* @param [out] recvbuf recv buffer
* @param [out] req Array of MPI request
* @retval true-success, false-fail
*/
// Pack and initiate the 8 diagonal-corner exchanges of a node-centered
// vector field.  Each corner with a neighbor gets: nonblocking receive,
// pack of the gc^3 interior corner block, nonblocking send; req holds
// {recv, send} request pairs indexed by 2*dir.  Returns false on any
// Irecv/Isend failure.
template <class T>
bool BrickComm::pack_VCnode(T *array,
                            const int gc,
                            T *sendbuf,
                            T *recvbuf,
                            MPI_Request *req)
{
  int NI = size[0];     // sub-domain extents (size[] member)
  int NJ = size[1];
  int NK = size[2];
  int VC = halo_width;  // guide-cell width the array is allocated with
  int tag = 0;          // only referenced by the commented-out raw MPI path below
  size_t ptr = 0;       // running offset into sendbuf/recvbuf

  //// 8 corner ////
  for( int dir=int(C_mXmYmZ);dir<=int(C_pXpYpZ);dir++ )
  {
    if( comm_tbl[dir] >= 0 )
    {
      T *sendptr = &sendbuf[ptr];
      T *recvptr = &recvbuf[ptr];
      // corner block: gc^3 nodes, 3 components
      size_t sz = gc * gc * gc * 3;
      /* recv
      if ( MPI_SUCCESS != MPI_Irecv(recvptr,
                                    sz,
                                    dtype,
                                    comm_tbl[dir],
                                    tag,
                                    MPI_COMM_WORLD,
                                    &req[dir*2]) ) return false;
      */
      if ( !IrecvData(recvptr,
                      sz,
                      comm_tbl[dir],
                      &req[dir*2]) ) return false;

      // pack — minus sides use interior nodes [1,gc], plus sides [N-gc,N-1]
      switch(dir)
      {
        case int(C_mXmYmZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=1; k<=gc; k++ ){
              for( int j=1; j<=gc; j++ ){
                for( int i=1; i<=gc; i++ ){
                  sendptr[_IDX_V3D(i-1,j-1,k-1,l,gc,gc,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
                }
              }
            }
          }
          break;

        case int(C_pXmYmZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=1; k<=gc; k++ ){
              for( int j=1; j<=gc; j++ ){
                for( int i=NI-gc; i<NI; i++ ){
                  sendptr[_IDX_V3D(i-(NI-gc),j-1,k-1,l,gc,gc,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
                }
              }
            }
          }
          break;

        case int(C_mXpYmZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=1; k<=gc; k++ ){
              for( int j=NJ-gc; j<NJ; j++ ){
                for( int i=1; i<=gc; i++ ){
                  sendptr[_IDX_V3D(i-1,j-(NJ-gc),k-1,l,gc,gc,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
                }
              }
            }
          }
          break;

        case int(C_pXpYmZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=1; k<=gc; k++ ){
              for( int j=NJ-gc; j<NJ; j++ ){
                for( int i=NI-gc; i<NI; i++ ){
                  sendptr[_IDX_V3D(i-(NI-gc),j-(NJ-gc),k-1,l,gc,gc,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
                }
              }
            }
          }
          break;

        case int(C_mXmYpZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=NK-gc; k<NK; k++ ){
              for( int j=1; j<=gc; j++ ){
                for( int i=1; i<=gc; i++ ){
                  sendptr[_IDX_V3D(i-1,j-1,k-(NK-gc),l,gc,gc,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
                }
              }
            }
          }
          break;

        case int(C_pXmYpZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=NK-gc; k<NK; k++ ){
              for( int j=1; j<=gc; j++ ){
                for( int i=NI-gc; i<NI; i++ ){
                  sendptr[_IDX_V3D(i-(NI-gc),j-1,k-(NK-gc),l,gc,gc,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
                }
              }
            }
          }
          break;

        case int(C_mXpYpZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=NK-gc; k<NK; k++ ){
              for( int j=NJ-gc; j<NJ; j++ ){
                for( int i=1; i<=gc; i++ ){
                  sendptr[_IDX_V3D(i-1,j-(NJ-gc),k-(NK-gc),l,gc,gc,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
                }
              }
            }
          }
          break;

        case int(C_pXpYpZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=NK-gc; k<NK; k++ ){
              for( int j=NJ-gc; j<NJ; j++ ){
                for( int i=NI-gc; i<NI; i++ ){
                  sendptr[_IDX_V3D(i-(NI-gc),j-(NJ-gc),k-(NK-gc),l,gc,gc,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
                }
              }
            }
          }
          break;
      }
      /* send
      if ( MPI_SUCCESS != MPI_Isend(sendptr,
                                    sz,
                                    dtype,
                                    comm_tbl[dir],
                                    tag,
                                    MPI_COMM_WORLD,
                                    &req[dir*2+1]) ) return false;
      */
      if ( !IsendData(sendptr,
                      sz,
                      comm_tbl[dir],
                      &req[dir*2+1]) ) return false;
      // pointer
      ptr += sz;
    }
  }

  return true;
}
// #########################################################
/*
* @brief unpack send data for diagonal corner
* @param [out] array dest array
* @param [in] gc number of guide cell layer to be sent
* @param [in] recvbuf recv buffer
*/
// Unpack the 8 diagonal-corner receive buffers into the gc^3 ghost-corner
// regions of a node-centered vector field.  Walks the directions in the same
// order as pack_VCnode so the running offset `ptr` addresses the matching
// corner block.
template <class T>
void BrickComm::unpack_VCnode(T *array,
                              const int gc,
                              const T *recvbuf)
{
  int NI = size[0];     // sub-domain extents (size[] member)
  int NJ = size[1];
  int NK = size[2];
  int VC = halo_width;  // guide-cell width the array is allocated with
  size_t ptr = 0;       // running offset into recvbuf (same layout as pack_VCnode)

  //// 8 corner ////
  for( int dir=int(C_mXmYmZ);dir<=int(C_pXpYpZ);dir++ )
  {
    if( comm_tbl[dir] >= 0 )
    {
      const T *recvptr = &recvbuf[ptr];
      size_t sz = gc * gc * gc * 3;
      // unpack — ghost ranges: [1-gc,0] on minus sides, [N,N+gc-1] on plus sides
      switch(dir)
      {
        case int(C_mXmYmZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=1-gc; k<=0; k++ ){
              for( int j=1-gc; j<=0; j++ ){
                for( int i=1-gc; i<=0; i++ ){
                  array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(1-gc),j-(1-gc),k-(1-gc),l,gc,gc,gc,0)];
                }
              }
            }
          }
          break;

        case int(C_pXmYmZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=1-gc; k<=0; k++ ){
              for( int j=1-gc; j<=0; j++ ){
                for( int i=NI; i<NI+gc; i++ ){
                  array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(NI),j-(1-gc),k-(1-gc),l,gc,gc,gc,0)];
                }
              }
            }
          }
          break;

        case int(C_mXpYmZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=1-gc; k<=0; k++ ){
              for( int j=NJ; j<NJ+gc; j++ ){
                for( int i=1-gc; i<=0; i++ ){
                  array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(1-gc),j-(NJ),k-(1-gc),l,gc,gc,gc,0)];
                }
              }
            }
          }
          break;

        case int(C_pXpYmZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=1-gc; k<=0; k++ ){
              for( int j=NJ; j<NJ+gc; j++ ){
                for( int i=NI; i<NI+gc; i++ ){
                  array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(NI),j-(NJ),k-(1-gc),l,gc,gc,gc,0)];
                }
              }
            }
          }
          break;

        case int(C_mXmYpZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=NK; k<NK+gc; k++ ){
              for( int j=1-gc; j<=0; j++ ){
                for( int i=1-gc; i<=0; i++ ){
                  array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(1-gc),j-(1-gc),k-(NK),l,gc,gc,gc,0)];
                }
              }
            }
          }
          break;

        case int(C_pXmYpZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=NK; k<NK+gc; k++ ){
              for( int j=1-gc; j<=0; j++ ){
                for( int i=NI; i<NI+gc; i++ ){
                  array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(NI),j-(1-gc),k-(NK),l,gc,gc,gc,0)];
                }
              }
            }
          }
          break;

        case int(C_mXpYpZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=NK; k<NK+gc; k++ ){
              for( int j=NJ; j<NJ+gc; j++ ){
                for( int i=1-gc; i<=0; i++ ){
                  array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(1-gc),j-(NJ),k-(NK),l,gc,gc,gc,0)];
                }
              }
            }
          }
          break;

        case int(C_pXpYpZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
            for( int k=NK; k<NK+gc; k++ ){
              for( int j=NJ; j<NJ+gc; j++ ){
                for( int i=NI; i<NI+gc; i++ ){
                  array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(NI),j-(NJ),k-(NK),l,gc,gc,gc,0)];
                }
              }
            }
          }
          break;
      }
      ptr += sz;
    }
  }
}
#endif // _DIAGONAL_COMM
#endif // _CB_PACK_V_NODE_H_
|
DiracMatrix.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by:
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
#ifndef QMCPLUSPLUS_DIRAC_MATRIX_H
#define QMCPLUSPLUS_DIRAC_MATRIX_H
#include "Numerics/Blasf.h"
#include <OhmmsPETE/OhmmsMatrix.h>
#include <type_traits/scalar_traits.h>
namespace qmcplusplus {
/** LU factorization of a float matrix (LAPACK sgetrf wrapper) */
inline void Xgetrf(int n, int m, float* restrict a, int lda, int* restrict piv)
{
  int status; // LAPACK info code; NOTE(review): ignored — a singular matrix goes undetected
  sgetrf(n,m,a,lda,piv,status);
}
/** inversion of a float matrix after LU factorization (LAPACK sgetri wrapper);
 *  lwork = -1 performs the LAPACK workspace-size query */
inline void Xgetri(int n, float* restrict a, int lda, int* restrict piv, float* restrict work, int& lwork)
{
  int status; // LAPACK info code; NOTE(review): ignored
  sgetri(n,a,lda,piv,work,lwork,status);
}
/** LU factorization of a std::complex<float> matrix (LAPACK cgetrf wrapper) */
inline void Xgetrf(int n, int m, std::complex<float>* restrict a, int lda, int* restrict piv)
{
  int status; // LAPACK info code; NOTE(review): ignored
  cgetrf(n,m,a,lda,piv,status);
}
/** inversion of a std::complex<float> matrix after LU factorization
 *  (LAPACK cgetri wrapper); lwork = -1 performs the workspace-size query */
inline void Xgetri(int n, std::complex<float>* restrict a, int lda, int* restrict piv, std::complex<float>* restrict work, int& lwork)
{
  int status; // LAPACK info code; NOTE(review): ignored
  cgetri(n,a,lda,piv,work,lwork,status);
}
/** LU factorization of a double matrix (LAPACK dgetrf wrapper) */
inline void Xgetrf(int n, int m, double* restrict a, int lda, int* restrict piv)
{
  int status; // LAPACK info code; NOTE(review): ignored
  dgetrf(n,m,a,lda,piv,status);
}
/** inversion of a double matrix after LU factorization (LAPACK dgetri wrapper);
 *  lwork = -1 performs the LAPACK workspace-size query */
inline void Xgetri(int n, double* restrict a, int lda, int* restrict piv, double* restrict work, int& lwork)
{
  int status; // LAPACK info code; NOTE(review): ignored
  dgetri(n,a,lda,piv,work,lwork,status);
}
/** LU factorization of a std::complex<double> matrix (LAPACK zgetrf wrapper) */
inline void Xgetrf(int n, int m, std::complex<double>* restrict a, int lda, int* restrict piv)
{
  int status; // LAPACK info code; NOTE(review): ignored
  zgetrf(n,m,a,lda,piv,status);
}
/** inversion of a std::complex<double> matrix after LU factorization
 *  (LAPACK zgetri wrapper); lwork = -1 performs the workspace-size query */
inline void Xgetri(int n, std::complex<double>* restrict a, int lda, int* restrict piv, std::complex<double>* restrict work, int& lwork)
{
  int status; // LAPACK info code; NOTE(review): ignored
  zgetri(n,a,lda,piv,work,lwork,status);
}
/** Write the transpose of the leading n x n block of `in` into `out`.
 *  Both matrices use the same leading dimension `lda`; element types may
 *  differ and are converted by assignment.  (Name keeps the historical typo
 *  for source compatibility.) */
template<typename TIN, typename TOUT>
inline void TansposeSquare(const TIN* restrict in, TOUT* restrict out, size_t n, size_t lda)
{
#pragma omp simd
  for (size_t row = 0; row < n; ++row)
  {
    TOUT* dst       = out + row * lda;  // row `row` of the destination
    const TIN* src  = in + row;         // column `row` of the source
    for (size_t col = 0; col < n; ++col)
      dst[col] = src[col * lda];
  }
}
/** Compute log|det| of a real LU-factored matrix from its diagonal.
 *  @param X      LU factors (row-major, leading dimension lda); only the
 *                diagonal is read
 *  @param n      matrix order
 *  @param lda    leading dimension of X
 *  @param pivot  LAPACK-style 1-based pivot indices from getrf
 *  @param phase  out: 0 for det > 0, pi for det < 0
 *  @return       sum of log|X[i][i]|, i.e. log|det|
 */
template<typename T>
inline T computeLogDet(const T* restrict X, int n, int lda, const int* restrict pivot, T& phase)
{
  T logdet(0);
  int sign_det = 1;
  // int index fixes the original signed/unsigned comparison (i was size_t, n is int)
  // and keeps pivot[i] == i+1 an int/int comparison.
  for (int i = 0; i < n; i++)
  {
    const size_t ii = static_cast<size_t>(i) * lda + i;
    sign_det *= (pivot[i] == i + 1) ? 1 : -1;  // each row interchange flips the sign
    sign_det *= (X[ii] > 0) ? 1 : -1;          // NOTE: a zero diagonal counts as negative; logdet is -inf anyway
    logdet += std::log(std::abs(X[ii]));
  }
  // Real determinant: the complex phase is 0 (positive) or pi (negative).
  phase = (sign_det > 0) ? T(0) : M_PI;
  return logdet;
}
/** Compute log|det| and its phase for a complex LU-factored matrix.
 *  @param X      LU factors; only the diagonal is read
 *  @param n      matrix order
 *  @param lda    leading dimension of X
 *  @param pivot  LAPACK-style 1-based pivot indices; each interchange adds pi
 *  @param phase  out: arg(det) wrapped into [0, 2*pi) via TWOPI
 *  @return       log|det| = 0.5 * sum log(|X[i][i]|^2)
 */
template<typename T>
inline T computeLogDet(const std::complex<T>* restrict X, int n, int lda, const int* restrict pivot, T& phase)
{
  T logdet(0);
  phase=T(0);
  for(size_t i=0; i<n; i++)  // NOTE(review): size_t i vs int n — signed/unsigned comparison
  {
    const size_t ii=i*lda+i;
    phase += std::arg(X[ii]);        // accumulate the diagonal phases
    if(pivot[i]!=i+1)
      phase += M_PI;                 // a row interchange multiplies det by -1
    // |z|^2 accumulated, halved on return — avoids a sqrt per element
    logdet+=std::log(X[ii].real()*X[ii].real()+X[ii].imag()*X[ii].imag());
    //slightly smaller error with the following
    // logdet += 2.0*std::log(std::abs(X[ii]));
  }
  // wrap the accumulated phase into [0, 2*pi); TWOPI/CONSTEXPR are project macros
  CONSTEXPR T one_over_2pi=T(1)/TWOPI;
  phase -= std::floor(phase*one_over_2pi)*TWOPI;
  return 0.5*logdet;
}
/** Helper for inverting Dirac determinant matrices via LAPACK getrf/getri,
 *  caching the pivot array and the getri workspace between calls. */
template<typename T>
struct DiracMatrix
{
  typedef typename scalar_traits<T>::real_type real_type;
  aligned_vector<T> m_work;     // getri workspace (sized by the lwork query)
  aligned_vector<int> m_pivot;  // LAPACK pivot indices
  int Lwork;                    // current workspace size; 0 forces reset() on first invert()
  real_type LogDet;             // log|det| from the last invert(computeDet=true)
  real_type Phase;              // phase of det from the last invert(computeDet=true)

  // NOTE(review): LogDet/Phase are left uninitialized until the first
  // invert(..., true) call.
  DiracMatrix():Lwork(0) {}

  /** Invert amat in place (LU factorize, then getri); optionally record
   *  LogDet/Phase from the LU diagonal before the inversion destroys it. */
  inline void invert(Matrix<T>& amat, bool computeDet)
  {
    const int n=amat.rows();
    const int lda=amat.cols();
    if(Lwork<n) reset(amat,n,lda);  // (re)allocate pivot + workspace on growth
    int status;                     // NOTE(review): unused — LAPACK info is not checked
    Xgetrf(n,n,amat.data(),lda,m_pivot.data());
    if(computeDet)
    {
      LogDet=computeLogDet(amat.data(),n,lda,m_pivot.data(),Phase);
    }
    Xgetri(n, amat.data(),lda,m_pivot.data(),m_work.data(),Lwork);
  }

  /** Size the pivot array and query LAPACK (lwork = -1) for the optimal
   *  getri workspace, then allocate it. */
  inline void reset(Matrix<T>& amat,int n, int lda)
  {
    //Lwork=n;
    //m_work.resize(n);
    //m_pivot.resize(n);
    m_pivot.resize(lda);  // NOTE(review): sized by lda rather than n — confirm intent
    Lwork=-1;             // lwork = -1: LAPACK workspace-size query
    T tmp;
    real_type lw;
    Xgetri(n, amat.data(),lda,m_pivot.data(),&tmp,Lwork);
    convert(tmp,lw);      // query result is returned in tmp (possibly complex)
    Lwork=static_cast<int>(lw);
    m_work.resize(Lwork);
  }

  /** Sherman-Morrison rank-1 update of the inverse for a replaced row.
   *  @param a           current inverse, updated in place
   *  @param arow        the new row's values
   *  @param rowchanged  index of the replaced row
   *  @param c_ratio_in  determinant ratio of the replacement
   */
  inline void updateRow(Matrix<T>& a, T* arow, int rowchanged, T c_ratio_in)
  {
    const int m=a.rows();
    const int lda=a.cols();
    CONSTEXPR T cone(1);
    CONSTEXPR T czero(0);
    T temp[lda], rcopy[lda];  // NOTE(review): VLAs — a compiler extension, not standard C++
    T c_ratio=cone/c_ratio_in;
    BLAS::gemv('T', m, m, c_ratio, a.data(), lda, arow, 1, czero, temp, 1);
    temp[rowchanged]=cone-c_ratio;
    simd::copy_n(a[rowchanged],m,rcopy);
    BLAS::ger(m,m,-cone,rcopy,1,temp,1,a.data(),lda);
  }
};
}
#endif // OHMMS_PETE_MATRIX_H
|
wbb3_fmt_plug.c | /* WoltLab Burning Board 3 (WBB3) cracker patch for JtR. Hacked together during
* May of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
* Input Format => user:$wbb3$*type*hash
*
* Where,
*
* type => 1, for sha1($salt.sha1($salt.sha1($pass))) hashing scheme
*
* JimF, July 2012.
* Made small change in hex_encode 10x improvement in speed. Also some other
* changes. Should be a thin dyanamic.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_wbb3;
#elif FMT_REGISTERS_H
john_register_one(&fmt_wbb3);
#else
#include "arch.h"
#include "sha.h"
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 8 // tuned on core i7
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "wbb3"
#define FORMAT_NAME "WoltLab BB3"
#define ALGORITHM_NAME "SHA1 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 32
#define BINARY_SIZE 20
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 64
/* Self-test vectors: $wbb3$*type*salt(40 hex)*hash(40 hex). */
static struct fmt_tests wbb3_tests[] = {
	{"$wbb3$*1*0b053db07dc02bc6f6e24e00462f17e3c550afa9*e2063f7c629d852302d3020599376016ff340399", "123456"},
	{"$wbb3$*1*0b053db07dc02bc6f6e24e00462f17e3c550afa9*f6975cc560c5d03feb702158d08f90bf2fa773d6", "password"},
	{"$wbb3$*1*a710463f75bf4568d398db32a53f9803007388a3*2c56d23b44eb122bb176dfa2a1452afaf89f1143", "123456"},
	{"$wbb3$*1*1039145e9e785ddb2ac7ccca89ac1b159b595cc1*2596b5f8e7cdaf4b15604ad336b810e8e2935b1d", "12345678"},
	{"$wbb3$*1*db763342e23f8ccdbd9c90d1cc7896d80b7e0a44*26496a87c1a7dd68f7beceb2fc40b6fc4223a453", "12345678"},
	{"$wbb3$*1*bf2c7d0c8fb6cb146adf8933e32da012d31b5bbb*d945c02cf85738b7db4f4f05edd676283280a513", "123456789"},
	{"$wbb3$*1*d132b22d3f1d942b99cc1f5fbd5cc3eb0824d608*e3e03fe02223c5030e834f81997f614b43441853", "1234567890"},
	{NULL}
};
/* per-candidate plaintext buffers */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* per-candidate final SHA-1 digests */
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
/* cached hex(sha1(password)) per candidate; refreshed only when keys change */
static unsigned char (*hexhash1)[40];
/* set by set_key(), cleared by crypt_all(): marks hexhash1 stale */
static int dirty;
static struct custom_salt {
	int type;               /* hashing scheme; only type 1 is supported */
	unsigned char salt[41]; /* 40 hex chars + NUL */
} *cur_salt;
/* Expand str[0..len) into 2*len hex digits at out (no NUL terminator). */
static inline void hex_encode(unsigned char *str, int len, unsigned char *out)
{
	int i;

	for (i = 0; i < len; ++i) {
		unsigned char byte = str[i];

		out[2 * i] = itoa16[byte >> 4];
		out[2 * i + 1] = itoa16[byte & 0xF];
	}
}
/* Allocate per-candidate buffers; under OpenMP the key space is scaled by
 * the thread count times OMP_SCALE so each thread gets a useful batch. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	static int omp_t = 1;
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*crypt_out));
	/* hexhash1 caches hex(SHA1(password)) so unchanged keys skip one SHA-1
	 * per crypt_all() call (see the "dirty" flag) */
	hexhash1 = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*hexhash1));
}
/* Release the buffers allocated in init(), in reverse allocation order. */
static void done(void)
{
	MEM_FREE(hexhash1);
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}
/* Validate "$wbb3$*type*salt*hash": type must be 1, the salt exactly 40 hex
 * chars and the hash exactly 2*BINARY_SIZE hex chars. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char _ctcopy[256], *ctcopy = _ctcopy;
	char *p;
	int res;

	if (strncmp(ciphertext, "$wbb3$*", 7))
		return 0;
	strnzcpy(ctcopy, ciphertext, 255);
	ctcopy += 7;
	p = strtokm(ctcopy, "*"); /* type */
	if(!p)
		goto err;
	if(!isdec(p))
		goto err;
	res = atoi(p);
	if (res != 1)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* salt */
		goto err;
	res = strlen(p);
	if (hexlenl(p) != res)
		goto err;
	/* The salt must be exactly 40 hex chars: crypt_all() hashes exactly 40
	 * bytes of it and get_salt() copies it into a 41-byte buffer, so any
	 * other length would be mis-hashed or overflow that buffer. */
	if (res != 40)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* hash */
		goto err;
	if (hexlenl(p) != BINARY_SIZE * 2)
		goto err;
	return 1;
err:
	return 0;
}
/* Parse the type and 40-hex-char salt out of a validated ciphertext.
 * Returns a pointer to a static struct (JtR copies SALT_SIZE bytes out). */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char _ctcopy[256], *ctcopy = _ctcopy;
	char *p;

	memset(&cs, 0, sizeof(cs));
	strnzcpy(ctcopy, ciphertext, 255);
	ctcopy += 7; /* skip over "$wbb3$*" */
	p = strtokm(ctcopy, "*");
	cs.type = atoi(p);
	p = strtokm(NULL, "*");
	/* bounded copy instead of strcpy(): valid() enforces 40 hex chars, but
	 * never overflow cs.salt even if called on unvetted input */
	strnzcpy((char *)cs.salt, p, sizeof(cs.salt));
	return (void *)&cs;
}
/* Decode the trailing 40 hex digits (after the last '*') into BINARY_SIZE
 * bytes. Returns a pointer to a static, word-aligned buffer. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE+1];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *hex;
	int i;

	hex = strrchr(ciphertext, '*') + 1;
	for (i = 0; i < BINARY_SIZE; i++)
		out[i] = (atoi16[ARCH_INDEX(hex[2 * i])] << 4) |
			atoi16[ARCH_INDEX(hex[2 * i + 1])];
	return out;
}
/* Hash-table lookups: low bits of the first 32-bit word of each digest,
 * masked to the framework's per-level table sizes. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/* Install the salt (as produced by get_salt) for the next crypt_all batch. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/* Compute sha1($salt.sha1($salt.sha1($pass))) for each queued candidate.
 * The innermost sha1($pass) is salt-independent, so its hex form is cached
 * in hexhash1[] and recomputed only when set_key() marked the keys dirty. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
	{
		unsigned char hexhash[40];
		SHA_CTX ctx;
		if (dirty) {
			unsigned char out[20];
			/* inner digest: sha1($pass), cached as 40 hex chars */
			SHA1_Init(&ctx);
			SHA1_Update(&ctx, saved_key[index], strlen(saved_key[index]));
			SHA1_Final(out, &ctx);
			hex_encode(out, 20, hexhash1[index]);
		}
		/* middle digest: sha1($salt . hex(sha1($pass))) */
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, cur_salt->salt, 40);
		SHA1_Update(&ctx, hexhash1[index], 40);
		SHA1_Final((unsigned char*)crypt_out[index], &ctx);
		hex_encode((unsigned char*)crypt_out[index], 20, hexhash);
		/* outer digest: sha1($salt . hex(middle)) */
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, cur_salt->salt, 40);
		SHA1_Update(&ctx, hexhash, 40);
		SHA1_Final((unsigned char*)crypt_out[index], &ctx);
	}
	dirty = 0; /* hexhash1 now matches the current key set */
	return count;
}
/* Quick scan: compare only the first 32-bit word of each computed digest. */
static int cmp_all(void *binary, int count)
{
	int index = 0;
	for (; index < count; index++)
		if (*((ARCH_WORD_32*)binary) == crypt_out[index][0])
			return 1;
	return 0;
}
/* Same single-word comparison against one candidate. */
static int cmp_one(void *binary, int index)
{
	return *((ARCH_WORD_32*)binary) == crypt_out[index][0];
}
/* Full BINARY_SIZE comparison to confirm a tentative match. */
static int cmp_exact(char *source, int index)
{
	void *binary = get_binary(source);
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static void wbb3_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
dirty = 1;
}
/* Return the stored (possibly truncated) candidate plaintext. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Format descriptor: wires the functions above into JtR's dispatch table. */
struct fmt_main fmt_wbb3 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		wbb3_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		wbb3_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
ch_common.c |
#define MAIN
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include "ch_common.h"
#include "cholesky.h"
#include "mpi-detach.h"
#include "../timing.h"
static void get_block_rank(int *block_rank, int nt);
/* Cholesky-factorize one ts x ts tile in place (LAPACK dpotrf, lower). */
void omp_potrf(double * const A, int ts, int ld)
{
	/* INFO must be a plain local: this function runs inside concurrent
	 * OpenMP tasks, and dpotrf_ writes through the pointer, so a static
	 * INFO would be a data race across tasks. */
	int INFO;
	static const char L = 'L'; /* read-only after load: safe to share */
	dpotrf_(&L, &ts, A, &ld, &INFO);
}
/* Triangular solve of one tile: B <- B * inv(A^T) (BLAS dtrsm, right/lower).
 * The static char/double arguments are only ever read after their constant
 * initialization, so sharing them across tasks is safe. */
void omp_trsm(double *A, double *B, int ts, int ld)
{
	static char LO = 'L', TR = 'T', NU = 'N', RI = 'R';
	static double DONE = 1.0;
	dtrsm_(&RI, &LO, &TR, &NU, &ts, &ts, &DONE, A, &ld, B, &ld );
}
/* Tile update: C <- C - A * B^T (BLAS dgemm). */
void omp_gemm(double *A, double *B, double *C, int ts, int ld)
{
	static const char TR = 'T', NT = 'N';
	static double DONE = 1.0, DMONE = -1.0;
	dgemm_(&NT, &TR, &ts, &ts, &ts, &DMONE, A, &ld, B, &ld, &DONE, C, &ld);
}
/* Symmetric rank-k update of a diagonal tile: B <- B - A * A^T (dsyrk). */
void omp_syrk(double *A, double *B, int ts, int ld)
{
	static char LO = 'L', NT = 'N';
	static double DONE = 1.0, DMONE = -1.0;
	dsyrk_(&LO, &NT, &ts, &ts, &DMONE, A, &ld, &DONE, B, &ld );
}
/* Task-parallel tiled Cholesky on a single rank, used as the reference
 * solution when checking is enabled. Tile dependencies are expressed with
 * task depend clauses; presumably the caller invokes this from inside a
 * parallel/single region (or under the Nanos6 runtime) -- see main(). */
void cholesky_single(const int ts, const int nt, double* A[nt][nt])
{
	for (int k = 0; k < nt; k++) {
		/* factorize the diagonal tile */
#ifdef USE_NANOS6
#pragma oss task depend(out: A[k][k])
#else
#pragma omp task depend(out: A[k][k])
#endif
		{
			omp_potrf(A[k][k], ts, ts);
#ifdef DEBUG
			if (mype == 0) printf("potrf:out:A[%d][%d]\n", k, k);
#endif
		}
		/* solve the tiles right of the diagonal against it */
		for (int i = k + 1; i < nt; i++) {
#ifdef USE_NANOS6
#pragma oss task depend(in: A[k][k]) depend(out: A[k][i])
#else
#pragma omp task depend(in: A[k][k]) depend(out: A[k][i])
#endif
			{
				omp_trsm(A[k][k], A[k][i], ts, ts);
#ifdef DEBUG
				if (mype == 0) printf("trsm :in:A[%d][%d]:out:A[%d][%d]\n", k, k, k, i);
#endif
			}
		}
		/* update the trailing submatrix */
		for (int i = k + 1; i < nt; i++) {
			for (int j = k + 1; j < i; j++) {
#ifdef USE_NANOS6
#pragma oss task depend(in: A[k][i], A[k][j]) depend(out: A[j][i])
#else
#pragma omp task depend(in: A[k][i], A[k][j]) depend(out: A[j][i])
#endif
				{
					omp_gemm(A[k][i], A[k][j], A[j][i], ts, ts);
#ifdef DEBUG
					if (mype == 0) printf("gemm :in:A[%d][%d]:A[%d][%d]:out:A[%d][%d]\n", k, i, k, j, j, i);
#endif
				}
			}
			/* symmetric rank-k update of the diagonal tile */
#ifdef USE_NANOS6
#pragma oss task depend(in: A[k][i]) depend(out: A[i][i])
#else
#pragma omp task depend(in: A[k][i]) depend(out: A[i][i])
#endif
			{
				omp_syrk(A[k][i], A[i][i], ts, ts);
#ifdef DEBUG
				if (mype == 0) printf("syrk :in:A[%d][%d]:out:A[%d][%d]\n", k, i, i, i);
#endif
			}
		}
	}
#ifdef USE_NANOS6
#pragma oss taskwait
#else
#pragma omp taskwait
#endif
}
inline void reset_send_flags(char *send_flags)
{
for (int i = 0; i < np; i++) send_flags[i] = 0;
}
/* Driver for the distributed tiled Cholesky benchmark:
 * parses <matrix_size> <block_size> <check>, distributes tiles across ranks
 * block-cyclically, optionally runs a single-node reference factorization,
 * runs cholesky_mpi(), verifies the result and reports GFLOP/s. */
int main(int argc, char *argv[])
{
	/* MPI Initialize */
	int provided;
	MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
	if (provided != MPI_THREAD_MULTIPLE) {
		printf("This Compiler does not support MPI_THREAD_MULTIPLE\n");
		/* was exit(0): an aborted run must not report success */
		exit(1);
	}
	MPI_Comm_rank(MPI_COMM_WORLD, &mype);
	MPI_Comm_size(MPI_COMM_WORLD, &np);

	/* cholesky init */
	const char *result[3] = {"n/a","successful","UNSUCCESSFUL"};
	/* NOTE(review): eps is currently unused; the verification below does an
	 * exact floating-point comparison -- confirm this is intentional. */
	const double eps = BLAS_dfpinfo(blas_eps);

	if (argc < 4) {
		printf("cholesky matrix_size block_size check\n");
		exit(-1);
	}
	const int n = atoi(argv[1]); // matrix size
	const int ts = atoi(argv[2]); // tile size
	int check = atoi(argv[3]); // check result?
	const int nt = n / ts;

	if ( ts < nt ) {
		printf("block_size must be larger than number of blocks\n");
		exit(-1);
	}

	if (mype == 0)
		printf("nt = %d, ts = %d\n", nt, ts);

	/* Set block rank */
	int *block_rank = malloc(nt * nt * sizeof(int));
	get_block_rank(block_rank, nt);
#ifdef DEBUG
	if (mype == 0) {
		for (int i = 0; i < nt; i++) {
			for (int j = 0; j < nt; j++) {
				printf("%d ", block_rank[i * nt + j]);
			}
			printf("\n");
		}
	}
#endif
	double *A[nt][nt], *B, *C[nt], *Ans[nt][nt];

#ifdef USE_NANOS6
#ifdef USE_POLLING
	nanos6_register_polling_service("MPI_Progress", MPIX_Progress, NULL);
#endif
#else
#pragma omp parallel
#pragma omp single
#endif
	{
		/* allocate (and, when checking, duplicate) the tiles in tasks */
		for (int i = 0; i < nt; i++) {
			for (int j = 0; j < nt; j++) {
#ifdef USE_NANOS6
#pragma oss task depend(out: A[i][j]) shared(Ans, A)
#else
#pragma omp task depend(out: A[i][j]) shared(Ans, A)
#endif
				{
					if (check) {
						MPI_Alloc_mem(ts * ts * sizeof(double), MPI_INFO_NULL, &Ans[i][j]);
						initialize_tile(ts, Ans[i][j]);
					}
					if (block_rank[i*nt+j] == mype) {
						MPI_Alloc_mem(ts * ts * sizeof(double), MPI_INFO_NULL, &A[i][j]);
						if (!check) {
							initialize_tile(ts, A[i][j]);
						} else {
							/* owned tile mirrors the reference copy */
							for (int k = 0; k < ts * ts; k++) {
								A[i][j][k] = Ans[i][j][k];
							}
						}
					}
				}
			}
#ifdef USE_NANOS6
#pragma oss task depend(inout: A[i][i]) shared(Ans, A)
#else
#pragma omp task depend(inout: A[i][i]) shared(Ans, A)
#endif
			{
				// add to diagonal (makes the matrix positive definite)
				if (check) {
					Ans[i][i][ts/2*ts+ts/2] = (double)nt;
				}
				if (block_rank[i*nt+i] == mype) {
					A[i][i][ts/2*ts+ts/2] = (double)nt;
				}
			}
		}
		// omp single
	} // omp parallel
	MPI_Alloc_mem(ts * ts * sizeof(double), MPI_INFO_NULL, &B);
	for (int i = 0; i < nt; i++) {
		MPI_Alloc_mem(ts * ts * sizeof(double), MPI_INFO_NULL, &C[i]);
	}

	/* under OpenMP the assignment below runs once inside a parallel/single
	 * region so NUM_THREADS reflects the team size */
#ifdef USE_NANOS6
#else
#pragma omp parallel
#pragma omp single
#endif
	num_threads = NUM_THREADS;

	const float t3 = get_time();
	if (check) cholesky_single(ts, nt, (double* (*)[nt]) Ans);
	const float t4 = get_time() - t3;

	MPI_Barrier(MPI_COMM_WORLD);
	if (mype == 0)
		printf("Starting parallel computation\n");
	const float t1 = get_time();
	cholesky_mpi(ts, nt, (double* (*)[nt])A, B, C, block_rank);
	const float t2 = get_time() - t1;
	if (mype == 0)
		printf("Finished parallel computation\n");
	MPI_Barrier(MPI_COMM_WORLD);

	/* Verification */
	if (check) {
		for (int i = 0; i < nt; i++) {
			for (int j = 0; j < nt; j++) {
				if (block_rank[i * nt + j] == mype) {
					for (int k = 0; k < ts*ts; k++) {
						if (Ans[i][j][k] != A[i][j][k]) check = 2;
					}
				}
			}
		}
	}
	float time_mpi = t2;
	float gflops_mpi = (((1.0 / 3.0) * n * n * n) / ((time_mpi) * 1.0e+9));
	float time_ser = t4;
	float gflops_ser = (((1.0 / 3.0) * n * n * n) / ((time_ser) * 1.0e+9));
	printf("test:%s-%d-%d-%d:mype:%2d:np:%2d:threads:%2d:result:%s:gflops:%f:time:%f:gflops_ser:%f:time_ser:%f\n", argv[0], n, ts, num_threads, mype, np, num_threads, result[check], gflops_mpi, t2, gflops_ser, t4);

	/* memory from MPI_Alloc_mem must be released with MPI_Free_mem, not
	 * free() (the MPI standard leaves free() on such memory undefined) */
	for (int i = 0; i < nt; i++) {
		for (int j = 0; j < nt; j++) {
			if (block_rank[i*nt+j] == mype) {
				MPI_Free_mem(A[i][j]);
			}
			if (check)
				MPI_Free_mem(Ans[i][j]);
		}
		MPI_Free_mem(C[i]);
	}
	MPI_Free_mem(B);
	free(block_rank); /* plain malloc, plain free */

	MPI_Finalize();
	return 0;
}
/* Assign an owner rank to each of the nt x nt tiles using a 2-D block-cyclic
 * layout over a row x col process grid factored out of np.
 * NOTE(review): the halving loop below appears to assume np is 1 or a power
 * of two; other values may never satisfy row * col == np -- confirm. */
static void get_block_rank(int *block_rank, int nt)
{
	int row, col;
	row = col = np;

	/* per-rank tile counters, for the diagnostic printout below */
	int blocks[np];
	for (int i=0; i<np; i++)
		blocks[i]=0;

	/* factor np into a near-square row x col grid by repeated halving */
	if (np != 1) {
		while (1) {
			row = row / 2;
			if (row * col == np) break;
			col = col / 2;
			if (row * col == np) break;
		}
	}
	if (mype == 0) printf("row = %d, col = %d\n", row, col);

	int i, j, tmp_rank = 0, offset = 0;
	for (i = 0; i < nt; i++) {
		for (j = 0; j < nt; j++) {
			block_rank[i*nt + j] = tmp_rank + offset;
			blocks[tmp_rank + offset]++;
			tmp_rank++;
			if (tmp_rank >= col) tmp_rank = 0; /* wrap within the row of the grid */
		}
		tmp_rank = 0;
		offset = (offset + col >= np) ? 0 : offset + col; /* next grid row, cyclic */
	}
	if (mype == 0)
		for (int i=0; i<np; i++)
			printf("blocks[%i]=%i\n", i, blocks[i]);
}
|
output.h | #pragma once
#include <gms/common/types.h>
#include <omp.h>
#include "../sequential/output.h"
#include <gms/third_party/robin_hood.h>
#include <shared_mutex>
namespace GMS::KCliqueStar::Par {
using Seq::ListOutput;
using OutputMode = Seq::OutputMode;
/**
 * Parallel adapter around Seq::ListOutput: each thread obtains its own
 * Writer backed by a thread-local sequential output registered with this
 * object, and collect() merges all registered outputs into one.
 */
template <class TValueType, OutputMode TOutputMode, int TNumOutputs>
class ListOutputPar {
public:
    using OutputSeq = typename Seq::ListOutput<TValueType, TOutputMode, TNumOutputs>;

    class Writer {
        friend ListOutputPar;
        using Array = std::array<TValueType, TNumOutputs>;
    public:
        // Copying a Writer (e.g. OpenMP firstprivate) registers a brand-new
        // thread-local output with the shared target rather than sharing the
        // source Writer's buffer.
        Writer(const Writer &other) : target(other.target), local(nullptr)
        {
            auto writer = target.writer();
            local = std::move(writer.local);
        }
        void push(const Array &value) {
            local->push(value);
        }
        void push(Array &&value) {
            local->push(std::move(value));
        }
    protected:
        // Target parallel output.
        // (Restored from mojibake: the reference declaration had been
        // HTML-entity-mangled into a single "position indicator" glyph.)
        ListOutputPar &target;
        // The thread-local output.
        std::shared_ptr<OutputSeq> local;
        Writer(ListOutputPar &target, std::shared_ptr<OutputSeq> &&local) :
            target(target), local(std::move(local))
        {}
    };

    ListOutputPar() = default;

    // Create and register a new thread-local output; return a Writer for it.
    Writer writer() {
        auto output = std::make_shared<OutputSeq>();
        #pragma omp critical (outputvector)
        outputs.push_back(output);
        return Writer(*this, std::move(output));
    }

    // Drain every registered thread-local output into a single sequential one.
    OutputSeq collect() {
        OutputSeq result;
        #pragma omp critical (outputvector)
        {
            for (const auto &o : outputs) {
                result.extend(std::move(*o));
            }
            outputs.clear();
        }
        return result;
    }

private:
    std::vector<std::shared_ptr<OutputSeq>> outputs;
};
} |
random.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <stdbool.h>
#include <string.h>
#include <omp.h>
#ifndef TOTAL_SEATS
#define TOTAL_SEATS 20
#endif
#ifndef TOTAL_THREADS
#define TOTAL_THREADS 4
#endif
/* Pick a uniformly pseudo-random seat index in [0, TOTAL_SEATS).
 * Uses rand(), which need not be thread-safe; in this program every call
 * site is serialized (the master section runs before the barrier, all other
 * calls are inside an omp critical), so calls never overlap. */
int randomAssignment()
{
	return rand() % TOTAL_SEATS;
}
/* Simulation of the "lost boarding pass" seating problem: person 0 (the
 * master thread) takes a random seat; every later person takes their own
 * seat if free, otherwise a random free seat. Counts how many people end
 * up in the wrong seat. */
int main(int argc, char **argv)
{
	omp_set_num_threads(TOTAL_THREADS);
	srand(time(0));
	bool seats[TOTAL_SEATS];
	memset(seats, false, TOTAL_SEATS);
	int wrongly_assigned = 0;
#pragma omp parallel
	{
		/* person 0: one thread picks a random seat before anyone else moves;
		 * the barrier below orders this write against the loop's accesses */
#pragma omp master
		{
			int seatAssigned = randomAssignment();
			if (seatAssigned != 0)
			{
				wrongly_assigned++;
			}
			seats[seatAssigned] = true;
		}
#pragma omp barrier
#pragma omp for ordered
		for (int i = 1; i < TOTAL_SEATS; i++)
		{
			int seatAssigned;
			/* the critical section serializes seat inspection/claiming and
			 * the wrongly_assigned updates across threads */
#pragma omp critical
			{
				if (!seats[i])
				{
					/* own seat free: take it (correctly seated) */
					seats[i] = true;
				}
				else
				{
					/* own seat taken: retry random seats until a free one */
					seatAssigned = randomAssignment();
					while (seats[seatAssigned])
					{
						seatAssigned = randomAssignment();
					}
					printf("Person %d assigned %d\n", i, seatAssigned);
					seats[seatAssigned] = true;
					wrongly_assigned++;
				}
			}
		}
	}
	float correctly_seated = 100 * (TOTAL_SEATS - wrongly_assigned) / (float)TOTAL_SEATS;
	float incorrectly_seated = 100 - correctly_seated;
	printf("Correctly : %f\n", correctly_seated);
	printf("Incorrectly : %f\n", incorrectly_seated);
	return 0;
}
GB_unop__identity_fc32_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc32_uint16)
// op(A') function: GB (_unop_tran__identity_fc32_uint16)
// C type: GxB_FC32_t
// A type: uint16_t
// cast: GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity unary op with a uint16 -> single-complex typecast to a
// dense or bitmap array of values. (Auto-generated file; comments only.)
GrB_Info GB (_unop_apply__identity_fc32_uint16)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint16_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ; // skip entries absent from the bitmap
            uint16_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A, typecast uint16 -> single-complex, and apply the identity op.
// (Auto-generated file; comments only.)
GrB_Info GB (_unop_tran__identity_fc32_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // shared transpose kernel; expands via the GB_CAST_OP macro above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
panama_fmt_plug.c | /* Panama cracker patch for JtR. Hacked together during May of 2013 by Dhiru
* Kholia <dhiru at openwall.com>.
*
* This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and
* it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_panama_;
#elif FMT_REGISTERS_H
john_register_one(&fmt_panama_);
#else
#include <string.h>
#include "arch.h"
#include "sph_panama.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
// OMP_SCALE tuned on core i7 quad core HT
// 1 - 217k
// 64 - 1930k
// 128 - 2099k
// 256 - 2204k *** set to this level
// 512 - 2203k
// 1k - 2124k
#ifndef OMP_SCALE
#ifdef __MIC__
#define OMP_SCALE 8
#else
#define OMP_SCALE 256
#endif // __MIC__
#endif // OMP_SCALE
#endif // _OPENMP
#include "memdbg.h"
#define FORMAT_LABEL "Panama"
#define FORMAT_NAME ""
#define FORMAT_TAG "$panama$"
#define TAG_LENGTH 8
#define ALGORITHM_NAME "Panama 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 32
#define SALT_SIZE 0
#define BINARY_ALIGN 4
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests panama__tests[] = {
{"049d698307d8541f22870dfa0a551099d3d02bc6d57c610a06a4585ed8d35ff8", "T"},
{"$panama$049d698307d8541f22870dfa0a551099d3d02bc6d57c610a06a4585ed8d35ff8", "T"},
{"a2a70386b81fb918be17f00ff3e3b376a0462c4dc2eec7f2c63202c8874c037d", "abc"},
{"$panama$a2a70386b81fb918be17f00ff3e3b376a0462c4dc2eec7f2c63202c8874c037d", "abc"},
{"017686a23c4af3b9c074888ec76f893945d541cd17ee8011b2bd0ee2d581db34", "john"},
{"$panama$017686a23c4af3b9c074888ec76f893945d541cd17ee8011b2bd0ee2d581db34", "john"},
{"3919248ab4c8dea4843663c532db9823169a71d03b0f918082c9f53748dea1e8", "passweird"},
{"$panama$3919248ab4c8dea4843663c532db9823169a71d03b0f918082c9f53748dea1e8", "passweird"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
/* Allocate per-candidate buffers; under OpenMP the key space is scaled by
 * the thread count times OMP_SCALE so each thread gets a useful batch. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
	crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt);
}
/* Release the buffers allocated in init(), in reverse allocation order. */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}
/* Accept an optional "$panama$" tag followed by exactly 64 hex digits. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p;

	p = ciphertext;
	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	if (hexlenl(p) != BINARY_SIZE*2)
		return 0;
	return 1;
}
/* Decode the 64 hex digits (after the optional "$panama$" tag) into
 * BINARY_SIZE bytes. Returns a pointer to a static, word-aligned buffer. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *hex = ciphertext;
	int i;

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		hex = strrchr(ciphertext, '$') + 1;
	for (i = 0; i < BINARY_SIZE; i++)
		out[i] = (atoi16[ARCH_INDEX(hex[2 * i])] << 4) |
			atoi16[ARCH_INDEX(hex[2 * i + 1])];
	return out;
}
/* Hash-table lookups: low bits of the first 32-bit word of each digest,
 * masked to the framework's per-level table sizes. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/* Hash every queued candidate with Panama.
 * The for statement must be compiled unconditionally: previously it sat
 * inside #ifdef _OPENMP, so non-OpenMP builds hashed only index 0 while
 * still returning count. Only the pragma is conditional now. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
	{
		sph_panama_context ctx;

		sph_panama_init(&ctx);
		sph_panama(&ctx, saved_key[index], strlen(saved_key[index]));
		sph_panama_close(&ctx, (unsigned char*)crypt_out[index]);
	}
	return count;
}
/* Quick scan: compare the first machine word of the binary against every
 * computed digest. The loop must run in all builds: previously the for
 * statement was guarded by #ifdef _OPENMP, so non-OpenMP builds only ever
 * examined index 0. */
static int cmp_all(void *binary, int count)
{
	int index;

	for (index = 0; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}
/* Full BINARY_SIZE comparison against one candidate. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* cmp_one already compared all BINARY_SIZE bytes, so nothing left to check. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
static void panama_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the stored (possibly truncated) candidate plaintext. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Canonicalize bare 64-hex-digit hashes by prefixing the format tag.
 * Uses a static buffer -- presumably prepare() is only invoked from the
 * single-threaded loader path; it would race otherwise. */
static char *prepare(char *fields[10], struct fmt_main *self) {
	static char buf[BINARY_SIZE*2+TAG_LENGTH+1];
	char *hash = fields[1];
	if (strlen(hash) == BINARY_SIZE*2 && valid(hash, self)) {
		sprintf(buf, "%s%s", FORMAT_TAG, hash);
		return buf;
	}
	return hash;
}
/* Format descriptor: wires the functions above into JtR's dispatch table. */
struct fmt_main fmt_panama_ = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		panama__tests
	}, {
		init,
		done,
		fmt_default_reset,
		prepare,
		valid,
		fmt_default_split,
		get_binary,
		fmt_default_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		panama_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
GB_unop__sin_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__sin_fp64_fp64)
// op(A') function: GB (_unop_tran__sin_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = sin (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = sin (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = sin (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SIN || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply sin() elementwise to a dense or bitmap array of doubles.
// (Auto-generated file; comments only.)
GrB_Info GB (_unop_apply__sin_fp64_fp64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = sin (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ; // skip entries absent from the bitmap
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = sin (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A and apply sin() elementwise. (Auto-generated file; comments only.)
GrB_Info GB (_unop_tran__sin_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // shared transpose kernel; expands via the GB_CAST_OP macro above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
}
#endif
|
broadcast_reduce-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015-2017 by Contributors
* \file broadcast_reduce-inl.h
* \brief CPU-specific Function definition of broadcast and reduce operators
*/
#ifndef MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
#define MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
#include <mxnet/operator_util.h>
#include <algorithm>
#include <vector>
#include <string>
#include <utility>
#include "../mshadow_op.h"
#include "../operator_common.h"
namespace mxnet {
namespace op {
namespace broadcast {
using namespace mshadow;
const int MAX_DIM = 5;
/*! \brief Row-major strides of shape, with stride 0 on size-1 (broadcast) axes. */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  Shape<ndim> stride;
  index_t cumprod = 1;
  #pragma unroll
  for (int i = ndim - 1; i >= 0; --i) {
    stride[i] = (shape[i] > 1) ? cumprod : 0;
    cumprod *= shape[i];
  }
  return stride;
}
/*! \brief Unravel flat index idx w.r.t. shape and dot the resulting coordinate
 *  with stridej and stridek in a single pass, storing the results in *j and *k. */
template<int ndim>
MSHADOW_XINLINE void unravel_dot(const int idx, const Shape<ndim>& shape,
  const Shape<ndim>& stridej, const Shape<ndim>& stridek, int* j, int* k) {
  *j = 0;
  *k = 0;
  #pragma unroll
  for (int i = ndim-1, idx_t = idx; i >=0; --i) {
    const int tmp = idx_t / shape[i];
    const int coord = idx_t - tmp*shape[i]; // idx_t % shape[i] without a second div
    *j += coord*stridej[i];
    *k += coord*stridek[i];
    idx_t = tmp;
  }
}
/*! \brief Convert a flat index into an ndim coordinate w.r.t. shape. */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const int idx, const Shape<ndim>& shape) {
  Shape<ndim> ret;
  #pragma unroll
  for (int i = ndim-1, j = idx; i >=0; --i) {
    int tmp = j / shape[i];
    ret[i] = j - tmp*shape[i];
    j = tmp;
  }
  return ret;
}
/*! \brief Flatten a coordinate w.r.t. shape; coordinates on size-1 axes are
 *  ignored, which implements broadcasting. */
template<int ndim>
MSHADOW_XINLINE int ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  int ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    ret = ret * shape[i] + (shape[i] > 1) * coord[i];
  }
  return ret;
}
// Computes the "reduction geometry" between a reduced shape `small` and the
// full shape `big`: for every axis where the two differ, the axis extent of
// `big` is recorded in *dims and that axis's row-major stride within `big`
// in *stride. Differing axes are packed to the front of the outputs in
// original axis order; remaining slots stay 1.
// Returns the number of differing (i.e. reduced) axes.
template<int ndim>
MSHADOW_XINLINE int diff(const Shape<ndim>& small, const Shape<ndim>& big, Shape<ndim>* dims,
                         Shape<ndim>* stride) {
  int mdim = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    mdim += small[i] != big[i];      // count reduced axes
    (*dims)[i] = (*stride)[i] = 1;   // default-fill both outputs
  }
  #pragma unroll
  for (int i = ndim-1, j = mdim, s = 1; i >= 0; --i) {
    if (small[i] != big[i]) {
      --j;                           // fill slots back-to-front, keeping axis order
      (*stride)[j] = s;              // stride of axis i within `big`
      (*dims)[j] = big[i];           // extent of the reduced axis
    }
    s *= big[i];                     // running row-major stride
  }
  return mdim;
}
template<int ndim>
MSHADOW_XINLINE int unravel_dot(const int idx, const Shape<ndim>& shape,
                                const Shape<ndim>& stride) {
  // Equivalent to dot(unravel(idx, shape), stride), computed in one pass
  // without materializing the intermediate coordinate.
  int offset = 0;
  #pragma unroll
  for (int axis = ndim - 1, rem = idx; axis >= 0; --axis) {
    const int next = rem / shape[axis];
    offset += (rem - next * shape[axis]) * stride[axis];
    rem = next;
  }
  return offset;
}
template<int ndim>
MSHADOW_XINLINE int dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  // Inner product of a coordinate vector with a stride vector.
  int acc = 0;
  #pragma unroll
  for (int axis = 0; axis < ndim; ++axis) {
    acc += coord[axis] * stride[axis];
  }
  return acc;
}
template<typename DType>
MSHADOW_XINLINE void assign(DType* dst, const bool addto, const DType src) {
  // Write `src` to *dst, either overwriting it or accumulating into it
  // depending on the write request (kAddTo vs. kWriteTo/kWriteInplace).
  if (!addto) {
    *dst = src;
  } else {
    *dst += src;
  }
}
// Computes one output element of a broadcast binary op:
// out[idx] = OP(lhs[j], rhs[k]), where j/k are the positions obtained by
// re-raveling idx's output coordinate against the (possibly extent-1,
// i.e. broadcast) input shapes. If `addto` is set, the result is
// accumulated into out[idx] instead of overwriting it.
template<int ndim, typename DType, typename OP>
MSHADOW_XINLINE void binary_broadcast_assign(const int idx, const bool addto,
                                             const DType* __restrict lhs,
                                             const DType* __restrict rhs, DType* out,
                                             const Shape<ndim>& lshape, const Shape<ndim>& rshape,
                                             const Shape<ndim>& oshape) {
  const Shape<ndim> coord = unravel(idx, oshape);
  // ravel() ignores axes of extent 1, which implements the broadcast.
  const int j = ravel(coord, lshape);
  const int k = ravel(coord, rshape);
  assign(&out[idx], addto, OP::Map(lhs[j], rhs[k]));
}
// Sequentially reduces the M elements of `big` that map onto output element
// `idx` of `small`, then stores (or, if `addto`, accumulates) the result.
// `rshape`/`rstride` describe the reduced axes as produced by diff().
// `residual` is the reducer's auxiliary state and is folded into the value
// by Reducer::Finalize before the store.
template<typename Reducer, int ndim, typename DType, typename OP>
MSHADOW_XINLINE void seq_reduce_assign(const int idx, const int M, const bool addto,
                                       const DType* __restrict big, DType *small,
                                       const Shape<ndim>& bshape, const Shape<ndim>& sshape,
                                       const Shape<ndim>& rshape, const Shape<ndim>& rstride) {
  Shape<ndim> coord = unravel(idx, sshape);
  int j = ravel(coord, bshape);  // base offset of this output's slice in `big`
  DType val, residual;
  Reducer::SetInitValue(val, residual);
  for (int k = 0; k < M; ++k) {
    coord = unravel(k, rshape);
    Reducer::Reduce(val, OP::Map(big[j + dot(coord, rstride)]), residual);
  }
  Reducer::Finalize(val, residual);
  assign(&small[idx], addto, val);
}
#ifdef __HIPCC__
#include "broadcast_reduce-inl.cuh"
#else
template<int ndim, typename DType, typename OP>
void binary_broadcast_compute(const int N, const bool addto, const DType *lhs,
                              const DType *rhs, DType *out, const Shape<ndim> lshape,
                              const Shape<ndim> rshape, const Shape<ndim> oshape) {
  // Apply OP element-wise over the whole broadcast output domain [0, N).
  for (int i = 0; i < N; ++i) {
    binary_broadcast_assign<ndim, DType, OP>(i, addto, lhs, rhs, out,
                                             lshape, rshape, oshape);
  }
}
// CPU entry point for a broadcast binary op: runs binary_broadcast_compute
// over every element of `out`. No-op for kNullOp; kAddTo accumulates into
// the existing output.
template<int ndim, typename DType, typename OP>
void BinaryBroadcastComputeImpl(Stream<cpu> *s, const OpReqType req,
                                const TBlob& lhs, const TBlob& rhs, const TBlob& out) {
  if (req == kNullOp) return;
  int N = out.shape_.Size();
  binary_broadcast_compute<ndim, DType, OP>(N, req == kAddTo, lhs.dptr<DType>(), rhs.dptr<DType>(),
    out.dptr<DType>(), lhs.shape_.get<ndim>(), rhs.shape_.get<ndim>(),
    out.shape_.get<ndim>());
}
// Reduces `big` into the N elements of `small` (M input elements per
// output element). Parallelized over output elements with OpenMP, so the
// loop iterations are independent and need no synchronization.
template<typename Reducer, int ndim, typename DType, typename OP>
void seq_reduce_compute(const int N, const int M, const bool addto,
                        const DType *big, DType *small, const Shape<ndim> bshape,
                        const Shape<ndim> sshape, const Shape<ndim> rshape,
                        const Shape<ndim> rstride) {
  #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
  for (int idx = 0; idx < N; ++idx) {
    seq_reduce_assign<Reducer, ndim, DType, OP>(idx, M, addto, big, small, bshape, sshape, rshape,
      rstride);
  }
}
// Same reduction as seq_reduce_compute, but the M per-reduction input
// offsets have been precomputed into `ws_dptr` (see ReduceWithExtraMem),
// trading workspace memory for the repeated unravel/dot arithmetic.
//
// Fix: call Reducer::Finalize before writing the result, matching the
// sibling seq_reduce_assign. For reducers whose Finalize post-processes the
// accumulated value with its residual, omitting it made the workspace path
// produce a different result than the non-workspace path.
template <typename Reducer, int ndim, typename DType, typename OP>
void seq_reduce_compute_extra_mem(const int N, const int M, const bool addto,
                                  const DType* big, DType* small,
                                  const Shape<ndim> bshape,
                                  const Shape<ndim> sshape,
                                  const Shape<ndim> rshape,
                                  const Shape<ndim> rstride,
                                  const index_t* ws_dptr) {
  #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
  for (int idx = 0; idx < N; ++idx) {
    Shape<ndim> coord = unravel(idx, sshape);
    int j = ravel(coord, bshape);  // base offset of this output's slice in `big`
    DType val, residual;
    Reducer::SetInitValue(val, residual);
    for (int k = 0; k < M; ++k) {
      Reducer::Reduce(val, OP::Map(big[j + ws_dptr[k]]), residual);
    }
    Reducer::Finalize(val, residual);  // fold residual, consistent with seq_reduce_assign
    assign(&small[idx], addto, val);
  }
}
// Reduces `big` into `small` on CPU. The reduced axes are derived by diff()
// from the two shapes; `workspace` is unused in this variant (CPU reports a
// zero workspace requirement, see ReduceWorkspaceSize).
template <typename Reducer, int ndim, typename DType, typename OP>
void Reduce(Stream<cpu>* s, const TBlob& small, const OpReqType req,
            const Tensor<cpu, 1, char>& workspace, const TBlob& big) {
  if (req == kNullOp) return;
  Shape<ndim> rshape, rstride;
  diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
  int N = small.shape_.Size(), M = rshape.Size();  // outputs / inputs-per-output
  seq_reduce_compute<Reducer, ndim, DType, OP>(
    N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(),
    big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride);
}
// Variant of Reduce() that first precomputes the M reduction-domain input
// offsets into `workspace` (they are identical for every output element),
// then reduces without per-element coordinate arithmetic.
// The caller must size `workspace` to hold at least M index_t values.
template <typename Reducer, int ndim, typename DType, typename OP>
void ReduceWithExtraMem(Stream<cpu>* s, const TBlob& small, const OpReqType req,
                        const Tensor<cpu, 1, char>& workspace, const TBlob& big) {
  using namespace mxnet_op;
  if (req == kNullOp) return;
  Shape<ndim> rshape, rstride;
  diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
  index_t* ws_dptr = reinterpret_cast<index_t*>(workspace.dptr_);
  int N = small.shape_.Size(), M = rshape.Size();
  // Offset of the k-th reduced element relative to an output's base offset.
  #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
  for (int k = 0; k < M; k++) {
    Shape<ndim> coord = unravel(k, rshape);
    ws_dptr[k] = dot(coord, rstride);
  }
  seq_reduce_compute_extra_mem<Reducer, ndim, DType, OP>(
    N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(), big.shape_.get<ndim>(),
    small.shape_.get<ndim>(), rshape, rstride, ws_dptr);
}
// The plain CPU Reduce() needs no auxiliary workspace (cf. the GPU overload
// in broadcast_reduce-inl.cuh). NOTE(review): ReduceWithExtraMem does read
// `workspace`; callers of that variant size the buffer themselves.
template<int ndim, typename DType>
size_t ReduceWorkspaceSize(Stream<cpu> *s, const TShape& small, const OpReqType req,
                           const TShape& big) {
  return 0;
}
// Two-extra-input (lhs/rhs) overload; the CPU fused-reduction path also
// needs no auxiliary workspace.
template<int ndim, typename DType>
size_t ReduceWorkspaceSize(Stream<cpu> *s, const TShape& small, const OpReqType req,
                           const TShape& big, const TShape& lhs, const TShape& rhs) {
  return 0;
}
// Fused reduction of OP1(big, OP2(lhs, rhs)) for output element `idx`:
// iterates the M positions of the reduction domain, combining the
// corresponding elements of big, lhs and rhs. Each input carries its own
// shape/stride pair from diff(), so any of them may be broadcast.
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
MSHADOW_XINLINE void seq_reduce_assign(const int idx, const int M, const bool addto,
                                       const DType* __restrict big, const DType* __restrict lhs,
                                       const DType* __restrict rhs, DType *small,
                                       const Shape<ndim>& big_shape, const Shape<ndim>& lhs_shape0,
                                       const Shape<ndim>& rhs_shape0,
                                       const Shape<ndim>& small_shape, const Shape<ndim>& rshape,
                                       const Shape<ndim>& lhs_shape, const Shape<ndim>& rhs_shape,
                                       const Shape<ndim>& rstride, const Shape<ndim>& lhs_stride,
                                       const Shape<ndim>& rhs_stride) {
  Shape<ndim> coord = unravel(idx, small_shape);
  // Base offsets of this output's slice within each of the three inputs.
  const int idx_big0 = ravel(coord, big_shape);
  const int idx_lhs0 = ravel(coord, lhs_shape0);
  const int idx_rhs0 = ravel(coord, rhs_shape0);
  DType val, residual;
  Reducer::SetInitValue(val, residual);
  for (int k = 0; k < M; ++k) {
    Shape<ndim> coord_big = unravel(k, rshape);
    int idx_big = idx_big0 + dot(coord_big, rstride);
    Shape<ndim> coord_lhs = unravel(k, lhs_shape);
    int idx_lhs = idx_lhs0 + dot(coord_lhs, lhs_stride);
    Shape<ndim> coord_rhs = unravel(k, rhs_shape);
    int idx_rhs = idx_rhs0 + dot(coord_rhs, rhs_stride);
    Reducer::Reduce(val, OP1::Map(big[idx_big], OP2::Map(lhs[idx_lhs], rhs[idx_rhs])), residual);
  }
  Reducer::Finalize(val, residual);
  assign(&small[idx], addto, val);
}
// Parallel driver for the fused OP1(big, OP2(lhs, rhs)) reduction: one
// independent seq_reduce_assign call per output element.
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
void seq_reduce_compute(const int N, const int M, const bool addto,
                        const DType *big, const DType *lhs, const DType *rhs, DType *small,
                        const Shape<ndim> big_shape, const Shape<ndim> small_shape,
                        const Shape<ndim> rshape, const Shape<ndim> rstride,
                        const Shape<ndim> lhs_shape, const Shape<ndim> lhs_stride,
                        const Shape<ndim> rhs_shape, const Shape<ndim> rhs_stride,
                        const Shape<ndim>& lhs_shape0, const Shape<ndim>& rhs_shape0) {
  #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
  for (int idx = 0; idx < N; ++idx) {
    seq_reduce_assign<Reducer, ndim, DType, OP1, OP2>(idx, M, addto, big, lhs, rhs, small,
      big_shape, lhs_shape0, rhs_shape0, small_shape, rshape, lhs_shape, rhs_shape, rstride,
      lhs_stride, rhs_stride);
  }
}
// CPU entry point for the fused reduction Reducer over OP1(big, OP2(lhs,
// rhs)) into `small`. The reduction geometry of each input is derived with
// diff() against the output shape; `workspace` is unused on CPU.
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
void Reduce(Stream<cpu> *s, const TBlob& small, const OpReqType req,
            const Tensor<cpu, 1, char>& workspace, const TBlob& big, const TBlob& lhs,
            const TBlob& rhs) {
  if (req == kNullOp) return;
  Shape<ndim> rshape, rstride;
  diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
  int N = small.shape_.Size();
  int M = rshape.Size();  // elements reduced per output element
  Shape<ndim> lhs_shape, lhs_stride;
  diff(small.shape_.get<ndim>(), lhs.shape_.get<ndim>(), &lhs_shape, &lhs_stride);
  Shape<ndim> rhs_shape, rhs_stride;
  diff(small.shape_.get<ndim>(), rhs.shape_.get<ndim>(), &rhs_shape, &rhs_stride);
  seq_reduce_compute<Reducer, ndim, DType, OP1, OP2>(
    N, M, req == kAddTo,
    big.dptr<DType>(), lhs.dptr<DType>(), rhs.dptr<DType>(), small.dptr<DType>(),
    big.shape_.get<ndim>(), small.shape_.get<ndim>(),
    rshape, rstride,
    lhs_shape, lhs_stride,
    rhs_shape, rhs_stride,
    lhs.shape_.get<ndim>(), rhs.shape_.get<ndim>());
}
#endif
} // namespace broadcast
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
|
ASTMatchers.h | //===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements matchers to be used together with the MatchFinder to
// match AST nodes.
//
// Matchers are created by generator functions, which can be combined in
// a functional in-language DSL to express queries over the C++ AST.
//
// For example, to match a class with a certain name, one would call:
// cxxRecordDecl(hasName("MyClass"))
// which returns a matcher that can be used to find all AST nodes that declare
// a class named 'MyClass'.
//
// For more complicated match expressions we're often interested in accessing
// multiple parts of the matched AST nodes once a match is found. In that case,
// call `.bind("name")` on match expressions that match the nodes you want to
// access.
//
// For example, when we're interested in child classes of a certain class, we
// would write:
// cxxRecordDecl(hasName("MyClass"), has(recordDecl().bind("child")))
// When the match is found via the MatchFinder, a user provided callback will
// be called with a BoundNodes instance that contains a mapping from the
// strings that we provided for the `.bind()` calls to the nodes that were
// matched.
// In the given example, each time our matcher finds a match we get a callback
// where "child" is bound to the RecordDecl node of the matching child
// class declaration.
//
// See ASTMatchersInternal.h for a more in-depth explanation of the
// implementation details of the matcher framework.
//
// See ASTMatchFinder.h for how to use the generated matchers to run over
// an AST.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/LambdaCapture.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/ASTMatchers/ASTMatchersInternal.h"
#include "clang/ASTMatchers/ASTMatchersMacros.h"
#include "clang/Basic/AttrKinds.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TypeTraits.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <limits>
#include <string>
#include <utility>
#include <vector>
namespace clang {
namespace ast_matchers {
/// Maps string IDs to AST nodes matched by parts of a matcher.
///
/// The bound nodes are generated by calling \c bind("id") on the node matchers
/// of the nodes we want to access later.
///
/// The instances of BoundNodes are created by \c MatchFinder when the user's
/// callbacks are executed every time a match is found.
class BoundNodes {
public:
  /// Returns the AST node bound to \c ID.
  ///
  /// Returns NULL if there was no node bound to \c ID or if there is a node but
  /// it cannot be converted to the specified type.
  template <typename T>
  const T *getNodeAs(StringRef ID) const {
    return MyBoundNodes.getNodeAs<T>(ID);
  }

  /// Type of mapping from binding identifiers to bound nodes. This type
  /// is an associative container with a key type of \c std::string and a value
  /// type of \c clang::DynTypedNode
  using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap;

  /// Retrieve mapping from binding identifiers to bound nodes.
  const IDToNodeMap &getMap() const {
    return MyBoundNodes.getMap();
  }

private:
  // Only the match machinery can construct instances.
  friend class internal::BoundNodesTreeBuilder;

  /// Create BoundNodes from a pre-filled map of bindings.
  BoundNodes(internal::BoundNodesMap &MyBoundNodes)
      : MyBoundNodes(MyBoundNodes) {}

  // Copy of the bindings; getNodeAs/getMap read from it.
  internal::BoundNodesMap MyBoundNodes;
};
/// Types of matchers for the top-level classes in the AST class
/// hierarchy.
/// @{
using DeclarationMatcher = internal::Matcher<Decl>;
using StatementMatcher = internal::Matcher<Stmt>;
using TypeMatcher = internal::Matcher<QualType>;
using TypeLocMatcher = internal::Matcher<TypeLoc>;
using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>;
using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>;
using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>;
/// @}
/// Matches any node.
///
/// Useful when another matcher requires a child matcher, but there's no
/// additional constraint. This will often be used with an explicit conversion
/// to an \c internal::Matcher<> type such as \c TypeMatcher.
///
/// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g.,
/// \code
/// "int* p" and "void f()" in
/// int* p;
/// void f();
/// \endcode
///
/// Usable as: Any Matcher
inline internal::TrueMatcher anything() {
  // Trivially-true matcher: succeeds on every node.
  return internal::TrueMatcher();
}
/// Matches the top declaration context.
///
/// Given
/// \code
/// int X;
/// namespace NS {
/// int Y;
/// } // namespace NS
/// \endcode
/// decl(hasDeclContext(translationUnitDecl()))
/// matches "int X", but not "int Y".
extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl>
translationUnitDecl;
/// Matches typedef declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefDecl()
/// matches "typedef int X", but not "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl>
typedefDecl;
/// Matches typedef name declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefNameDecl()
/// matches "typedef int X" and "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl>
typedefNameDecl;
/// Matches type alias declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typeAliasDecl()
/// matches "using Y = int", but not "typedef int X"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl>
typeAliasDecl;
/// Matches type alias template declarations.
///
/// typeAliasTemplateDecl() matches
/// \code
/// template <typename T>
/// using Y = X<T>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl>
typeAliasTemplateDecl;
/// Matches AST nodes that were expanded within the main-file.
///
/// Example matches X but not Y
/// (matcher = cxxRecordDecl(isExpansionInMainFile()))
/// \code
/// #include <Y.h>
/// class X {};
/// \endcode
/// Y.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInMainFile,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  // A node matches when the expansion location of its beginning lies in the
  // translation unit's main file.
  auto &SM = Finder->getASTContext().getSourceManager();
  return SM.isInMainFile(SM.getExpansionLoc(Node.getBeginLoc()));
}
/// Matches AST nodes that were expanded within system-header-files.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInSystemHeader()))
/// \code
/// #include <SystemHeader.h>
/// class X {};
/// \endcode
/// SystemHeader.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  // Resolve the node's begin location through macro expansions; nodes with
  // no valid location cannot be attributed to any header.
  auto &SM = Finder->getASTContext().getSourceManager();
  auto ExpansionLoc = SM.getExpansionLoc(Node.getBeginLoc());
  if (ExpansionLoc.isInvalid())
    return false;
  return SM.isInSystemHeader(ExpansionLoc);
}
/// Matches AST nodes that were expanded within files whose name is
/// partially matching a given regex.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*")))
/// \code
/// #include "ASTMatcher.h"
/// class X {};
/// \endcode
/// ASTMatcher.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER_P(isExpansionInFileMatching,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc),
                          std::string, RegExp) {
  // Resolve the node's begin location through macro expansions, then test
  // the name of the containing file against RegExp.
  auto &SourceManager = Finder->getASTContext().getSourceManager();
  auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc());
  if (ExpansionLoc.isInvalid()) {
    return false;
  }
  auto FileEntry =
      SourceManager.getFileEntryForID(SourceManager.getFileID(ExpansionLoc));
  if (!FileEntry) {
    // Locations with no backing file entry cannot match any filename.
    return false;
  }
  auto Filename = FileEntry->getName();
  // NOTE: the regex is compiled on every invocation, and the match is
  // partial (unanchored) over the full file name.
  llvm::Regex RE(RegExp);
  return RE.match(Filename);
}
/// Matches statements that are (transitively) expanded from the named macro.
/// Does not match if only part of the statement is expanded from that macro or
/// if different parts of the statement are expanded from different
/// appearances of the macro.
///
/// FIXME: Change to be a polymorphic matcher that works on any syntactic
/// node. There's nothing `Stmt`-specific about it.
AST_MATCHER_P(Stmt, isExpandedFromMacro, llvm::StringRef, MacroName) {
  // Verifies that the statement's beginning and ending are both expanded from
  // the same instance of the given macro.
  auto& Context = Finder->getASTContext();
  llvm::Optional<SourceLocation> B =
      internal::getExpansionLocOfMacro(MacroName, Node.getBeginLoc(), Context);
  if (!B) return false;
  llvm::Optional<SourceLocation> E =
      internal::getExpansionLocOfMacro(MacroName, Node.getEndLoc(), Context);
  if (!E) return false;
  // Same expansion instance iff both ends resolve to the identical location.
  return *B == *E;
}
/// Matches declarations.
///
/// Examples matches \c X, \c C, and the friend declaration inside \c C;
/// \code
/// void X();
/// class C {
/// friend X;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<Decl> decl;
/// Matches a declaration of a linkage specification.
///
/// Given
/// \code
/// extern "C" {}
/// \endcode
/// linkageSpecDecl()
/// matches "extern "C" {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl>
linkageSpecDecl;
/// Matches a declaration of anything that could have a name.
///
/// Example matches \c X, \c S, the anonymous union type, \c i, and \c U;
/// \code
/// typedef int X;
/// struct S {
/// union {
/// int i;
/// } U;
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl;
/// Matches a declaration of label.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelDecl()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl;
/// Matches a declaration of a namespace.
///
/// Given
/// \code
/// namespace {}
/// namespace test {}
/// \endcode
/// namespaceDecl()
/// matches "namespace {}" and "namespace test {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl>
namespaceDecl;
/// Matches a declaration of a namespace alias.
///
/// Given
/// \code
/// namespace test {}
/// namespace alias = ::test;
/// \endcode
/// namespaceAliasDecl()
/// matches "namespace alias" but not "namespace test"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl>
namespaceAliasDecl;
/// Matches class, struct, and union declarations.
///
/// Example matches \c X, \c Z, \c U, and \c S
/// \code
/// class X;
/// template<class T> class Z {};
/// struct S {};
/// union U {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl;
/// Matches C++ class declarations.
///
/// Example matches \c X, \c Z
/// \code
/// class X;
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl>
cxxRecordDecl;
/// Matches C++ class template declarations.
///
/// Example matches \c Z
/// \code
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl>
classTemplateDecl;
/// Matches C++ class template specializations.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
/// \endcode
/// classTemplateSpecializationDecl()
/// matches the specializations \c A<int> and \c A<double>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplateSpecializationDecl>
classTemplateSpecializationDecl;
/// Matches C++ class template partial specializations.
///
/// Given
/// \code
/// template<class T1, class T2, int I>
/// class A {};
///
/// template<class T, int I>
/// class A<T, T*, I> {};
///
/// template<>
/// class A<int, int, 1> {};
/// \endcode
/// classTemplatePartialSpecializationDecl()
/// matches the specialization \c A<T,T*,I> but not \c A<int,int,1>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplatePartialSpecializationDecl>
classTemplatePartialSpecializationDecl;
/// Matches declarator declarations (field, variable, function
/// and non-type template parameter declarations).
///
/// Given
/// \code
/// class X { int y; };
/// \endcode
/// declaratorDecl()
/// matches \c int y.
extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl>
declaratorDecl;
/// Matches parameter variable declarations.
///
/// Given
/// \code
/// void f(int x);
/// \endcode
/// parmVarDecl()
/// matches \c int x.
extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl>
parmVarDecl;
/// Matches C++ access specifier declarations.
///
/// Given
/// \code
/// class C {
/// public:
/// int a;
/// };
/// \endcode
/// accessSpecDecl()
/// matches 'public:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl>
accessSpecDecl;
/// Matches constructor initializers.
///
/// Examples matches \c i(42).
/// \code
/// class C {
/// C() : i(42) {}
/// int i;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<CXXCtorInitializer>
cxxCtorInitializer;
/// Matches template arguments.
///
/// Given
/// \code
/// template <typename T> struct C {};
/// C<int> c;
/// \endcode
/// templateArgument()
/// matches 'int' in C<int>.
extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument;
/// Matches template name.
///
/// Given
/// \code
/// template <typename T> class X { };
/// X<int> xi;
/// \endcode
/// templateName()
/// matches 'X' in X<int>.
extern const internal::VariadicAllOfMatcher<TemplateName> templateName;
/// Matches non-type template parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// nonTypeTemplateParmDecl()
/// matches 'N', but not 'T'.
extern const internal::VariadicDynCastAllOfMatcher<Decl,
NonTypeTemplateParmDecl>
nonTypeTemplateParmDecl;
/// Matches template type parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// templateTypeParmDecl()
/// matches 'T', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl>
templateTypeParmDecl;
/// Matches public C++ declarations.
///
/// Given
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c;
/// };
/// \endcode
/// fieldDecl(isPublic())
/// matches 'int a;'
AST_MATCHER(Decl, isPublic) {
  // True when the declaration's access specifier is 'public'.
  return Node.getAccess() == AS_public;
}
/// Matches protected C++ declarations.
///
/// Given
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c;
/// };
/// \endcode
/// fieldDecl(isProtected())
/// matches 'int b;'
AST_MATCHER(Decl, isProtected) {
  // True when the declaration's access specifier is 'protected'.
  return Node.getAccess() == AS_protected;
}
/// Matches private C++ declarations.
///
/// Given
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c;
/// };
/// \endcode
/// fieldDecl(isPrivate())
/// matches 'int c;'
AST_MATCHER(Decl, isPrivate) {
  // True when the declaration's access specifier is 'private'.
  return Node.getAccess() == AS_private;
}
/// Matches non-static data members that are bit-fields.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b;
/// };
/// \endcode
/// fieldDecl(isBitField())
/// matches 'int a;' but not 'int b;'.
AST_MATCHER(FieldDecl, isBitField) {
  // Delegates to FieldDecl::isBitField(); matches regardless of width.
  return Node.isBitField();
}
/// Matches non-static data members that are bit-fields of the specified
/// bit width.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b : 4;
/// int c : 2;
/// };
/// \endcode
/// fieldDecl(hasBitWidth(2))
/// matches 'int a;' and 'int c;' but not 'int b;'.
AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) {
  // Non-bit-field members never match; otherwise compare the evaluated
  // bit width against the expected Width.
  return Node.isBitField() &&
         Node.getBitWidthValue(Finder->getASTContext()) == Width;
}
/// Matches non-static data members that have an in-class initializer.
///
/// Given
/// \code
/// class C {
/// int a = 2;
/// int b = 3;
/// int c;
/// };
/// \endcode
/// fieldDecl(hasInClassInitializer(integerLiteral(equals(2))))
/// matches 'int a;' but not 'int b;'.
/// fieldDecl(hasInClassInitializer(anything()))
/// matches 'int a;' and 'int b;' but not 'int c;'.
AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>,
              InnerMatcher) {
  // Fields without an in-class initializer never match; otherwise run
  // InnerMatcher on the initializer expression.
  const Expr *Init = Node.getInClassInitializer();
  if (Init == nullptr)
    return false;
  return InnerMatcher.matches(*Init, Finder, Builder);
}
/// Determines whether the function is "main", which is the entry point
/// into an executable program.
AST_MATCHER(FunctionDecl, isMain) {
  // Delegates to FunctionDecl::isMain().
  return Node.isMain();
}
/// Matches the specialized template of a specialization declaration.
///
/// Given
/// \code
/// template<typename T> class A {}; #1
/// template<> class A<int> {}; #2
/// \endcode
/// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl()))
/// matches '#2' with classTemplateDecl() matching the class template
/// declaration of 'A' at #1.
AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate,
              internal::Matcher<ClassTemplateDecl>, InnerMatcher) {
  // Run InnerMatcher on the class template this specialization was
  // instantiated from; fail when none is available.
  const ClassTemplateDecl *Specialized = Node.getSpecializedTemplate();
  if (Specialized == nullptr)
    return false;
  return InnerMatcher.matches(*Specialized, Finder, Builder);
}
/// Matches a declaration that has been implicitly added
/// by the compiler (eg. implicit default/copy constructors).
AST_MATCHER(Decl, isImplicit) {
  // Matches compiler-synthesized declarations (Decl::isImplicit()).
  return Node.isImplicit();
}
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl that have at least one TemplateArgument matching the given
/// InnerMatcher.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
///
///   template<typename T> void f() {};
/// void func() { f<int>(); };
/// \endcode
///
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(asString("int"))))
/// matches the specialization \c A<int>
///
/// functionDecl(hasAnyTemplateArgument(refersToType(asString("int"))))
/// matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P(
    hasAnyTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    internal::Matcher<TemplateArgument>, InnerMatcher) {
  // Fetch the node's template arguments (node-kind specific; see the
  // internal::getTemplateSpecializationArgs overloads) and succeed if any
  // one of them satisfies InnerMatcher.
  ArrayRef<TemplateArgument> List =
      internal::getTemplateSpecializationArgs(Node);
  return matchesFirstInRange(InnerMatcher, List.begin(), List.end(), Finder,
                             Builder);
}
/// Causes all nested matchers to be matched with the specified traversal kind.
///
/// Given
/// \code
/// void foo()
/// {
/// int i = 3.0;
/// }
/// \endcode
/// The matcher
/// \code
/// traverse(TK_IgnoreImplicitCastsAndParentheses,
/// varDecl(hasInitializer(floatLiteral().bind("init")))
/// )
/// \endcode
/// matches the variable declaration with "init" bound to the "3.0".
// traverse() overload for a plain Matcher<T>: wraps it in a
// TraversalMatcher so all nested matchers run with traversal kind TK, and
// restricts the wrapper back to the original matcher's supported kind.
template <typename T>
internal::Matcher<T> traverse(TraversalKind TK,
                              const internal::Matcher<T> &InnerMatcher) {
  return internal::DynTypedMatcher::constructRestrictedWrapper(
             new internal::TraversalMatcher<T>(TK, InnerMatcher),
             InnerMatcher.getID().first)
      .template unconditionalConvertTo<T>();
}
// traverse() overload for BindableMatcher<T>: same wrapping as the
// Matcher<T> overload, but the result stays bindable (supports .bind()).
template <typename T>
internal::BindableMatcher<T>
traverse(TraversalKind TK, const internal::BindableMatcher<T> &InnerMatcher) {
  return internal::BindableMatcher<T>(
      internal::DynTypedMatcher::constructRestrictedWrapper(
          new internal::TraversalMatcher<T>(TK, InnerMatcher),
          InnerMatcher.getID().first)
          .template unconditionalConvertTo<T>());
}
// traverse() overload for variadic combinators (allOf, anyOf, ...): the
// operator has not been converted to a concrete matcher yet, so the
// traversal kind is carried alongside it in a TraversalWrapper.
template <typename... T>
internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>
traverse(TraversalKind TK,
         const internal::VariadicOperatorMatcher<T...> &InnerMatcher) {
  return internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>(
      TK, InnerMatcher);
}
// traverse() overload for argument-adapting matcher adaptors; like the
// variadic-operator overload, the traversal kind is deferred via
// TraversalWrapper until the adaptor becomes a concrete matcher.
template <template <typename ToArg, typename FromArg> class ArgumentAdapterT,
          typename T, typename ToTypes>
internal::TraversalWrapper<
    internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>>
traverse(TraversalKind TK, const internal::ArgumentAdaptingMatcherFuncAdaptor<
                               ArgumentAdapterT, T, ToTypes> &InnerMatcher) {
  return internal::TraversalWrapper<
      internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T,
                                                   ToTypes>>(TK, InnerMatcher);
}
// traverse() overload for one-parameter polymorphic matchers; defers the
// traversal kind via TraversalWrapper until type resolution.
template <template <typename T, typename P1> class MatcherT, typename P1,
          typename ReturnTypesF>
internal::TraversalWrapper<
    internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>
traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam1<
                               MatcherT, P1, ReturnTypesF> &InnerMatcher) {
  return internal::TraversalWrapper<
      internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>(
      TK, InnerMatcher);
}
// traverse() overload for two-parameter polymorphic matchers; defers the
// traversal kind via TraversalWrapper until type resolution.
template <template <typename T, typename P1, typename P2> class MatcherT,
          typename P1, typename P2, typename ReturnTypesF>
internal::TraversalWrapper<
    internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>
traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam2<
                               MatcherT, P1, P2, ReturnTypesF> &InnerMatcher) {
  return internal::TraversalWrapper<
      internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>(
      TK, InnerMatcher);
}
/// Matches expressions that match InnerMatcher after any implicit AST
/// nodes are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// class C {};
/// C a = C();
/// C b;
/// C c = b;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr())))
/// \endcode
/// would match the declarations for a, b, and c.
/// While
/// \code
/// varDecl(hasInitializer(cxxConstructExpr()))
/// \endcode
/// only match the declarations for b and c.
AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>,
              InnerMatcher) {
  // Strip all implicit AST nodes first, then run the inner matcher on what
  // remains.
  const Expr &Stripped = *Node.IgnoreImplicit();
  return InnerMatcher.matches(Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after any implicit casts
/// are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = 0;
/// const int c = a;
/// int *d = arr;
/// long e = (long) 0l;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringImpCasts(declRefExpr())))
/// \endcode
/// would match the declarations for a, b, c, and d, but not e.
/// While
/// \code
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// \endcode
/// only match the declarations for b, c, and d.
AST_MATCHER_P(Expr, ignoringImpCasts,
              internal::Matcher<Expr>, InnerMatcher) {
  // Skip any implicit-cast nodes wrapping the expression before matching.
  const Expr *Stripped = Node.IgnoreImpCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after parentheses and
/// casts are stripped off.
///
/// Implicit and non-C Style casts are also discarded.
/// Given
/// \code
/// int a = 0;
/// char b = (0);
/// void* c = reinterpret_cast<char*>(0);
/// char d = char(0);
/// \endcode
/// The matcher
/// varDecl(hasInitializer(ignoringParenCasts(integerLiteral())))
/// would match the declarations for a, b, c, and d.
/// while
/// varDecl(hasInitializer(integerLiteral()))
/// only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) {
  // Drop surrounding parentheses and casts, then match the core expression.
  const Expr *Stripped = Node.IgnoreParenCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after implicit casts and
/// parentheses are stripped off.
///
/// Explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = (0);
/// const int c = a;
/// int *d = (arr);
/// long e = ((long) 0l);
/// \endcode
/// The matchers
/// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr())))
/// would match the declarations for a, b, c, and d, but not e.
/// while
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// would only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenImpCasts,
              internal::Matcher<Expr>, InnerMatcher) {
  // Drop parentheses and implicit casts (explicit casts are kept), then
  // match the resulting expression.
  const Expr *Stripped = Node.IgnoreParenImpCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches types that match InnerMatcher after any parens are stripped.
///
/// Given
/// \code
/// void (*fp)(void);
/// \endcode
/// The matcher
/// \code
/// varDecl(hasType(pointerType(pointee(ignoringParens(functionType())))))
/// \endcode
/// would match the declaration for fp.
AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>,
                       InnerMatcher, 0) {
  // Match against the type with any paren sugar removed.
  const QualType Stripped = Node.IgnoreParens();
  return InnerMatcher.matches(Stripped, Finder, Builder);
}
/// Overload \c ignoringParens for \c Expr.
///
/// Given
/// \code
/// const char* str = ("my-string");
/// \endcode
/// The matcher
/// \code
/// implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral())))
/// \endcode
/// would match the implicit cast resulting from the assignment.
AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>,
                       InnerMatcher, 1) {
  // Match against the expression with any enclosing parentheses removed.
  return InnerMatcher.matches(*Node.IgnoreParens(), Finder, Builder);
}
/// Matches expressions that are instantiation-dependent even if it is
/// neither type- nor value-dependent.
///
/// In the following example, the expression sizeof(sizeof(T() + T()))
/// is instantiation-dependent (since it involves a template parameter T),
/// but is neither type- nor value-dependent, since the type of the inner
/// sizeof is known (std::size_t) and therefore the size of the outer
/// sizeof is known.
/// \code
/// template<typename T>
/// void f(T x, T y) { sizeof(sizeof(T() + T())); }
/// \endcode
/// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()))
AST_MATCHER(Expr, isInstantiationDependent) {
  // Direct query on the expression: true when it involves a template
  // parameter even if its type and value are already known.
  return Node.isInstantiationDependent();
}
/// Matches expressions that are type-dependent because the template type
/// is not yet instantiated.
///
/// For example, the expressions "x" and "x + y" are type-dependent in
/// the following code, but "y" is not type-dependent:
/// \code
/// template<typename T>
/// void add(T x, int y) {
/// x + y;
/// }
/// \endcode
/// expr(isTypeDependent()) matches x + y
AST_MATCHER(Expr, isTypeDependent) { return Node.isTypeDependent(); }
/// Matches expression that are value-dependent because they contain a
/// non-type template parameter.
///
/// For example, the array bound of "Chars" in the following example is
/// value-dependent.
/// \code
/// template<int Size> int f() { return Size; }
/// \endcode
/// expr(isValueDependent()) matches return Size
AST_MATCHER(Expr, isValueDependent) { return Node.isValueDependent(); }
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl where the n'th TemplateArgument matches the given InnerMatcher.
///
/// Given
/// \code
/// template<typename T, typename U> class A {};
/// A<bool, int> b;
/// A<int, bool> c;
///
/// template<typename T> void f() {}
/// void func() { f<int>(); };
/// \endcode
/// classTemplateSpecializationDecl(hasTemplateArgument(
/// 1, refersToType(asString("int"))))
/// matches the specialization \c A<bool, int>
///
/// functionDecl(hasTemplateArgument(0, refersToType(asString("int"))))
/// matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P2(
    hasTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) {
  // Fetch the specialization's argument list and match the N'th entry;
  // an out-of-range index never matches.
  ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  return N < Args.size() && InnerMatcher.matches(Args[N], Finder, Builder);
}
/// Matches if the number of template arguments equals \p N.
///
/// Given
/// \code
/// template<typename T> struct C {};
/// C<int> c;
/// \endcode
/// classTemplateSpecializationDecl(templateArgumentCountIs(1))
/// matches C<int>.
AST_POLYMORPHIC_MATCHER_P(
    templateArgumentCountIs,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType),
    unsigned, N) {
  // Compare the number of template arguments of the specialization with N.
  const auto Args = internal::getTemplateSpecializationArgs(Node);
  return Args.size() == N;
}
/// Matches a TemplateArgument that refers to a certain type.
///
/// Given
/// \code
/// struct X {};
/// template<typename T> struct A {};
/// A<X> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(class(hasName("X")))))
/// matches the specialization \c A<X>
AST_MATCHER_P(TemplateArgument, refersToType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Only type template arguments can refer to a type; other kinds never
  // match.
  return Node.getKind() == TemplateArgument::Type &&
         InnerMatcher.matches(Node.getAsType(), Finder, Builder);
}
/// Matches a TemplateArgument that refers to a certain template.
///
/// Given
/// \code
/// template<template <typename> class S> class X {};
/// template<typename T> class Y {};
/// X<Y> xi;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToTemplate(templateName())))
/// matches the specialization \c X<Y>
AST_MATCHER_P(TemplateArgument, refersToTemplate,
              internal::Matcher<TemplateName>, InnerMatcher) {
  // Only template-kind arguments can refer to a template; other kinds
  // never match.
  return Node.getKind() == TemplateArgument::Template &&
         InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder);
}
/// Matches a canonical TemplateArgument that refers to a certain
/// declaration.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToDeclaration(fieldDecl(hasName("next")))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, refersToDeclaration,
              internal::Matcher<Decl>, InnerMatcher) {
  // Arguments that are not declarations never match.
  if (Node.getKind() != TemplateArgument::Declaration)
    return false;
  return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder);
}
/// Matches a sugar TemplateArgument that refers to a certain expression.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// templateSpecializationType(hasAnyTemplateArgument(
/// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next"))))))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) {
  // Arguments that are not expressions never match.
  if (Node.getKind() != TemplateArgument::Expression)
    return false;
  return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder);
}
/// Matches a TemplateArgument that is an integral value.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(isIntegral()))
/// matches the implicit instantiation of C in C<42>
/// with isIntegral() matching 42.
AST_MATCHER(TemplateArgument, isIntegral) {
  // Kind check only; the integral value itself is not inspected.
  return Node.getKind() == TemplateArgument::Integral;
}
/// Matches a TemplateArgument that refers to an integral type.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(refersToIntegralType(asString("int"))))
/// matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, refersToIntegralType,
              internal::Matcher<QualType>, InnerMatcher) {
  // The integral type is only meaningful for integral arguments; other
  // kinds never match.
  return Node.getKind() == TemplateArgument::Integral &&
         InnerMatcher.matches(Node.getIntegralType(), Finder, Builder);
}
/// Matches a TemplateArgument of integral type with a given value.
///
/// Note that 'Value' is a string as the template argument's value is
/// an arbitrary precision integer. 'Value' must be equal to the canonical
/// representation of that integral value in base 10.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(equalsIntegralValue("42")))
/// matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, equalsIntegralValue,
              std::string, Value) {
  // Only integral arguments carry a value; compare its canonical base-10
  // rendering against the expected string.
  if (Node.getKind() == TemplateArgument::Integral)
    return Node.getAsIntegral().toString(10) == Value;
  return false;
}
/// Matches an Objective-C autorelease pool statement.
///
/// Given
/// \code
/// @autoreleasepool {
/// int x = 0;
/// }
/// \endcode
/// autoreleasePoolStmt(stmt()) matches the declaration of "x"
/// inside the autorelease pool.
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
ObjCAutoreleasePoolStmt> autoreleasePoolStmt;
/// Matches any value declaration.
///
/// Example matches A, B, C and F
/// \code
/// enum X { A, B, C };
/// void F();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl;
/// Matches C++ constructor declarations.
///
/// Example matches Foo::Foo() and Foo::Foo(int)
/// \code
/// class Foo {
/// public:
/// Foo();
/// Foo(int);
/// int DoSomething();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl>
cxxConstructorDecl;
/// Matches explicit C++ destructor declarations.
///
/// Example matches Foo::~Foo()
/// \code
/// class Foo {
/// public:
/// virtual ~Foo();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl>
cxxDestructorDecl;
/// Matches enum declarations.
///
/// Example matches X
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl;
/// Matches enum constants.
///
/// Example matches A, B, C
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl>
enumConstantDecl;
/// Matches tag declarations.
///
/// Example matches X, Z, U, S, E
/// \code
/// class X;
/// template<class T> class Z {};
/// struct S {};
/// union U {};
/// enum E {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TagDecl> tagDecl;
/// Matches method declarations.
///
/// Example matches y
/// \code
/// class X { void y(); };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl>
cxxMethodDecl;
/// Matches conversion operator declarations.
///
/// Example matches the operator.
/// \code
/// class X { operator int() const; };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl>
cxxConversionDecl;
/// Matches user-defined and implicitly generated deduction guide.
///
/// Example matches the deduction guide.
/// \code
/// template<typename T>
/// class X { X(int) };
/// X(int) -> X<int>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl>
cxxDeductionGuideDecl;
/// Matches variable declarations.
///
/// Note: this does not match declarations of member variables, which are
/// "field" declarations in Clang parlance.
///
/// Example matches a
/// \code
/// int a;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl;
/// Matches field declarations.
///
/// Given
/// \code
/// class X { int m; };
/// \endcode
/// fieldDecl()
/// matches 'm'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl;
/// Matches indirect field declarations.
///
/// Given
/// \code
/// struct X { struct { int a; }; };
/// \endcode
/// indirectFieldDecl()
/// matches 'a'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl>
indirectFieldDecl;
/// Matches function declarations.
///
/// Example matches f
/// \code
/// void f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl>
functionDecl;
/// Matches C++ function template declarations.
///
/// Example matches f
/// \code
/// template<class T> void f(T t) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl>
functionTemplateDecl;
/// Matches friend declarations.
///
/// Given
/// \code
/// class X { friend void foo(); };
/// \endcode
/// friendDecl()
/// matches 'friend void foo()'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl;
/// Matches statements.
///
/// Given
/// \code
/// { ++a; }
/// \endcode
/// stmt()
/// matches both the compound statement '{ ++a; }' and '++a'.
extern const internal::VariadicAllOfMatcher<Stmt> stmt;
/// Matches declaration statements.
///
/// Given
/// \code
/// int a;
/// \endcode
/// declStmt()
/// matches 'int a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt;
/// Matches member expressions.
///
/// Given
/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// int a; static int b;
/// };
/// \endcode
/// memberExpr()
/// matches this->x, x, y.x, a, this->b
extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr;
/// Matches unresolved member expressions.
///
/// Given
/// \code
/// struct X {
/// template <class T> void f();
/// void g();
/// };
/// template <class T> void h() { X x; x.f<T>(); x.g(); }
/// \endcode
/// unresolvedMemberExpr()
/// matches x.f<T>
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr>
unresolvedMemberExpr;
/// Matches member expressions where the actual member referenced could not be
/// resolved because the base expression or the member name was dependent.
///
/// Given
/// \code
/// template <class T> void f() { T t; t.g(); }
/// \endcode
/// cxxDependentScopeMemberExpr()
/// matches t.g
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXDependentScopeMemberExpr>
cxxDependentScopeMemberExpr;
/// Matches call expressions.
///
/// Example matches x.y() and y()
/// \code
/// X x;
/// x.y();
/// y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr;
/// Matches call expressions which were resolved using ADL.
///
/// Example matches y(x) but not y(42) or NS::y(x).
/// \code
/// namespace NS {
/// struct X {};
/// void y(X);
/// }
///
/// void y(...);
///
/// void test() {
/// NS::X x;
/// y(x); // Matches
/// NS::y(x); // Doesn't match
/// y(42); // Doesn't match
/// using NS::y;
/// y(x); // Found by both unqualified lookup and ADL, doesn't match
///   }
/// \endcode
AST_MATCHER(CallExpr, usesADL) { return Node.usesADL(); }
/// Matches lambda expressions.
///
/// Example matches [&](){return 5;}
/// \code
/// [&](){return 5;}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr;
/// Matches member call expressions.
///
/// Example matches x.y()
/// \code
/// X x;
/// x.y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr>
cxxMemberCallExpr;
/// Matches ObjectiveC Message invocation expressions.
///
/// The innermost message send invokes the "alloc" class method on the
/// NSString class, while the outermost message send invokes the
/// "initWithString" instance method on the object returned from
/// NSString's "alloc". This matcher should match both message sends.
/// \code
/// [[NSString alloc] initWithString:@"Hello"]
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr>
objcMessageExpr;
/// Matches Objective-C interface declarations.
///
/// Example matches Foo
/// \code
/// @interface Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl>
objcInterfaceDecl;
/// Matches Objective-C implementation declarations.
///
/// Example matches Foo
/// \code
/// @implementation Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl>
objcImplementationDecl;
/// Matches Objective-C protocol declarations.
///
/// Example matches FooDelegate
/// \code
/// @protocol FooDelegate
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl>
objcProtocolDecl;
/// Matches Objective-C category declarations.
///
/// Example matches Foo (Additions)
/// \code
/// @interface Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl>
objcCategoryDecl;
/// Matches Objective-C category definitions.
///
/// Example matches Foo (Additions)
/// \code
/// @implementation Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl>
objcCategoryImplDecl;
/// Matches Objective-C method declarations.
///
/// Example matches both declaration and definition of -[Foo method]
/// \code
/// @interface Foo
/// - (void)method;
/// @end
///
/// @implementation Foo
/// - (void)method {}
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl>
objcMethodDecl;
/// Matches block declarations.
///
/// Example matches the declaration of the nameless block printing an input
/// integer.
///
/// \code
/// myFunc(^(int p) {
/// printf("%d", p);
/// })
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl>
blockDecl;
/// Matches Objective-C instance variable declarations.
///
/// Example matches _enabled
/// \code
/// @implementation Foo {
/// BOOL _enabled;
/// }
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl>
objcIvarDecl;
/// Matches Objective-C property declarations.
///
/// Example matches enabled
/// \code
/// @interface Foo
/// @property BOOL enabled;
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl>
objcPropertyDecl;
/// Matches Objective-C \@throw statements.
///
/// Example matches \@throw
/// \code
/// @throw obj;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt>
objcThrowStmt;
/// Matches Objective-C @try statements.
///
/// Example matches @try
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt>
objcTryStmt;
/// Matches Objective-C @catch statements.
///
/// Example matches @catch
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt>
objcCatchStmt;
/// Matches Objective-C @finally statements.
///
/// Example matches @finally
/// \code
/// @try {}
/// @finally {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt>
objcFinallyStmt;
/// Matches expressions that introduce cleanups to be run at the end
/// of the sub-expression's evaluation.
///
/// Example matches std::string()
/// \code
/// const std::string str = std::string();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups>
exprWithCleanups;
/// Matches init list expressions.
///
/// Given
/// \code
/// int a[] = { 1, 2 };
/// struct B { int x, y; };
/// B b = { 5, 6 };
/// \endcode
/// initListExpr()
/// matches "{ 1, 2 }" and "{ 5, 6 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr>
initListExpr;
/// Matches the syntactic form of init list expressions
/// (if expression have it).
AST_MATCHER_P(InitListExpr, hasSyntacticForm,
              internal::Matcher<Expr>, InnerMatcher) {
  // Init lists without a syntactic form never match.
  if (const Expr *SyntacticForm = Node.getSyntacticForm())
    return InnerMatcher.matches(*SyntacticForm, Finder, Builder);
  return false;
}
/// Matches C++ initializer list expressions.
///
/// Given
/// \code
/// std::vector<int> a({ 1, 2, 3 });
/// std::vector<int> b = { 4, 5 };
/// int c[] = { 6, 7 };
/// std::pair<int, int> d = { 8, 9 };
/// \endcode
/// cxxStdInitializerListExpr()
/// matches "{ 1, 2, 3 }" and "{ 4, 5 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXStdInitializerListExpr>
cxxStdInitializerListExpr;
/// Matches implicit initializers of init list expressions.
///
/// Given
/// \code
/// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 };
/// \endcode
/// implicitValueInitExpr()
/// matches "[0].y" (implicitly)
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr>
implicitValueInitExpr;
/// Matches paren list expressions.
/// ParenListExprs don't have a predefined type and are used for late parsing.
/// In the final AST, they can be met in template declarations.
///
/// Given
/// \code
/// template<typename T> class X {
/// void f() {
/// X x(*this);
/// int a = 0, b = 1; int i = (a, b);
/// }
/// };
/// \endcode
/// parenListExpr() matches "*this" but NOT matches (a, b) because (a, b)
/// has a predefined type and is a ParenExpr, not a ParenListExpr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr>
parenListExpr;
/// Matches substitutions of non-type template parameters.
///
/// Given
/// \code
/// template <int N>
/// struct A { static const int n = N; };
/// struct B : public A<42> {};
/// \endcode
/// substNonTypeTemplateParmExpr()
/// matches "N" in the right-hand side of "static const int n = N;"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
SubstNonTypeTemplateParmExpr>
substNonTypeTemplateParmExpr;
/// Matches using declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using X::x;
/// \endcode
/// usingDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl;
/// Matches using namespace declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using namespace X;
/// \endcode
/// usingDirectiveDecl()
/// matches \code using namespace X \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl>
usingDirectiveDecl;
/// Matches reference to a name that can be looked up during parsing
/// but could not be resolved to a specific declaration.
///
/// Given
/// \code
/// template<typename T>
/// T foo() { T a; return a; }
/// template<typename T>
/// void bar() {
/// foo<T>();
/// }
/// \endcode
/// unresolvedLookupExpr()
/// matches \code foo<T>() \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr>
unresolvedLookupExpr;
/// Matches unresolved using value declarations.
///
/// Given
/// \code
/// template<typename X>
/// class C : private X {
/// using X::x;
/// };
/// \endcode
/// unresolvedUsingValueDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingValueDecl>
unresolvedUsingValueDecl;
/// Matches unresolved using value declarations that involve the
/// typename.
///
/// Given
/// \code
/// template <typename T>
/// struct Base { typedef T Foo; };
///
/// template<typename T>
/// struct S : private Base<T> {
/// using typename Base<T>::Foo;
/// };
/// \endcode
/// unresolvedUsingTypenameDecl()
/// matches \code using Base<T>::Foo \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingTypenameDecl>
unresolvedUsingTypenameDecl;
/// Matches a constant expression wrapper.
///
/// Example matches the constant in the case statement:
/// (matcher = constantExpr())
/// \code
/// switch (a) {
/// case 37: break;
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr>
constantExpr;
/// Matches parentheses used in expressions.
///
/// Example matches (foo() + 1)
/// \code
/// int foo() { return 1; }
/// int a = (foo() + 1);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr;
/// Matches constructor call expressions (including implicit ones).
///
/// Example matches string(ptr, n) and ptr within arguments of f
/// (matcher = cxxConstructExpr())
/// \code
/// void f(const string &a, const string &b);
/// char *ptr;
/// int n;
/// f(string(ptr, n), ptr);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr>
cxxConstructExpr;
/// Matches unresolved constructor call expressions.
///
/// Example matches T(t) in return statement of f
/// (matcher = cxxUnresolvedConstructExpr())
/// \code
/// template <typename T>
/// void f(const T& t) { return T(t); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXUnresolvedConstructExpr>
cxxUnresolvedConstructExpr;
/// Matches implicit and explicit this expressions.
///
/// Example matches the implicit this expression in "return i".
/// (matcher = cxxThisExpr())
/// \code
/// struct foo {
/// int i;
/// int f() { return i; }
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr>
cxxThisExpr;
/// Matches nodes where temporaries are created.
///
/// Example matches FunctionTakesString(GetStringByValue())
/// (matcher = cxxBindTemporaryExpr())
/// \code
/// FunctionTakesString(GetStringByValue());
/// FunctionTakesStringByPointer(GetStringPointer());
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr>
cxxBindTemporaryExpr;
/// Matches nodes where temporaries are materialized.
///
/// Example: Given
/// \code
/// struct T {void func();};
/// T f();
/// void g(T);
/// \endcode
/// materializeTemporaryExpr() matches 'f()' in these statements
/// \code
/// T u(f());
/// g(f());
/// f().func();
/// \endcode
/// but does not match
/// \code
/// f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
MaterializeTemporaryExpr>
materializeTemporaryExpr;
/// Matches new expressions.
///
/// Given
/// \code
/// new X;
/// \endcode
/// cxxNewExpr()
/// matches 'new X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr;
/// Matches delete expressions.
///
/// Given
/// \code
/// delete X;
/// \endcode
/// cxxDeleteExpr()
/// matches 'delete X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr>
cxxDeleteExpr;
/// Matches noexcept expressions.
///
/// Given
/// \code
/// bool a() noexcept;
/// bool b() noexcept(true);
/// bool c() noexcept(false);
/// bool d() noexcept(noexcept(a()));
/// bool e = noexcept(b()) || noexcept(c());
/// \endcode
/// cxxNoexceptExpr()
/// matches `noexcept(a())`, `noexcept(b())` and `noexcept(c())`.
/// doesn't match the noexcept specifier in the declarations a, b, c or d.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr>
cxxNoexceptExpr;
/// Matches array subscript expressions.
///
/// Given
/// \code
/// int i = a[1];
/// \endcode
/// arraySubscriptExpr()
/// matches "a[1]"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr>
arraySubscriptExpr;
/// Matches the value of a default argument at the call site.
///
/// Example matches the CXXDefaultArgExpr placeholder inserted for the
/// default value of the second parameter in the call expression f(42)
/// (matcher = cxxDefaultArgExpr())
/// \code
/// void f(int x, int y = 0);
/// f(42);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr>
cxxDefaultArgExpr;
/// Matches overloaded operator calls.
///
/// Note that if an operator isn't overloaded, it won't match. Instead, use
/// binaryOperator matcher.
/// Currently it does not match operators such as new delete.
/// FIXME: figure out why these do not match?
///
/// Example matches both operator<<((o << b), c) and operator<<(o, b)
/// (matcher = cxxOperatorCallExpr())
/// \code
/// ostream &operator<< (ostream &out, int i) { };
/// ostream &o; int b = 1, c = 1;
/// o << b << c;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr>
cxxOperatorCallExpr;
/// Matches expressions.
///
/// Example matches x()
/// \code
/// void f() { x(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr;
/// Matches expressions that refer to declarations.
///
/// Example matches x in if (x)
/// \code
/// bool x;
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr>
declRefExpr;
/// Matches a reference to an ObjCIvar.
///
/// Example: matches "a" in "init" method:
/// \code
/// @implementation A {
/// NSString *a;
/// }
/// - (void) init {
/// a = @"hello";
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr>
objcIvarRefExpr;
/// Matches a reference to a block.
///
/// Example: matches "^{}":
/// \code
/// void f() { ^{}(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr;
/// Matches if statements.
///
/// Example matches 'if (x) {}'
/// \code
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt;
/// Matches for statements.
///
/// Example matches 'for (;;) {}'
/// \code
/// for (;;) {}
/// int i[] = {1, 2, 3}; for (auto a : i);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt;
/// Matches the increment statement of a for loop.
///
/// Example:
/// forStmt(hasIncrement(unaryOperator(hasOperatorName("++"))))
/// matches '++x' in
/// \code
/// for (x; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>,
              InnerMatcher) {
  // The increment clause is optional (e.g. 'for (;;)'); match only when
  // it is present.
  if (const Stmt *Inc = Node.getInc())
    return InnerMatcher.matches(*Inc, Finder, Builder);
  return false;
}
/// Matches the initialization statement of a for loop.
///
/// Example:
/// forStmt(hasLoopInit(declStmt()))
/// matches 'int x = 0' in
/// \code
/// for (int x = 0; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>,
              InnerMatcher) {
  // The init clause is optional (e.g. 'for (;;)'); match only when it is
  // present.
  if (const Stmt *InitStmt = Node.getInit())
    return InnerMatcher.matches(*InitStmt, Finder, Builder);
  return false;
}
/// Matches range-based for statements.
///
/// cxxForRangeStmt() matches 'for (auto a : i)'
/// \code
/// int i[] = {1, 2, 3}; for (auto a : i);
/// for(int j = 0; j < 5; ++j);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt>
cxxForRangeStmt;
/// Matches the loop variable of a range-based for statement.
///
/// Example:
///   cxxForRangeStmt(hasLoopVariable(anything()))
/// matches 'int x' in
/// \code
///   for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>,
              InnerMatcher) {
  const VarDecl *const Var = Node.getLoopVariable();
  return (Var != nullptr && InnerMatcher.matches(*Var, Finder, Builder));
}
/// Matches the range-initialization expression of a range-based for statement.
///
/// Example:
///   cxxForRangeStmt(hasRangeInit(anything()))
/// matches 'a' in
/// \code
///   for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>,
              InnerMatcher) {
  const Expr *const Init = Node.getRangeInit();
  return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder));
}
/// Matches while statements.
///
/// Given
/// \code
/// while (true) {}
/// \endcode
/// whileStmt()
/// matches 'while (true) {}'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt;
/// Matches do statements.
///
/// Given
/// \code
/// do {} while (true);
/// \endcode
/// doStmt()
/// matches 'do {} while(true)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt;
/// Matches break statements.
///
/// Given
/// \code
/// while (true) { break; }
/// \endcode
/// breakStmt()
/// matches 'break'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt;
/// Matches continue statements.
///
/// Given
/// \code
/// while (true) { continue; }
/// \endcode
/// continueStmt()
/// matches 'continue'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt>
continueStmt;
/// Matches return statements.
///
/// Given
/// \code
/// return 1;
/// \endcode
/// returnStmt()
/// matches 'return 1'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt;
/// Matches goto statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// gotoStmt()
/// matches 'goto FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt;
/// Matches label statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelStmt()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt;
/// Matches address of label statements (GNU extension).
///
/// Given
/// \code
/// FOO: bar();
/// void *ptr = &&FOO;
/// goto *bar;
/// \endcode
/// addrLabelExpr()
/// matches '&&FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr>
addrLabelExpr;
/// Matches switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchStmt()
/// matches 'switch(a)'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt;
/// Matches case and default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchCase()
/// matches 'case 42:' and 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase;
/// Matches case statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// caseStmt()
/// matches 'case 42:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt;
/// Matches default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// defaultStmt()
/// matches 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt>
defaultStmt;
/// Matches compound statements.
///
/// Example matches '{}' and '{{}}' in 'for (;;) {{}}'
/// \code
/// for (;;) {{}}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt>
compoundStmt;
/// Matches catch statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxCatchStmt()
/// matches 'catch(int i)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt>
cxxCatchStmt;
/// Matches try statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxTryStmt()
/// matches 'try {}'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt;
/// Matches throw expressions.
///
/// \code
/// try { throw 5; } catch(int i) {}
/// \endcode
/// cxxThrowExpr()
/// matches 'throw 5'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr>
cxxThrowExpr;
/// Matches null statements.
///
/// \code
/// foo();;
/// \endcode
/// nullStmt()
/// matches the second ';'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt;
/// Matches asm statements.
///
/// \code
/// int i = 100;
/// __asm("mov al, 2");
/// \endcode
/// asmStmt()
/// matches '__asm("mov al, 2")'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt;
/// Matches bool literals.
///
/// Example matches true
/// \code
/// true
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr>
cxxBoolLiteral;
/// Matches string literals (also matches wide string literals).
///
/// Example matches "abcd", L"abcd"
/// \code
/// char *s = "abcd";
/// wchar_t *ws = L"abcd";
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral>
stringLiteral;
/// Matches character literals (also matches wchar_t).
///
/// Not matching Hex-encoded chars (e.g. 0x1234, which is a IntegerLiteral),
/// though.
///
/// Example matches 'a', L'a'
/// \code
/// char ch = 'a';
/// wchar_t chw = L'a';
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral>
characterLiteral;
/// Matches integer literals of all sizes / encodings, e.g.
/// 1, 1L, 0x1 and 1U.
///
/// Does not match character-encoded integers such as L'a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral>
integerLiteral;
/// Matches float literals of all sizes / encodings, e.g.
/// 1.0, 1.0f, 1.0L and 1e10.
///
/// Does not match implicit conversions such as
/// \code
/// float a = 10;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral>
floatLiteral;
/// Matches imaginary literals, which are based on integer and floating
/// point literals e.g.: 1i, 1.0i
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral>
imaginaryLiteral;
/// Matches fixed point literals
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FixedPointLiteral>
fixedPointLiteral;
/// Matches user defined literal operator call.
///
/// Example match: "foo"_suffix
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral>
userDefinedLiteral;
/// Matches compound (i.e. non-scalar) literals
///
/// Example match: {1}, (1, 2)
/// \code
/// int array[4] = {1};
/// vector int myvec = (vector int)(1, 2);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr>
compoundLiteralExpr;
/// Matches nullptr literal.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr>
cxxNullPtrLiteralExpr;
/// Matches GNU __builtin_choose_expr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr>
chooseExpr;
/// Matches GNU __null expression.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr>
gnuNullExpr;
/// Matches atomic builtins.
/// Example matches __atomic_load_n(ptr, 1)
/// \code
/// void foo() { int *ptr; __atomic_load_n(ptr, 1); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr;
/// Matches statement expression (GNU extension).
///
/// Example match: ({ int X = 4; X; })
/// \code
/// int C = ({ int X = 4; X; });
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr;
/// Matches binary operator expressions.
///
/// Example matches a || b
/// \code
/// !(a || b)
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator>
binaryOperator;
/// Matches unary operator expressions.
///
/// Example matches !a
/// \code
/// !a || b
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator>
unaryOperator;
/// Matches conditional operator expressions.
///
/// Example matches a ? b : c
/// \code
/// (a ? b : c) + 42
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator>
conditionalOperator;
/// Matches binary conditional operator expressions (GNU extension).
///
/// Example matches a ?: b
/// \code
/// (a ?: b) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
BinaryConditionalOperator>
binaryConditionalOperator;
/// Matches opaque value expressions. They are used as helpers
/// to reference other expressions and can be encountered,
/// for example, in BinaryConditionalOperators.
///
/// Example matches 'a'
/// \code
/// (a ?: c) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr>
opaqueValueExpr;
/// Matches a C++ static_assert declaration.
///
/// Example:
///   staticAssertDecl()
/// matches
/// static_assert(sizeof(S) == sizeof(int))
/// in
/// \code
/// struct S {
/// int x;
/// };
/// static_assert(sizeof(S) == sizeof(int));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl>
staticAssertDecl;
/// Matches a reinterpret_cast expression.
///
/// Either the source expression or the destination type can be matched
/// using has(), but hasDestinationType() is more specific and can be
/// more readable.
///
/// Example matches reinterpret_cast<char*>(&p) in
/// \code
/// void* p = reinterpret_cast<char*>(&p);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr>
cxxReinterpretCastExpr;
/// Matches a C++ static_cast expression.
///
/// \see hasDestinationType
/// \see reinterpretCast
///
/// Example:
/// cxxStaticCastExpr()
/// matches
/// static_cast<long>(8)
/// in
/// \code
/// long eight(static_cast<long>(8));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr>
cxxStaticCastExpr;
/// Matches a dynamic_cast expression.
///
/// Example:
/// cxxDynamicCastExpr()
/// matches
/// dynamic_cast<D*>(&b);
/// in
/// \code
/// struct B { virtual ~B() {} }; struct D : B {};
/// B b;
/// D* p = dynamic_cast<D*>(&b);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr>
cxxDynamicCastExpr;
/// Matches a const_cast expression.
///
/// Example: Matches const_cast<int*>(&r) in
/// \code
/// int n = 42;
/// const int &r(n);
/// int* p = const_cast<int*>(&r);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr>
cxxConstCastExpr;
/// Matches a C-style cast expression.
///
/// Example: Matches (int) 2.2f in
/// \code
/// int i = (int) 2.2f;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr>
cStyleCastExpr;
/// Matches explicit cast expressions.
///
/// Matches any cast expression written in user code, whether it be a
/// C-style cast, a functional-style cast, or a keyword cast.
///
/// Does not match implicit conversions.
///
/// Note: the name "explicitCast" is chosen to match Clang's terminology, as
/// Clang uses the term "cast" to apply to implicit conversions as well as to
/// actual cast expressions.
///
/// \see hasDestinationType.
///
/// Example: matches all five of the casts in
/// \code
/// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42)))))
/// \endcode
/// but does not match the implicit conversion in
/// \code
/// long ell = 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr>
explicitCastExpr;
/// Matches the implicit cast nodes of Clang's AST.
///
/// This matches many different places, including function call return value
/// eliding, as well as any type conversions.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr>
implicitCastExpr;
/// Matches any cast nodes of Clang's AST.
///
/// Example: castExpr() matches each of the following:
/// \code
/// (int) 3;
/// const_cast<Expr *>(SubExpr);
/// char c = 0;
/// \endcode
/// but does not match
/// \code
/// int i = (0);
/// int k = 0;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr;
/// Matches functional cast expressions
///
/// Example: Matches Foo(bar);
/// \code
/// Foo f = bar;
/// Foo g = (Foo) bar;
/// Foo h = Foo(bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr>
cxxFunctionalCastExpr;
/// Matches functional cast expressions having N != 1 arguments
///
/// Example: Matches Foo(bar, bar)
/// \code
/// Foo h = Foo(bar, bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr>
cxxTemporaryObjectExpr;
/// Matches predefined identifier expressions [C99 6.4.2.2].
///
/// Example: Matches __func__
/// \code
/// printf("%s", __func__);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr>
predefinedExpr;
/// Matches C99 designated initializer expressions [C99 6.7.8].
///
/// Example: Matches { [2].y = 1.0, [0].x = 1.0 }
/// \code
/// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr>
designatedInitExpr;
/// Matches designated initializer expressions containing exactly \p N
/// designators.
///
/// Example: Given
/// \code
///   point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
///   point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 };
/// \endcode
/// designatorCountIs(2)
///   matches '{ [2].y = 1.0, [0].x = 1.0 }',
///   but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'.
AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) {
  // DesignatedInitExpr::size() reports the number of designators.
  const unsigned DesignatorCount = Node.size();
  return DesignatorCount == N;
}
/// Matches \c QualTypes in the clang AST.
extern const internal::VariadicAllOfMatcher<QualType> qualType;
/// Matches \c Types in the clang AST.
extern const internal::VariadicAllOfMatcher<Type> type;
/// Matches \c TypeLocs in the clang AST.
extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc;
/// Matches if any of the given matchers matches.
///
/// Unlike \c anyOf, \c eachOf will generate a match result for each
/// matching submatcher.
///
/// For example, in:
/// \code
/// class A { int a; int b; };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")),
/// has(fieldDecl(hasName("b")).bind("v"))))
/// \endcode
/// will generate two results binding "v", the first of which binds
/// the field declaration of \c a, the second the field declaration of
/// \c b.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
eachOf;
/// Matches if any of the given matchers matches.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
anyOf;
/// Matches if all given matchers match.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
allOf;
/// Matches any node regardless of the submatcher.
///
/// However, \c optionally will retain any bindings generated by the submatcher.
/// Useful when additional information, which may or may not be present,
/// about a main matching node is desired.
///
/// For example, in:
/// \code
/// class Foo {
/// int bar;
/// }
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(
/// optionally(has(
/// fieldDecl(hasName("bar")).bind("var")
/// ))).bind("record")
/// \endcode
/// will produce a result binding for both "record" and "var".
/// The matcher will produce a "record" binding even if there is no data
/// member named "bar" in that class.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> optionally;
/// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL)
///
/// Given
/// \code
/// Foo x = bar;
/// int y = sizeof(x) + alignof(x);
/// \endcode
/// unaryExprOrTypeTraitExpr()
/// matches \c sizeof(x) and \c alignof(x)
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
UnaryExprOrTypeTraitExpr>
unaryExprOrTypeTraitExpr;
/// Matches unary expressions that have a specific type of argument.
///
/// Given
/// \code
///   int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c);
/// \endcode
/// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")))
/// matches \c sizeof(a) and \c alignof(c)
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType,
              internal::Matcher<QualType>, InnerMatcher) {
  const QualType ArgumentType = Node.getTypeOfArgument();
  return InnerMatcher.matches(ArgumentType, Finder, Builder);
}
/// Matches unary expressions of a certain kind.
///
/// Given
/// \code
///   int x;
///   int s = sizeof(x) + alignof(x);
/// \endcode
/// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf))
/// matches \c sizeof(x)
///
/// If the matcher is used from clang-query, the UnaryExprOrTypeTrait parameter
/// should be passed as a quoted string. e.g., ofKind("UETT_SizeOf").
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) {
  return Node.getKind() == Kind;
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// alignof.
inline internal::Matcher<Stmt> alignOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  // Accept both standard alignof and the preferred-alignment variant
  // (e.g. GNU __alignof__).
  auto AlignKind = anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf));
  return stmt(unaryExprOrTypeTraitExpr(allOf(AlignKind, InnerMatcher)));
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// sizeof.
inline internal::Matcher<Stmt> sizeOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  // Restrict the expression kind to sizeof before applying the inner matcher.
  auto SizeOfKind = ofKind(UETT_SizeOf);
  return stmt(unaryExprOrTypeTraitExpr(allOf(SizeOfKind, InnerMatcher)));
}
/// Matches NamedDecl nodes that have the specified name.
///
/// Supports specifying enclosing namespaces or classes by prefixing the name
/// with '<enclosing>::'.
/// Does not match typedefs of an underlying type with the given name.
///
/// Example matches X (Name == "X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
/// \code
///   namespace a { namespace b { class X; } }
/// \endcode
inline internal::Matcher<NamedDecl> hasName(StringRef Name) {
  // HasNameMatcher accepts a list of names; a single-name lookup is just the
  // one-element case.
  std::vector<std::string> Names;
  Names.push_back(std::string(Name));
  return internal::Matcher<NamedDecl>(new internal::HasNameMatcher(Names));
}
/// Matches NamedDecl nodes that have any of the specified names.
///
/// This matcher is only provided as a performance optimization of hasName.
/// \code
/// hasAnyName(a, b, c)
/// \endcode
/// is equivalent to, but faster than
/// \code
/// anyOf(hasName(a), hasName(b), hasName(c))
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef,
internal::hasAnyNameFunc>
hasAnyName;
/// Matches NamedDecl nodes whose fully qualified names contain
/// a substring matched by the given RegExp.
///
/// Supports specifying enclosing namespaces or classes by
/// prefixing the name with '<enclosing>::'. Does not match typedefs
/// of an underlying type with the given name.
///
/// Example matches X (regexp == "::X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
/// \code
///   namespace foo { namespace bar { class X; } }
/// \endcode
AST_MATCHER_P(NamedDecl, matchesName, std::string, RegExp) {
  assert(!RegExp.empty());
  // Prefix "::" so anchored patterns like "::X" also work for names in the
  // global namespace.
  const std::string QualifiedName = "::" + Node.getQualifiedNameAsString();
  llvm::Regex Pattern(RegExp);
  return Pattern.match(QualifiedName);
}
/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
/// Given:
/// \code
///   class A { int operator*(); };
///   const A &operator<<(const A &a, const A &b);
///   A a;
///   a << a;   // <-- This matches
/// \endcode
///
/// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<"))) matches the
/// specified line and
/// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*")))
/// matches the declaration of \c A.
///
/// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl>
inline internal::PolymorphicMatcherWithParam1<
    internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>
hasOverloadedOperatorName(StringRef Name) {
  // Alias the (verbose) result type so the construction reads clearly.
  using ResultT = internal::PolymorphicMatcherWithParam1<
      internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
      AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>;
  return ResultT({std::string(Name)});
}
/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
///   hasAnyOverloadedOperatorName("+", "-")
/// Is equivalent to
/// anyOf(hasOverloadedOperatorName("+"), hasOverloadedOperatorName("-"))
extern const internal::VariadicFunction<
internal::PolymorphicMatcherWithParam1<
internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>,
StringRef, internal::hasAnyOverloadedOperatorNameFunc>
hasAnyOverloadedOperatorName;
/// Matches C++ classes that are directly or indirectly derived from a class
/// matching \c Base, or Objective-C classes that directly or indirectly
/// subclass a class matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, Z, C (Base == hasName("X"))
/// \code
///   class X;
///   class Y : public X {};  // directly derived
///   class Z : public Y {};  // indirectly derived
///   typedef X A;
///   typedef A B;
///   class C : public B {};  // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
///   class Foo;
///   typedef Foo X;
///   class Bar : public Foo {};  // derived from a type that X is a typedef of
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("NSObject"))
/// \code
///   @interface NSObject @end
///   @interface Bar : NSObject @end
/// \endcode
///
/// Usable as: Matcher<CXXRecordDecl>, Matcher<ObjCInterfaceDecl>
AST_POLYMORPHIC_MATCHER_P(
    isDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base) {
  // C++ structs/unions/classes use the C++ derivation check.
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(Record, Base, Builder,
                                      /*Directly=*/false);
  // Anything else accepted by this matcher is an Objective-C interface.
  return Finder->objcClassIsDerivedFrom(cast<ObjCInterfaceDecl>(&Node), Base,
                                        Builder, /*Directly=*/false);
}
/// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty base name cannot match any class.
  if (BaseName.empty())
    return false;
  const auto Inner = isDerivedFrom(hasName(BaseName));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(Inner).matches(*Record, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(Inner).matches(
      *cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Similar to \c isDerivedFrom(), but also matches classes that directly
/// match \c Base.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // The class itself qualifies, as does anything derived from it.
  const auto SameOrDerived = anyOf(Base, isDerivedFrom(Base));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(SameOrDerived).matches(*Record, Finder,
                                                         Builder);
  return Matcher<ObjCInterfaceDecl>(SameOrDerived)
      .matches(*cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Overloaded method as shortcut for
/// \c isSameOrDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty base name cannot match any class.
  if (BaseName.empty())
    return false;
  const auto Inner = isSameOrDerivedFrom(hasName(BaseName));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(Inner).matches(*Record, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(Inner).matches(
      *cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Matches C++ or Objective-C classes that are directly derived from a class
/// matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, C (Base == hasName("X"))
/// \code
///   class X;
///   class Y : public X {};  // directly derived
///   class Z : public Y {};  // indirectly derived
///   typedef X A;
///   typedef A B;
///   class C : public B {};  // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
///   class Foo;
///   typedef Foo X;
///   class Bar : public Foo {};  // derived from a type that X is a typedef of
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // C++ structs/unions/classes use the C++ derivation check, restricted to
  // direct bases only.
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(Record, Base, Builder,
                                      /*Directly=*/true);
  // Anything else accepted by this matcher is an Objective-C interface.
  return Finder->objcClassIsDerivedFrom(cast<ObjCInterfaceDecl>(&Node), Base,
                                        Builder, /*Directly=*/true);
}
/// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty base name cannot match any class.
  if (BaseName.empty())
    return false;
  const auto Inner = isDirectlyDerivedFrom(hasName(BaseName));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(Inner).matches(*Record, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(Inner).matches(
      *cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Matches the first method of a class or struct that satisfies \c
/// InnerMatcher.
///
/// Given:
/// \code
///   class A { void func(); };
///   class B { void member(); };
/// \endcode
///
/// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of
/// \c A but not \c B.
AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>,
              InnerMatcher) {
  // Scan the record's methods; succeed on the first one the inner matcher
  // accepts.
  auto MethodsBegin = Node.method_begin();
  auto MethodsEnd = Node.method_end();
  return matchesFirstInPointerRange(InnerMatcher, MethodsBegin, MethodsEnd,
                                    Finder, Builder);
}
/// Matches the generated class of lambda expressions.
///
/// Given:
/// \code
///   auto x = []{};
/// \endcode
///
/// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of
/// \c decltype(x)
AST_MATCHER(CXXRecordDecl, isLambda) {
  // Delegates directly to the AST node's own lambda-class query.
  return Node.isLambda();
}
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y
/// (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X")))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// Usable as: Any Matcher
/// Note that has is direct matcher, so it also matches things like implicit
/// casts and paren casts. If you are matching with expr then you should
/// probably consider using ignoringParenImpCasts like:
/// has(ignoringParenImpCasts(expr())).
extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Z
/// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasDescendantMatcher>
hasDescendant;
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Y::X, Z::Y, Z::Y::X
/// (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X")))
/// \code
/// class X {};
/// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X
/// // inside Y.
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// As opposed to 'has', 'forEach' will cause a match for each result that
/// matches instead of only on the first one.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher>
forEach;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, A, A::X, B, B::C, B::C::X
/// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {};
/// class A { class X {}; }; // Matches A, because A::X is a class of name
/// // X inside A.
/// class B { class C { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for
/// each result that matches instead of only on the first one.
///
/// Note: Recursively combined ForEachDescendant can cause many matches:
/// cxxRecordDecl(forEachDescendant(cxxRecordDecl(
/// forEachDescendant(cxxRecordDecl())
/// )))
/// will match 10 times (plus injected class name matches) on:
/// \code
/// class A { class B { class C { class D { class E {}; }; }; }; };
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::ForEachDescendantMatcher>
forEachDescendant;
/// Matches if the node or any descendant matches.
///
/// Generates results for each match.
///
/// For example, in:
/// \code
///   class A { class B {}; class C {}; };
/// \endcode
/// The matcher:
/// \code
///   cxxRecordDecl(hasName("::A"),
///                 findAll(cxxRecordDecl(isDefinition()).bind("m")))
/// \endcode
/// will generate results for \c A, \c B and \c C.
///
/// Usable as: Any Matcher
template <typename T>
internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) {
  // "Node itself or any descendant": combine a direct match with a
  // per-descendant match, producing one result per hit.
  const auto DescendantMatches = forEachDescendant(Matcher);
  return eachOf(Matcher, DescendantMatches);
}
/// Matches AST nodes that have a parent that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } }
/// \endcode
/// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }".
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasParentMatcher,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
hasParent;
/// Matches AST nodes that have an ancestor that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { if (true) { int x = 42; } }
/// void g() { for (;;) { int x = 43; } }
/// \endcode
/// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasAncestorMatcher,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
hasAncestor;
/// Matches if the provided matcher does not match.
///
/// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X"))))
/// \code
/// class X {};
/// class Y {};
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> unless;
/// Matches a node if the declaration associated with that node
/// matches the given matcher.
///
/// The associated declaration is:
/// - for type nodes, the declaration of the underlying type
/// - for CallExpr, the declaration of the callee
/// - for MemberExpr, the declaration of the referenced member
/// - for CXXConstructExpr, the declaration of the constructor
/// - for CXXNewExpr, the declaration of the operator new
/// - for ObjCIvarExpr, the declaration of the ivar
///
/// For type nodes, hasDeclaration will generally match the declaration of the
/// sugared type. Given
/// \code
/// class X {};
/// typedef X Y;
/// Y y;
/// \endcode
/// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the
/// typedefDecl. A common use case is to match the underlying, desugared type.
/// This can be achieved by using the hasUnqualifiedDesugaredType matcher:
/// \code
/// varDecl(hasType(hasUnqualifiedDesugaredType(
/// recordType(hasDeclaration(decl())))))
/// \endcode
/// In this matcher, the decl will match the CXXRecordDecl of class X.
///
/// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>,
/// Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>,
/// Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>,
/// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>,
/// Matcher<TagType>, Matcher<TemplateSpecializationType>,
/// Matcher<TemplateTypeParmType>, Matcher<TypedefType>,
/// Matcher<UnresolvedUsingType>
inline internal::PolymorphicMatcherWithParam1<
    internal::HasDeclarationMatcher, internal::Matcher<Decl>,
    void(internal::HasDeclarationSupportedTypes)>
hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) {
  // Wrap the Decl matcher in the polymorphic adapter; the node kinds it can
  // be applied to are enumerated by HasDeclarationSupportedTypes.
  using ResultType = internal::PolymorphicMatcherWithParam1<
      internal::HasDeclarationMatcher, internal::Matcher<Decl>,
      void(internal::HasDeclarationSupportedTypes)>;
  return ResultType(InnerMatcher);
}
/// Matches a \c NamedDecl whose underlying declaration matches the given
/// matcher.
///
/// Given
/// \code
/// namespace N { template<class T> void f(T t); }
/// template <class T> void g() { using N::f; f(T()); }
/// \endcode
/// \c unresolvedLookupExpr(hasAnyDeclaration(
/// namedDecl(hasUnderlyingDecl(hasName("::N::f")))))
/// matches the use of \c f in \c g() .
AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>,
              InnerMatcher) {
  // Match only when an underlying declaration exists.
  if (const NamedDecl *const Underlying = Node.getUnderlyingDecl())
    return InnerMatcher.matches(*Underlying, Finder, Builder);
  return false;
}
/// Matches on the implicit object argument of a member call expression, after
/// stripping off any parentheses or implicit casts.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// Y g();
/// class X : public Y {};
/// void z(Y y, X x) { y.m(); (g()).m(); x.m(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y")))))
/// matches `y.m()` and `(g()).m()`.
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X")))))
/// matches `x.m()`.
/// cxxMemberCallExpr(on(callExpr()))
/// matches `(g()).m()`.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>,
              InnerMatcher) {
  // Null-check the implicit object argument *before* stripping parens and
  // implicit casts. The previous code dereferenced the result of
  // getImplicitObjectArgument() first, so the null test that followed could
  // never fire (undefined behavior on a null argument). This now mirrors
  // the hasReceiver and onImplicitObjectArgument matchers below.
  const Expr *ExprNode = Node.getImplicitObjectArgument();
  return (ExprNode != nullptr &&
          InnerMatcher.matches(*ExprNode->IgnoreParenImpCasts(), Finder,
                               Builder));
}
/// Matches on the receiver of an ObjectiveC Message expression.
///
/// Example
/// matcher = objCMessageExpr(hasReceiverType(asString("UIWebView *")));
/// matches the [webView ...] message invocation.
/// \code
/// NSString *webViewJavaScript = ...
/// UIWebView *webView = ...
/// [webView stringByEvaluatingJavaScriptFromString:webViewJavascript];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>,
              InnerMatcher) {
  // Apply the inner matcher directly to the message's receiver QualType.
  return InnerMatcher.matches(Node.getReceiverType(), Finder, Builder);
}
/// Returns true when the Objective-C method declaration is a class method.
///
/// Example
/// matcher = objcMethodDecl(isClassMethod())
/// matches
/// \code
/// @interface I + (void)foo; @end
/// \endcode
/// but not
/// \code
/// @interface I - (void)bar; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isClassMethod) {
  // Direct delegation to ObjCMethodDecl::isClassMethod().
  return Node.isClassMethod();
}
/// Returns true when the Objective-C method declaration is an instance method.
///
/// Example
/// matcher = objcMethodDecl(isInstanceMethod())
/// matches
/// \code
/// @interface I - (void)bar; @end
/// \endcode
/// but not
/// \code
/// @interface I + (void)foo; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isInstanceMethod) {
  // Direct delegation to ObjCMethodDecl::isInstanceMethod().
  return Node.isInstanceMethod();
}
/// Returns true when the Objective-C message is sent to a class.
///
/// Example
/// matcher = objcMessageExpr(isClassMessage())
/// matches
/// \code
/// [NSString stringWithFormat:@"format"];
/// \endcode
/// but not
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isClassMessage) {
  // Direct delegation to ObjCMessageExpr::isClassMessage().
  return Node.isClassMessage();
}
/// Returns true when the Objective-C message is sent to an instance.
///
/// Example
/// matcher = objcMessageExpr(isInstanceMessage())
/// matches
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
/// but not
/// \code
/// [NSString stringWithFormat:@"format"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isInstanceMessage) {
  // Direct delegation to ObjCMessageExpr::isInstanceMessage().
  return Node.isInstanceMessage();
}
/// Matches if the Objective-C message is sent to an instance,
/// and the inner matcher matches on that instance.
///
/// For example the method call in
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
/// is matched by
/// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x"))))))
AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>,
              InnerMatcher) {
  // A null instance receiver (e.g. a class message) never matches.
  const Expr *Receiver = Node.getInstanceReceiver();
  if (Receiver == nullptr)
    return false;
  // Parens and implicit casts around the receiver are ignored.
  return InnerMatcher.matches(*Receiver->IgnoreParenImpCasts(), Finder,
                              Builder);
}
/// Matches when BaseName == Selector.getAsString()
///
/// matcher = objCMessageExpr(hasSelector("loadHTMLString:baseURL:"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) {
  // Exact string equality between the stringified selector and BaseName.
  return Node.getSelector().getAsString() == BaseName;
}
/// Matches when at least one of the supplied string equals to the
/// Selector.getAsString()
///
/// matcher = objCMessageExpr(hasSelector("methodA:", "methodB:"));
/// matches both of the expressions below:
/// \code
/// [myObj methodA:argA];
/// [myObj methodB:argB];
/// \endcode
// Extern declaration only; the single instance is defined out-of-line.
extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>,
                                        StringRef,
                                        internal::hasAnySelectorFunc>
    hasAnySelector;
/// Matches ObjC selectors whose name contains
/// a substring matched by the given RegExp.
/// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, matchesSelector, std::string, RegExp) {
  // NOTE(review): the emptiness check is assert-only, so an empty pattern
  // slips through in release builds. The llvm::Regex is also recompiled for
  // every candidate node rather than once per matcher instance.
  assert(!RegExp.empty());
  std::string SelectorString = Node.getSelector().getAsString();
  llvm::Regex RE(RegExp);
  return RE.match(SelectorString);
}
/// Matches when the selector is the empty selector
///
/// Matches only when the selector of the objCMessageExpr is NULL. This may
/// represent an error condition in the tree!
AST_MATCHER(ObjCMessageExpr, hasNullSelector) {
  // Direct delegation to Selector::isNull().
  return Node.getSelector().isNull();
}
/// Matches when the selector is a Unary Selector
///
/// matcher = objCMessageExpr(hasUnarySelector());
/// matches self.bodyView in the code below, but NOT the outer message
/// invocation of "loadHTMLString:baseURL:".
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasUnarySelector) {
  // Direct delegation to Selector::isUnarySelector().
  return Node.getSelector().isUnarySelector();
}
/// Matches when the selector is a keyword selector
///
/// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame
/// message expression in
///
/// \code
/// UIWebView *webView = ...;
/// CGRect bodyFrame = webView.frame;
/// bodyFrame.size.height = self.bodyContentHeight;
/// webView.frame = bodyFrame;
/// // ^---- matches here
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) {
  // Direct delegation to Selector::isKeywordSelector().
  return Node.getSelector().isKeywordSelector();
}
/// Matches when the selector has the specified number of arguments
///
/// matcher = objCMessageExpr(numSelectorArgs(0));
/// matches self.bodyView in the code below
///
/// matcher = objCMessageExpr(numSelectorArgs(2));
/// matches the invocation of "loadHTMLString:baseURL:" but not that
/// of self.bodyView
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) {
  // Compare the selector's argument count against N.
  return Node.getSelector().getNumArgs() == N;
}
/// Matches if the call expression's callee expression matches.
///
/// Given
/// \code
/// class Y { void x() { this->x(); x(); Y y; y.x(); } };
/// void f() { f(); }
/// \endcode
/// callExpr(callee(expr()))
/// matches this->x(), x(), y.x(), f()
/// with callee(...)
/// matching this->x, x, y.x, f respectively
///
/// Note: Callee cannot take the more general internal::Matcher<Expr>
/// because this introduces ambiguous overloads with calls to Callee taking a
/// internal::Matcher<Decl>, as the matcher hierarchy is purely
/// implemented in terms of implicit casts.
AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>,
              InnerMatcher) {
  // Guard against a null callee expression before matching.
  if (const Expr *const Callee = Node.getCallee())
    return InnerMatcher.matches(*Callee, Finder, Builder);
  return false;
}
/// Matches if the call expression's callee's declaration matches the
/// given matcher.
///
/// Example matches y.x() (matcher = callExpr(callee(
/// cxxMethodDecl(hasName("x")))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y y; y.x(); }
/// \endcode
AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher,
                       1) {
  // Implemented by composition: delegate to hasDeclaration on the call.
  return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder);
}
/// Matches if the expression's or declaration's type matches a type
/// matcher.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and U (matcher = typedefDecl(hasType(asString("int")))
/// and friend class X (matcher = friendDecl(hasType("X"))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// typedef int U;
/// class Y { friend class X; };
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl,
                                    ValueDecl),
    internal::Matcher<QualType>, InnerMatcher, 0) {
  // A null QualType (no underlying type available) never matches.
  const QualType NodeType = internal::getUnderlyingType(Node);
  return !NodeType.isNull() &&
         InnerMatcher.matches(NodeType, Finder, Builder);
}
/// Overloaded to match the declaration of the expression's or value
/// declaration's type.
///
/// In case of a value declaration (for example a variable declaration),
/// this resolves one layer of indirection. For example, in the value
/// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of
/// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the
/// declaration of x.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and friend class X (matcher = friendDecl(hasType("X"))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// class Y { friend class X; };
/// \endcode
///
/// Usable as: Matcher<Expr>, Matcher<ValueDecl>
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl),
    internal::Matcher<Decl>, InnerMatcher, 1) {
  // A null QualType (no underlying type available) never matches.
  const QualType NodeType = internal::getUnderlyingType(Node);
  if (NodeType.isNull())
    return false;
  // Delegate to hasDeclaration over the underlying type.
  return qualType(hasDeclaration(InnerMatcher))
      .matches(NodeType, Finder, Builder);
}
/// Matches if the type location of the declarator decl's type matches
/// the inner matcher.
///
/// Given
/// \code
/// int x;
/// \endcode
/// declaratorDecl(hasTypeLoc(loc(asString("int"))))
/// matches int x
AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) {
  // Implicit declarations (for example implicit destructors) carry no
  // TypeSourceInfo and therefore never match.
  const TypeSourceInfo *const TSI = Node.getTypeSourceInfo();
  if (TSI == nullptr)
    return false;
  return Inner.matches(TSI->getTypeLoc(), Finder, Builder);
}
/// Matches if the matched type is represented by the given string.
///
/// Given
/// \code
/// class Y { public: void x(); };
/// void z() { Y* y; y->x(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(asString("class Y *"))))
/// matches y->x()
AST_MATCHER_P(QualType, asString, std::string, Name) {
  // Exact string equality against the printed form of the type.
  return Node.getAsString() == Name;
}
/// Matches if the matched type is a pointer type and the pointee type
/// matches the specified matcher.
///
/// Example matches y->x()
/// (matcher = cxxMemberCallExpr(on(hasType(pointsTo
/// cxxRecordDecl(hasName("Y")))))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y *y; y->x(); }
/// \endcode
AST_MATCHER_P(
    QualType, pointsTo, internal::Matcher<QualType>,
    InnerMatcher) {
  // Null or non-pointer types never match; otherwise match the pointee.
  if (Node.isNull() || !Node->isAnyPointerType())
    return false;
  return InnerMatcher.matches(Node->getPointeeType(), Finder, Builder);
}
/// Overloaded to match the pointee type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  // Implemented by composition with the QualType overload above.
  return pointsTo(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}
/// Matches if the matched type matches the unqualified desugared
/// type of the matched node.
///
/// For example, in:
/// \code
/// class A {};
/// using B = A;
/// \endcode
/// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches
/// both B and A.
AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>,
              InnerMatcher) {
  // Match against the fully desugared, unqualified form of the type.
  return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder,
                              Builder);
}
/// Matches if the matched type is a reference type and the referenced
/// type matches the specified matcher.
///
/// Example matches X &x and const X &y
/// (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X"))))))
/// \code
/// class X {
/// void a(X b) {
/// X &x = b;
/// const X &y = b;
/// }
/// };
/// \endcode
AST_MATCHER_P(QualType, references, internal::Matcher<QualType>,
              InnerMatcher) {
  // Null or non-reference types never match; otherwise match the
  // referenced type (getPointeeType covers references as well).
  if (Node.isNull() || !Node->isReferenceType())
    return false;
  return InnerMatcher.matches(Node->getPointeeType(), Finder, Builder);
}
/// Matches QualTypes whose canonical type matches InnerMatcher.
///
/// Given:
/// \code
/// typedef int &int_ref;
/// int a;
/// int_ref b = a;
/// \endcode
///
/// \c varDecl(hasType(qualType(referenceType()))) will not match the
/// declaration of b but \c
/// varDecl(hasType(qualType(hasCanonicalType(referenceType())))) does.
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>,
              InnerMatcher) {
  // A null QualType never matches; otherwise match the canonical type.
  return !Node.isNull() &&
         InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder);
}
/// Overloaded to match the referenced type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  // Implemented by composition with the QualType overload above.
  return references(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}
/// Matches on the implicit object argument of a member call expression. Unlike
/// `on`, matches the argument directly without stripping away anything.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// Y g();
/// class X : public Y { void g(); };
/// void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); }
/// \endcode
/// cxxMemberCallExpr(onImplicitObjectArgument(hasType(
/// cxxRecordDecl(hasName("Y")))))
/// matches `y.m()`, `x.m()` and (g()).m(), but not `x.g()`.
/// cxxMemberCallExpr(on(callExpr()))
/// does not match `(g()).m()`, because the parens are not ignored.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument,
              internal::Matcher<Expr>, InnerMatcher) {
  // Unlike `on`, nothing is stripped: the argument is matched verbatim.
  if (const Expr *const ImplicitArg = Node.getImplicitObjectArgument())
    return InnerMatcher.matches(*ImplicitArg, Finder, Builder);
  return false;
}
/// Matches if the type of the expression's implicit object argument either
/// matches the InnerMatcher, or is a pointer to a type that matches the
/// InnerMatcher.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// class X : public Y { void g(); };
/// void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); }
/// \endcode
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("Y")))))
/// matches `y.m()`, `p->m()` and `x.m()`.
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("X")))))
/// matches `x.g()`.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<QualType>, InnerMatcher, 0) {
  // Accept either a direct type match or a pointer to a matching type.
  return onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}
/// Overloaded to match the type's declaration.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<Decl>, InnerMatcher, 1) {
  // Same composition as the QualType overload, via hasType's Decl overload.
  return onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}
/// Matches a DeclRefExpr that refers to a declaration that matches the
/// specified matcher.
///
/// Example matches x in if(x)
/// (matcher = declRefExpr(to(varDecl(hasName("x")))))
/// \code
/// bool x;
/// if (x) {}
/// \endcode
AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>,
              InnerMatcher) {
  // Match the declaration this reference points at, when present.
  if (const Decl *const Referenced = Node.getDecl())
    return InnerMatcher.matches(*Referenced, Finder, Builder);
  return false;
}
/// Matches a \c DeclRefExpr that refers to a declaration through a
/// specific using shadow declaration.
///
/// Given
/// \code
/// namespace a { void f() {} }
/// using a::f;
/// void g() {
/// f(); // Matches this ..
/// a::f(); // .. but not this.
/// }
/// \endcode
/// declRefExpr(throughUsingDecl(anything()))
/// matches \c f()
AST_MATCHER_P(DeclRefExpr, throughUsingDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // Only references resolved through a using-shadow declaration can match.
  const auto *const Shadow = dyn_cast<UsingShadowDecl>(Node.getFoundDecl());
  return Shadow != nullptr && InnerMatcher.matches(*Shadow, Finder, Builder);
}
/// Matches an \c OverloadExpr if any of the declarations in the set of
/// overloads matches the given matcher.
///
/// Given
/// \code
/// template <typename T> void foo(T);
/// template <typename T> void bar(T);
/// template <typename T> void baz(T t) {
/// foo(t);
/// bar(t);
/// }
/// \endcode
/// unresolvedLookupExpr(hasAnyDeclaration(
/// functionTemplateDecl(hasName("foo"))))
/// matches \c foo in \c foo(t); but not \c bar in \c bar(t);
AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>,
              InnerMatcher) {
  // Succeeds on the first declaration in the overload set that matches.
  return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(),
                                    Node.decls_end(), Finder, Builder);
}
/// Matches the Decl of a DeclStmt which has a single declaration.
///
/// Given
/// \code
/// int a, b;
/// int c;
/// \endcode
/// declStmt(hasSingleDecl(anything()))
/// matches 'int c;' but not 'int a, b;'.
AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) {
  // Statements declaring more than one entity never match.
  if (!Node.isSingleDecl())
    return false;
  return InnerMatcher.matches(*Node.getSingleDecl(), Finder, Builder);
}
/// Matches a variable declaration that has an initializer expression
/// that matches the given matcher.
///
/// Example matches x (matcher = varDecl(hasInitializer(callExpr())))
/// \code
/// bool y() { return true; }
/// bool x = y();
/// \endcode
AST_MATCHER_P(
    VarDecl, hasInitializer, internal::Matcher<Expr>,
    InnerMatcher) {
  // Match only when an initializer expression is present.
  if (const Expr *const Init = Node.getAnyInitializer())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// \brief Matches a static variable with local scope.
///
/// Example matches y (matcher = varDecl(isStaticLocal()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// static int z;
/// \endcode
AST_MATCHER(VarDecl, isStaticLocal) {
  // Direct delegation to VarDecl::isStaticLocal().
  return Node.isStaticLocal();
}
/// Matches a variable declaration that has function scope and is a
/// non-static local variable.
///
/// Example matches x (matcher = varDecl(hasLocalStorage())
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasLocalStorage) {
  // Direct delegation to VarDecl::hasLocalStorage().
  return Node.hasLocalStorage();
}
/// Matches a variable declaration that does not have local storage.
///
/// Example matches y and z (matcher = varDecl(hasGlobalStorage())
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasGlobalStorage) {
  // Direct delegation to VarDecl::hasGlobalStorage().
  return Node.hasGlobalStorage();
}
/// Matches a variable declaration that has automatic storage duration.
///
/// Example matches x, but not y, z, or a.
/// (matcher = varDecl(hasAutomaticStorageDuration())
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasAutomaticStorageDuration) {
  // Storage duration, not storage class: compares against SD_Automatic.
  return Node.getStorageDuration() == SD_Automatic;
}
/// Matches a variable declaration that has static storage duration.
/// It includes the variable declared at namespace scope and those declared
/// with "static" and "extern" storage class specifiers.
///
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// static int b;
/// extern int c;
/// varDecl(hasStaticStorageDuration())
/// matches the variable declarations y, a, b and c.
/// \endcode
AST_MATCHER(VarDecl, hasStaticStorageDuration) {
  // Storage duration, not storage class: compares against SD_Static.
  return Node.getStorageDuration() == SD_Static;
}
/// Matches a variable declaration that has thread storage duration.
///
/// Example matches z, but not x, y, or a.
/// (matcher = varDecl(hasThreadStorageDuration())
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasThreadStorageDuration) {
  // Storage duration, not storage class: compares against SD_Thread.
  return Node.getStorageDuration() == SD_Thread;
}
/// Matches a variable declaration that is an exception variable from
/// a C++ catch block, or an Objective-C \@catch statement.
///
/// Example matches x (matcher = varDecl(isExceptionVariable())
/// \code
/// void f(int y) {
/// try {
/// } catch (int x) {
/// }
/// }
/// \endcode
AST_MATCHER(VarDecl, isExceptionVariable) {
  // Direct delegation to VarDecl::isExceptionVariable().
  return Node.isExceptionVariable();
}
/// Checks that a call expression or a constructor call expression has
/// a specific number of arguments (including absent default arguments).
///
/// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2)))
/// \code
/// void f(int x, int y);
/// f(0, 0);
/// \endcode
AST_POLYMORPHIC_MATCHER_P(argumentCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                          CXXConstructExpr,
                                                          ObjCMessageExpr),
                          unsigned, N) {
  // Compare the call's argument count (absent default arguments included,
  // per the documentation above) against N.
  return Node.getNumArgs() == N;
}
/// Matches the n'th argument of a call expression or a constructor
/// call expression.
///
/// Example matches y in x(y)
/// (matcher = callExpr(hasArgument(0, declRefExpr())))
/// \code
/// void x(int) { int y; x(y); }
/// \endcode
AST_POLYMORPHIC_MATCHER_P2(hasArgument,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr,
                                                           ObjCMessageExpr),
                           unsigned, N, internal::Matcher<Expr>, InnerMatcher) {
  // Out-of-range indices never match.
  if (N >= Node.getNumArgs())
    return false;
  // Parens and implicit casts around the argument are ignored.
  return InnerMatcher.matches(*Node.getArg(N)->IgnoreParenImpCasts(), Finder,
                              Builder);
}
/// Matches the n'th item of an initializer list expression.
///
/// Example matches y.
/// (matcher = initListExpr(hasInit(0, expr())))
/// \code
///   int x{y};
/// \endcode
AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N,
               ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  // Lists with N or fewer initializers never match.
  if (N >= Node.getNumInits())
    return false;
  return InnerMatcher.matches(*Node.getInit(N), Finder, Builder);
}
/// Matches declaration statements that contain a specific number of
/// declarations.
///
/// Example: Given
/// \code
/// int a, b;
/// int c;
/// int d = 2, e;
/// \endcode
/// declCountIs(2)
/// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'.
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) {
  // Count the declarations in this statement and compare against N.
  const ptrdiff_t NumDecls = std::distance(Node.decl_begin(), Node.decl_end());
  return NumDecls == static_cast<ptrdiff_t>(N);
}
/// Matches the n'th declaration of a declaration statement.
///
/// Note that this does not work for global declarations because the AST
/// breaks up multiple-declaration DeclStmt's into multiple single-declaration
/// DeclStmt's.
/// Example: Given non-global declarations
/// \code
/// int a, b = 0;
/// int c;
/// int d = 2, e;
/// \endcode
/// declStmt(containsDeclaration(
/// 0, varDecl(hasInitializer(anything()))))
/// matches only 'int d = 2, e;', and
/// declStmt(containsDeclaration(1, varDecl()))
/// \code
/// matches 'int a, b = 0' as well as 'int d = 2, e;'
/// but 'int c;' is not matched.
/// \endcode
AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N,
               internal::Matcher<Decl>, InnerMatcher) {
  // Reject out-of-range indices before dereferencing any iterator.
  const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end());
  if (N >= NumDecls)
    return false;
  // Step to the N'th declaration and match it.
  return InnerMatcher.matches(**std::next(Node.decl_begin(), N), Finder,
                              Builder);
}
/// Matches a C++ catch statement that has a catch-all handler.
///
/// Given
/// \code
/// try {
/// // ...
/// } catch (int) {
/// // ...
/// } catch (...) {
/// // ...
/// }
/// \endcode
/// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int).
AST_MATCHER(CXXCatchStmt, isCatchAll) {
  // catch (...) handlers are the ones without an exception declaration.
  return Node.getExceptionDecl() == nullptr;
}
/// Matches a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(
/// hasAnyConstructorInitializer(anything())
/// )))
/// record matches Foo, hasAnyConstructorInitializer matches foo_(1)
AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer,
              internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
  // Succeeds on the first constructor initializer that matches.
  return matchesFirstInPointerRange(InnerMatcher, Node.init_begin(),
                                    Node.init_end(), Finder, Builder);
}
/// Matches the field declaration of a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
/// forField(hasName("foo_"))))))
/// matches Foo
/// with forField matching foo_
AST_MATCHER_P(CXXCtorInitializer, forField,
              internal::Matcher<FieldDecl>, InnerMatcher) {
  // Match only when the initializer names a member field.
  if (const FieldDecl *const Member = Node.getAnyMember())
    return InnerMatcher.matches(*Member, Finder, Builder);
  return false;
}
/// Matches the initializer expression of a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
/// withInitializer(integerLiteral(equals(1)))))))
/// matches Foo
/// with withInitializer matching (1)
AST_MATCHER_P(CXXCtorInitializer, withInitializer,
              internal::Matcher<Expr>, InnerMatcher) {
  // Match only when an initializer expression is present.
  if (const Expr *const Init = Node.getInit())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// Matches a constructor initializer if it is explicitly written in
/// code (as opposed to implicitly added by the compiler).
///
/// Given
/// \code
/// struct Foo {
/// Foo() { }
/// Foo(int) : foo_("A") { }
/// string foo_;
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten()))
/// will match Foo(int), but not Foo()
AST_MATCHER(CXXCtorInitializer, isWritten) {
  // Direct delegation to CXXCtorInitializer::isWritten().
  return Node.isWritten();
}
/// Matches a constructor initializer if it is initializing a base, as
/// opposed to a member.
///
/// Given
/// \code
/// struct B {};
/// struct D : B {
/// int I;
/// D(int i) : I(i) {}
/// };
/// struct E : B {
/// E() : B() {}
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer()))
/// will match E(), but not match D(int).
AST_MATCHER(CXXCtorInitializer, isBaseInitializer) {
  // Direct delegation to CXXCtorInitializer::isBaseInitializer().
  return Node.isBaseInitializer();
}
/// Matches a constructor initializer if it is initializing a member, as
/// opposed to a base.
///
/// Given
/// \code
/// struct B {};
/// struct D : B {
/// int I;
/// D(int i) : I(i) {}
/// };
/// struct E : B {
/// E() : B() {}
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer()))
/// will match D(int), but not match E().
AST_MATCHER(CXXCtorInitializer, isMemberInitializer) {
  // Direct delegation to CXXCtorInitializer::isMemberInitializer().
  return Node.isMemberInitializer();
}
/// Matches any argument of a call expression or a constructor call
/// expression, or an ObjC-message-send expression.
///
/// Given
/// \code
/// void x(int, int, int) { int y; x(1, y, 42); }
/// \endcode
/// callExpr(hasAnyArgument(declRefExpr()))
/// matches x(1, y, 42)
/// with hasAnyArgument(...)
/// matching y
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// void foo(I *i) { [i f:12]; }
/// \endcode
/// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12))))
/// matches [i f:12]
AST_POLYMORPHIC_MATCHER_P(hasAnyArgument,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              CallExpr, CXXConstructExpr,
                              CXXUnresolvedConstructExpr, ObjCMessageExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // Try each argument in turn; only the bindings of the first matching
  // argument are committed back into the caller's builder.
  for (const Expr *Argument : Node.arguments()) {
    BoundNodesTreeBuilder Candidate(*Builder);
    if (InnerMatcher.matches(*Argument, Finder, &Candidate)) {
      *Builder = std::move(Candidate);
      return true;
    }
  }
  return false;
}
/// Matches any capture of a lambda expression.
///
/// Given
/// \code
/// void foo() {
/// int x;
/// auto f = [x](){};
/// }
/// \endcode
/// lambdaExpr(hasAnyCapture(anything()))
/// matches [x](){};
AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<VarDecl>,
                       InnerMatcher, 0) {
  // Walk the lambda's captures; only variable captures are considered
  // (this-captures and VLA captures have no captured VarDecl).
  for (const LambdaCapture &Capture : Node.captures()) {
    if (Capture.capturesVariable()) {
      // Use a scratch builder so bindings from failed attempts are dropped;
      // only the first successful match is committed.
      BoundNodesTreeBuilder Result(*Builder);
      if (InnerMatcher.matches(*Capture.getCapturedVar(), Finder, &Result)) {
        *Builder = std::move(Result);
        return true;
      }
    }
  }
  return false;
}
/// Matches any capture of 'this' in a lambda expression.
///
/// Given
/// \code
/// struct foo {
/// void bar() {
/// auto f = [this](){};
/// }
/// }
/// \endcode
/// lambdaExpr(hasAnyCapture(cxxThisExpr()))
/// matches [this](){};
AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture,
                       internal::Matcher<CXXThisExpr>, InnerMatcher, 1) {
  // The inner matcher is intentionally unused (as in the original): any
  // capture of 'this' satisfies this overload.
  for (const LambdaCapture &Capture : Node.captures())
    if (Capture.capturesThis())
      return true;
  return false;
}
/// Matches a constructor call expression which uses list initialization.
AST_MATCHER(CXXConstructExpr, isListInitialization) {
  // Direct delegation to CXXConstructExpr::isListInitialization().
  return Node.isListInitialization();
}
/// Matches a constructor call expression which requires
/// zero initialization.
///
/// Given
/// \code
/// void foo() {
/// struct point { double x; double y; };
/// point pt[2] = { { 1.0, 2.0 } };
/// }
/// \endcode
/// initListExpr(has(cxxConstructExpr(requiresZeroInitialization()))
/// will match the implicit array filler for pt[1].
AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) {
  // Direct delegation to CXXConstructExpr::requiresZeroInitialization().
  return Node.requiresZeroInitialization();
}
/// Matches the n'th parameter of a function or an ObjC method
/// declaration or a block.
///
/// Given
/// \code
/// class X { void f(int x) {} };
/// \endcode
/// cxxMethodDecl(hasParameter(0, hasType(varDecl())))
/// matches f(int x) {}
/// with hasParameter(...)
/// matching int x
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasParameter(0, hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P2(hasParameter,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                           ObjCMethodDecl,
                                                           BlockDecl),
                           unsigned, N, internal::Matcher<ParmVarDecl>,
                           InnerMatcher) {
  // Declarations with N or fewer parameters never match.
  if (N >= Node.parameters().size())
    return false;
  return InnerMatcher.matches(*Node.parameters()[N], Finder, Builder);
}
/// Matches all arguments and their respective ParmVarDecl.
///
/// Given
/// \code
/// void f(int i);
/// int y;
/// f(y);
/// \endcode
/// callExpr(
/// forEachArgumentWithParam(
/// declRefExpr(to(varDecl(hasName("y")))),
/// parmVarDecl(hasType(isInteger()))
/// ))
/// matches f(y);
/// with declRefExpr(...)
/// matching int y
/// and parmVarDecl(...)
/// matching int i
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<ParmVarDecl>, ParamMatcher) {
  // Accumulates one bound-node set per (argument, parameter) pair that
  // satisfies both matchers; the accumulated sets replace *Builder at the end
  // so each pair produces its own match result.
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                              .matches(Node, Finder, &Matches)
                          ? 1
                          : 0;
  // ParamIndex tracks the callee parameter that corresponds to the current
  // argument; it starts at 0 even when ArgIndex starts at 1, because the
  // skipped implicit object argument has no corresponding parameter.
  int ParamIndex = 0;
  bool Matched = false;
  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) {
    // Each argument gets a fresh copy of the incoming bindings so failed
    // attempts do not pollute later ones.
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
                           Finder, &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      // Resolve the callee (constructor or function) and test the parameter
      // at ParamIndex against ParamMatcher.
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, ParamMatcher)))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, ParamMatcher))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
      }
    }
    ++ParamIndex;
  }
  *Builder = std::move(Result);
  return Matched;
}
/// Matches any parameter of a function or an ObjC method declaration or a
/// block.
///
/// Does not match the 'this' parameter of a method.
///
/// Given
/// \code
/// class X { void f(int x, int y, int z) {} };
/// \endcode
/// cxxMethodDecl(hasAnyParameter(hasName("y")))
/// matches f(int x, int y, int z) {}
/// with hasAnyParameter(...)
/// matching int y
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
///
/// For blocks, given
/// \code
/// b = ^(int y) { printf("%d", y) };
/// \endcode
///
/// the matcher blockDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of the block b with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P(hasAnyParameter,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          ObjCMethodDecl,
                                                          BlockDecl),
                          internal::Matcher<ParmVarDecl>,
                          InnerMatcher) {
  // Delegates to the shared helper, which — judging by its name — succeeds on
  // the first parameter that satisfies InnerMatcher and updates Builder with
  // that parameter's bindings (verify against its definition if relying on
  // binding behavior).
  return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(),
                                    Node.param_end(), Finder, Builder);
}
/// Matches \c FunctionDecls and \c FunctionProtoTypes that have a
/// specific parameter count.
///
/// Given
/// \code
/// void f(int i) {}
/// void g(int i, int j) {}
/// void h(int i, int j);
/// void j(int i);
/// void k(int x, int y, int z, ...);
/// \endcode
/// functionDecl(parameterCountIs(2))
/// matches \c g and \c h
/// functionProtoType(parameterCountIs(2))
/// matches \c g and \c h
/// functionProtoType(parameterCountIs(3))
/// matches \c k
AST_POLYMORPHIC_MATCHER_P(parameterCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          FunctionProtoType),
                          unsigned, N) {
  // Counts declared parameters only; a trailing ellipsis is not included.
  const unsigned NumParams = Node.getNumParams();
  return NumParams == N;
}
/// Matches \c FunctionDecls that have a noreturn attribute.
///
/// Given
/// \code
/// void nope();
/// [[noreturn]] void a();
/// __attribute__((noreturn)) void b();
/// struct c { [[noreturn]] c(); };
/// \endcode
/// functionDecl(isNoReturn())
/// matches all of those except
/// \code
/// void nope();
/// \endcode
AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); }
/// Matches the return type of a function declaration.
///
/// Given:
/// \code
/// class X { int f() { return 1; } };
/// \endcode
/// cxxMethodDecl(returns(asString("int")))
/// matches int f() { return 1; }
AST_MATCHER_P(FunctionDecl, returns, internal::Matcher<QualType>,
              InnerMatcher) {
  const QualType RetTy = Node.getReturnType();
  return InnerMatcher.matches(RetTy, Finder, Builder);
}
/// Matches extern "C" function or variable declarations.
///
/// Given:
/// \code
/// extern "C" void f() {}
/// extern "C" { void g() {} }
/// void h() {}
/// extern "C" int x = 1;
/// extern "C" int y = 2;
/// int z = 3;
/// \endcode
/// functionDecl(isExternC())
/// matches the declaration of f and g, but not the declaration of h.
/// varDecl(isExternC())
/// matches the declaration of x and y, but not the declaration of z.
AST_POLYMORPHIC_MATCHER(isExternC,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        VarDecl)) {
  // Matches both direct extern "C" declarations and ones inside an
  // extern "C" { ... } block.
  const bool HasCLinkage = Node.isExternC();
  return HasCLinkage;
}
/// Matches variable/function declarations that have "static" storage
/// class specifier ("static" keyword) written in the source.
///
/// Given:
/// \code
/// static void f() {}
/// static int i = 0;
/// extern int j;
/// int k;
/// \endcode
/// functionDecl(isStaticStorageClass())
/// matches the function declaration f.
/// varDecl(isStaticStorageClass())
/// matches the variable declaration i.
AST_POLYMORPHIC_MATCHER(isStaticStorageClass,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        VarDecl)) {
  // Only matches an explicitly written "static" storage class specifier.
  const StorageClass SC = Node.getStorageClass();
  return SC == SC_Static;
}
/// Matches deleted function declarations.
///
/// Given:
/// \code
/// void Func();
/// void DeletedFunc() = delete;
/// \endcode
/// functionDecl(isDeleted())
/// matches the declaration of DeletedFunc, but not Func.
AST_MATCHER(FunctionDecl, isDeleted) { return Node.isDeleted(); }
/// Matches defaulted function declarations.
///
/// Given:
/// \code
/// class A { ~A(); };
/// class B { ~B() = default; };
/// \endcode
/// functionDecl(isDefaulted())
/// matches the declaration of ~B, but not ~A.
AST_MATCHER(FunctionDecl, isDefaulted) { return Node.isDefaulted(); }
/// Matches functions that have a dynamic exception specification.
///
/// Given:
/// \code
/// void f();
/// void g() noexcept;
/// void h() noexcept(true);
/// void i() noexcept(false);
/// void j() throw();
/// void k() throw(int);
/// void l() throw(...);
/// \endcode
/// functionDecl(hasDynamicExceptionSpec()) and
/// functionProtoType(hasDynamicExceptionSpec())
/// match the declarations of j, k, and l, but not f, g, h, or i.
AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  // Functions without a prototype cannot carry an exception specification.
  const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node);
  return FnTy != nullptr && FnTy->hasDynamicExceptionSpec();
}
/// Matches functions that have a non-throwing exception specification.
///
/// Given:
/// \code
/// void f();
/// void g() noexcept;
/// void h() throw();
/// void i() throw(int);
/// void j() noexcept(false);
/// \endcode
/// functionDecl(isNoThrow()) and functionProtoType(isNoThrow())
/// match the declarations of g, and h, but not f, i or j.
AST_POLYMORPHIC_MATCHER(isNoThrow,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  if (const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node)) {
    // Assume the best for any unresolved exception specification.
    if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType()))
      return true;
    return FnTy->isNothrow();
  }
  // Without a prototype the function is treated as potentially throwing, as
  // it would be with no exception specification at all.
  return false;
}
/// Matches constexpr variable and function declarations,
/// and if constexpr.
///
/// Given:
/// \code
/// constexpr int foo = 42;
/// constexpr int bar();
/// void baz() { if constexpr(1 > 0) {} }
/// \endcode
/// varDecl(isConstexpr())
/// matches the declaration of foo.
/// functionDecl(isConstexpr())
/// matches the declaration of bar.
/// ifStmt(isConstexpr())
/// matches the if statement in baz.
AST_POLYMORPHIC_MATCHER(isConstexpr,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl, FunctionDecl,
                                                        IfStmt)) {
  // All three supported node types expose the same isConstexpr() accessor.
  const bool IsConstexpr = Node.isConstexpr();
  return IsConstexpr;
}
/// Matches selection statements with initializer.
///
/// Given:
/// \code
/// void foo() {
/// if (int i = foobar(); i > 0) {}
/// switch (int i = foobar(); i) {}
/// for (auto& a = get_range(); auto& x : a) {}
/// }
/// void bar() {
/// if (foobar() > 0) {}
/// switch (foobar()) {}
/// for (auto& x : get_range()) {}
/// }
/// \endcode
/// ifStmt(hasInitStatement(anything()))
/// matches the if statement in foo but not in bar.
/// switchStmt(hasInitStatement(anything()))
/// matches the switch statement in foo but not in bar.
/// cxxForRangeStmt(hasInitStatement(anything()))
/// matches the range for statement in foo but not in bar.
AST_POLYMORPHIC_MATCHER_P(hasInitStatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, SwitchStmt,
                                                          CXXForRangeStmt),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // Statements without an init-statement yield a null pointer here.
  if (const Stmt *Init = Node.getInit())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// Matches the condition expression of an if statement, for loop,
/// switch statement or conditional operator.
///
/// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true))))
/// \code
/// if (true) {}
/// \endcode
AST_POLYMORPHIC_MATCHER_P(
    hasCondition,
    AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt,
                                    SwitchStmt, AbstractConditionalOperator),
    internal::Matcher<Expr>, InnerMatcher) {
  // E.g. a 'for (;;)' has no condition, so guard against null.
  if (const Expr *Cond = Node.getCond())
    return InnerMatcher.matches(*Cond, Finder, Builder);
  return false;
}
/// Matches the then-statement of an if statement.
///
/// Examples matches the if statement
/// (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true)))))
/// \code
/// if (false) true; else false;
/// \endcode
AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) {
  if (const Stmt *Then = Node.getThen())
    return InnerMatcher.matches(*Then, Finder, Builder);
  return false;
}
/// Matches the else-statement of an if statement.
///
/// Examples matches the if statement
/// (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true)))))
/// \code
/// if (false) false; else true;
/// \endcode
AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) {
  // An if-statement without an else branch yields a null pointer here.
  if (const Stmt *Else = Node.getElse())
    return InnerMatcher.matches(*Else, Finder, Builder);
  return false;
}
/// Matches if a node equals a previously bound node.
///
/// Matches a node if it equals the node previously bound to \p ID.
///
/// Given
/// \code
/// class X { int a; int b; };
/// \endcode
/// cxxRecordDecl(
/// has(fieldDecl(hasName("a"), hasType(type().bind("t")))),
/// has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t"))))))
/// matches the class \c X, as \c a and \c b have the same type.
///
/// Note that when multiple matches are involved via \c forEach* matchers,
/// \c equalsBoundNodes acts as a filter.
/// For example:
/// compoundStmt(
/// forEachDescendant(varDecl().bind("d")),
/// forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d"))))))
/// will trigger a match for each combination of variable declaration
/// and reference to that variable declaration within a compound statement.
AST_POLYMORPHIC_MATCHER_P(equalsBoundNode,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type,
                                                          QualType),
                          std::string, ID) {
  // FIXME: Figure out whether it makes sense to allow this
  // on any other node types.
  // For *Loc it probably does not make sense, as those seem
  // unique. For NestedNameSepcifier it might make sense, as
  // those also have pointer identity, but I'm not sure whether
  // they're ever reused.
  // Filters the builder through a predicate keyed on ID and the current node;
  // given the predicate's name ("NotEquals...") this presumably drops binding
  // sets whose node bound to ID differs from Node, and removeBindings()
  // presumably reports whether any sets survive — confirm against the
  // predicate/Builder implementations before relying on binding details.
  internal::NotEqualsBoundNodePredicate Predicate;
  Predicate.ID = ID;
  Predicate.Node = DynTypedNode::create(Node);
  return Builder->removeBindings(Predicate);
}
/// Matches the condition variable statement in an if statement.
///
/// Given
/// \code
/// if (A* a = GetAPointer()) {}
/// \endcode
/// hasConditionVariableStatement(...)
/// matches 'A* a = GetAPointer()'.
AST_MATCHER_P(IfStmt, hasConditionVariableStatement,
              internal::Matcher<DeclStmt>, InnerMatcher) {
  // Null when the if-condition declares no variable.
  if (const DeclStmt *CondVarStmt = Node.getConditionVariableDeclStmt())
    return InnerMatcher.matches(*CondVarStmt, Finder, Builder);
  return false;
}
/// Matches the index expression of an array subscript expression.
///
/// Given
/// \code
/// int i[5];
/// void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasIndex(integerLiteral()))
/// matches \c i[1] with the \c integerLiteral() matching \c 1
AST_MATCHER_P(ArraySubscriptExpr, hasIndex,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *IndexExpr = Node.getIdx();
  return IndexExpr != nullptr &&
         InnerMatcher.matches(*IndexExpr, Finder, Builder);
}
/// Matches the base expression of an array subscript expression.
///
/// Given
/// \code
/// int i[5];
/// void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasBase(implicitCastExpr(
///     hasSourceExpression(declRefExpr()))))
/// matches \c i[1] with the \c declRefExpr() matching \c i
AST_MATCHER_P(ArraySubscriptExpr, hasBase,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *BaseExpr = Node.getBase();
  return BaseExpr != nullptr &&
         InnerMatcher.matches(*BaseExpr, Finder, Builder);
}
/// Matches a 'for', 'while', 'do while' statement or a function
/// definition that has a given body.
///
/// Given
/// \code
/// for (;;) {}
/// \endcode
/// hasBody(compoundStmt())
/// matches 'for (;;) {}'
/// with compoundStmt()
/// matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasBody,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt,
                                                          WhileStmt,
                                                          CXXForRangeStmt,
                                                          FunctionDecl),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // GetBodyMatcher adapts body retrieval per node type; it may return null
  // (e.g. a function declaration without a definition).
  if (const Stmt *Body = internal::GetBodyMatcher<NodeType>::get(Node))
    return InnerMatcher.matches(*Body, Finder, Builder);
  return false;
}
/// Matches compound statements where at least one substatement matches
/// a given matcher. Also matches StmtExprs that have CompoundStmt as children.
///
/// Given
/// \code
/// { {}; 1+2; }
/// \endcode
/// hasAnySubstatement(compoundStmt())
/// matches '{ {}; 1+2; }'
/// with compoundStmt()
/// matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt,
                                                          StmtExpr),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // For a StmtExpr this extracts its child CompoundStmt; may be null.
  const CompoundStmt *CS = CompoundStmtMatcher<NodeType>::get(Node);
  if (!CS)
    return false;
  return matchesFirstInPointerRange(InnerMatcher, CS->body_begin(),
                                    CS->body_end(), Finder, Builder);
}
/// Checks that a compound statement contains a specific number of
/// child statements.
///
/// Example: Given
/// \code
/// { for (;;) {} }
/// \endcode
/// compoundStmt(statementCountIs(0))
/// matches '{}'
/// but does not match the outer compound statement.
AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) {
  // Only direct children count; nested statements are not included.
  const unsigned NumChildStmts = Node.size();
  return NumChildStmts == N;
}
/// Matches literals that are equal to the given value of type ValueT.
///
/// Given
/// \code
/// f('\0', false, 3.14, 42);
/// \endcode
/// characterLiteral(equals(0))
/// matches '\0'
/// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0))
/// match false
/// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2))
/// match 3.14
/// integerLiteral(equals(42))
/// matches 42
///
/// Note that you cannot directly match a negative numeric literal because the
/// minus sign is not part of the literal: It is a unary operator whose operand
/// is the positive numeric literal. Instead, you must use a unaryOperator()
/// matcher to match the minus sign:
///
/// unaryOperator(hasOperatorName("-"),
/// hasUnaryOperand(integerLiteral(equals(13))))
///
/// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>,
/// Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
template <typename ValueT>
internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT>
equals(const ValueT &Value) {
  using ResultT = internal::PolymorphicMatcherWithParam1<
      internal::ValueEqualsMatcher, ValueT>;
  return ResultT(Value);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   IntegerLiteral),
                                   bool, Value, 0) {
  // Overload taking a bool literal value.
  const internal::ValueEqualsMatcher<NodeType, ParamT> Matcher(Value);
  return Matcher.matchesNode(Node);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   IntegerLiteral),
                                   unsigned, Value, 1) {
  // Overload taking an unsigned literal value.
  const internal::ValueEqualsMatcher<NodeType, ParamT> Matcher(Value);
  return Matcher.matchesNode(Node);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   FloatingLiteral,
                                                                   IntegerLiteral),
                                   double, Value, 2) {
  // Overload taking a double literal value; also supports FloatingLiteral.
  const internal::ValueEqualsMatcher<NodeType, ParamT> Matcher(Value);
  return Matcher.matchesNode(Node);
}
/// Matches the operator Name of operator expressions (binary or
/// unary).
///
/// Example matches a || b (matcher = binaryOperator(hasOperatorName("||")))
/// \code
/// !(a || b)
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasOperatorName,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          UnaryOperator),
                          std::string, Name) {
  // Compare the node's spelled opcode against the requested name.
  const auto OpcodeSpelling = Node.getOpcodeStr(Node.getOpcode());
  return OpcodeSpelling == Name;
}
/// Matches operator expressions (binary or unary) that have any of the
/// specified names.
///
/// hasAnyOperatorName("+", "-")
/// Is equivalent to
/// anyOf(hasOperatorName("+"), hasOperatorName("-"))
// Declared as a variadic function object so callers may pass any number of
// operator-name string literals, e.g. hasAnyOperatorName("+", "-", "*").
extern const internal::VariadicFunction<
    internal::PolymorphicMatcherWithParam1<
        internal::HasAnyOperatorNameMatcher, std::vector<std::string>,
        AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, UnaryOperator)>,
    StringRef, internal::hasAnyOperatorNameFunc>
    hasAnyOperatorName;
/// Matches all kinds of assignment operators.
///
/// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator()))
/// \code
/// if (a == b)
/// a += b;
/// \endcode
///
/// Example 2: matches s1 = s2
/// (matcher = cxxOperatorCallExpr(isAssignmentOperator()))
/// \code
/// struct S { S& operator=(const S&); };
/// void x() { S s1, s2; s1 = s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(isAssignmentOperator,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                        CXXOperatorCallExpr)) {
  // Both supported node types expose isAssignmentOp().
  const bool IsAssignment = Node.isAssignmentOp();
  return IsAssignment;
}
/// Matches comparison operators.
///
/// Example 1: matches a == b (matcher = binaryOperator(isComparisonOperator()))
/// \code
/// if (a == b)
/// a += b;
/// \endcode
///
/// Example 2: matches s1 < s2
/// (matcher = cxxOperatorCallExpr(isComparisonOperator()))
/// \code
/// struct S { bool operator<(const S& other); };
/// void x(S s1, S s2) { bool b1 = s1 < s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(isComparisonOperator,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                        CXXOperatorCallExpr)) {
  // Both supported node types expose isComparisonOp().
  const bool IsComparison = Node.isComparisonOp();
  return IsComparison;
}
/// Matches the left hand side of binary operator expressions.
///
/// Example matches a (matcher = binaryOperator(hasLHS()))
/// \code
/// a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasLHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *LHS = Node.getLHS())
    return InnerMatcher.matches(*LHS, Finder, Builder);
  return false;
}
/// Matches the right hand side of binary operator expressions.
///
/// Example matches b (matcher = binaryOperator(hasRHS()))
/// \code
/// a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasRHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *RHS = Node.getRHS())
    return InnerMatcher.matches(*RHS, Finder, Builder);
  return false;
}
/// Matches if either the left hand side or the right hand side of a
/// binary operator matches.
inline internal::Matcher<BinaryOperator>
hasEitherOperand(const internal::Matcher<Expr> &InnerMatcher) {
  // Convenience wrapper: succeeds when the LHS or the RHS matches.
  return anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher));
}
/// Matches if the operand of a unary operator matches.
///
/// Example matches true (matcher = hasUnaryOperand(
/// cxxBoolLiteral(equals(true))))
/// \code
/// !true
/// \endcode
AST_MATCHER_P(UnaryOperator, hasUnaryOperand,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *SubExpr = Node.getSubExpr())
    return InnerMatcher.matches(*SubExpr, Finder, Builder);
  return false;
}
/// Matches if the cast's source expression
/// or opaque value's source expression matches the given matcher.
///
/// Example 1: matches "a string"
/// (matcher = castExpr(hasSourceExpression(cxxConstructExpr())))
/// \code
/// class URL { URL(string); };
/// URL url = "a string";
/// \endcode
///
/// Example 2: matches 'b' (matcher =
/// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr())))
/// \code
/// int a = b ?: 1;
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasSourceExpression,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr,
                                                          OpaqueValueExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // GetSourceExpressionMatcher adapts source-expression retrieval per node
  // type; the result may be null.
  if (const Expr *Source =
          internal::GetSourceExpressionMatcher<NodeType>::get(Node))
    return InnerMatcher.matches(*Source, Finder, Builder);
  return false;
}
/// Matches casts that has a given cast kind.
///
/// Example: matches the implicit cast around \c 0
/// (matcher = castExpr(hasCastKind(CK_NullToPointer)))
/// \code
/// int *p = 0;
/// \endcode
///
/// If the matcher is use from clang-query, CastKind parameter
/// should be passed as a quoted string. e.g., hasCastKind("CK_NullToPointer").
AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) {
  const CastKind ActualKind = Node.getCastKind();
  return ActualKind == Kind;
}
/// Matches casts whose destination type matches a given matcher.
///
/// (Note: Clang's AST refers to other conversions as "casts" too, and calls
/// actual casts "explicit" casts.)
AST_MATCHER_P(ExplicitCastExpr, hasDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Use the type as spelled in the cast, not the semantic result type.
  const QualType WrittenTy = Node.getTypeAsWritten();
  return InnerMatcher.matches(WrittenTy, Finder, Builder);
}
/// Matches implicit casts whose destination type matches a given
/// matcher.
///
/// FIXME: Unit test this matcher
AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  const QualType DestTy = Node.getType();
  return InnerMatcher.matches(DestTy, Finder, Builder);
}
/// Matches TagDecl object that are spelled with "struct."
///
/// Example matches S, but not C, U or E.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isStruct) { return Node.isStruct(); }
/// Matches TagDecl object that are spelled with "union."
///
/// Example matches U, but not C, S or E.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isUnion) { return Node.isUnion(); }
/// Matches TagDecl object that are spelled with "class."
///
/// Example matches C, but not S, U or E.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isClass) { return Node.isClass(); }
/// Matches TagDecl object that are spelled with "enum."
///
/// Example matches E, but not C, S or U.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isEnum) { return Node.isEnum(); }
/// Matches the true branch expression of a conditional operator.
///
/// Example 1 (conditional ternary operator): matches a
/// \code
/// condition ? a : b
/// \endcode
///
/// Example 2 (conditional binary operator): matches opaqueValueExpr(condition)
/// \code
/// condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *TrueExpr = Node.getTrueExpr())
    return InnerMatcher.matches(*TrueExpr, Finder, Builder);
  return false;
}
/// Matches the false branch expression of a conditional operator
/// (binary or ternary).
///
/// Example matches b
/// \code
/// condition ? a : b
/// condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *FalseExpr = Node.getFalseExpr())
    return InnerMatcher.matches(*FalseExpr, Finder, Builder);
  return false;
}
/// Matches if a declaration has a body attached.
///
/// Example matches A, va, fa
/// \code
/// class A {};
/// class B; // Doesn't match, as it has no body.
/// int va;
/// extern int vb; // Doesn't match, as it doesn't define the variable.
/// void fa() {}
/// void fb(); // Doesn't match, as it has no body.
/// @interface X
/// - (void)ma; // Doesn't match, interface is declaration.
/// @end
/// @implementation X
/// - (void)ma {}
/// @end
/// \endcode
///
/// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>,
/// Matcher<ObjCMethodDecl>
AST_POLYMORPHIC_MATCHER(isDefinition,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl,
                                                        ObjCMethodDecl,
                                                        FunctionDecl)) {
  // Checks this particular redeclaration, not whether any redeclaration in
  // the chain is a definition.
  const bool IsDefinition = Node.isThisDeclarationADefinition();
  return IsDefinition;
}
/// Matches if a function declaration is variadic.
///
/// Example matches f, but not g or h. The function i will not match, even when
/// compiled in C mode.
/// \code
/// void f(...);
/// void g(int);
/// template <typename... Ts> void h(Ts...);
/// void i();
/// \endcode
AST_MATCHER(FunctionDecl, isVariadic) { return Node.isVariadic(); }
/// Matches the class declaration that the given method declaration
/// belongs to.
///
/// FIXME: Generalize this for other kinds of declarations.
/// FIXME: What other kind of declarations would we need to generalize
/// this to?
///
/// Example matches A() in the last line
/// (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl(
/// ofClass(hasName("A"))))))
/// \code
/// class A {
/// public:
/// A();
/// };
/// A a = A();
/// \endcode
AST_MATCHER_P(CXXMethodDecl, ofClass,
              internal::Matcher<CXXRecordDecl>, InnerMatcher) {
  if (const CXXRecordDecl *Parent = Node.getParent())
    return InnerMatcher.matches(*Parent, Finder, Builder);
  return false;
}
/// Matches each method overridden by the given method. This matcher may
/// produce multiple matches.
///
/// Given
/// \code
/// class A { virtual void f(); };
/// class B : public A { void f(); };
/// class C : public B { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
/// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
/// matches once, with "b" binding "A::f" and "d" binding "C::f" (Note
/// that B::f is not overridden by C::f).
///
/// The check can produce multiple matches in case of multiple inheritance, e.g.
/// \code
/// class A1 { virtual void f(); };
/// class A2 { virtual void f(); };
/// class C : public A1, public A2 { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
/// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
/// matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and
/// once with "b" binding "A2::f" and "d" binding "C::f".
AST_MATCHER_P(CXXMethodDecl, forEachOverridden,
              internal::Matcher<CXXMethodDecl>, InnerMatcher) {
  // Collect one bound-node set per overridden method that matches, then
  // replace the builder's contents with the collected sets so every
  // overridden method yields its own match result.
  BoundNodesTreeBuilder Matches;
  bool AnyMatched = false;
  for (const CXXMethodDecl *Overridden : Node.overridden_methods()) {
    // Copy the incoming bindings so a failed attempt leaves them untouched.
    BoundNodesTreeBuilder BuilderCopy(*Builder);
    if (InnerMatcher.matches(*Overridden, Finder, &BuilderCopy)) {
      AnyMatched = true;
      Matches.addMatch(BuilderCopy);
    }
  }
  *Builder = std::move(Matches);
  return AnyMatched;
}
/// Matches if the given method declaration is virtual.
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// \endcode
/// matches A::x
AST_MATCHER(CXXMethodDecl, isVirtual) { return Node.isVirtual(); }
/// Matches if the given method declaration has an explicit "virtual".
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// class B : public A {
/// public:
/// void x();
/// };
/// \endcode
/// matches A::x but not B::x
AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) {
  // Only matches when the 'virtual' keyword is spelled on this declaration,
  // not when virtualness is merely inherited from an overridden method.
  const bool WrittenVirtual = Node.isVirtualAsWritten();
  return WrittenVirtual;
}
/// Matches if the given method or class declaration is final.
///
/// Given:
/// \code
/// class A final {};
///
/// struct B {
/// virtual void f();
/// };
///
/// struct C : B {
/// void f() final;
/// };
/// \endcode
/// matches A and C::f, but not B, C, or B::f
AST_POLYMORPHIC_MATCHER(isFinal,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl,
                                                        CXXMethodDecl)) {
  // 'template' disambiguator is required: Node's type depends on NodeType.
  const bool HasFinal = Node.template hasAttr<FinalAttr>();
  return HasFinal;
}
/// Matches if the given method declaration is pure.
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x() = 0;
/// };
/// \endcode
/// matches A::x
AST_MATCHER(CXXMethodDecl, isPure) { return Node.isPure(); }
/// Matches if the given method declaration is const.
///
/// Given
/// \code
/// struct A {
/// void foo() const;
/// void bar();
/// };
/// \endcode
///
/// cxxMethodDecl(isConst()) matches A::foo() but not A::bar()
AST_MATCHER(CXXMethodDecl, isConst) { return Node.isConst(); }
/// Matches if the given method declaration declares a copy assignment
/// operator.
///
/// Given
/// \code
/// struct A {
/// A &operator=(const A &);
/// A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not
/// the second one.
AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) {
  const bool IsCopyAssign = Node.isCopyAssignmentOperator();
  return IsCopyAssign;
}
/// Matches if the given method declaration declares a move assignment
/// operator.
///
/// Given
/// \code
/// struct A {
/// A &operator=(const A &);
/// A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not
/// the first one.
AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) {
  const bool IsMoveAssign = Node.isMoveAssignmentOperator();
  return IsMoveAssign;
}
/// Matches if the given method declaration overrides another method.
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// class B : public A {
/// public:
/// virtual void x();
/// };
/// \endcode
/// matches B::x
AST_MATCHER(CXXMethodDecl, isOverride) {
  // A method overrides if it shadows a virtual method in a base class, or if
  // it carries an explicit 'override' specifier.
  if (Node.size_overridden_methods() > 0)
    return true;
  return Node.hasAttr<OverrideAttr>();
}
/// Matches method declarations that are user-provided.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &) = default; // #2
/// S(S &&) = delete; // #3
/// };
/// \endcode
/// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3.
AST_MATCHER(CXXMethodDecl, isUserProvided) { return Node.isUserProvided(); }
/// Matches member expressions that are called with '->' as opposed
/// to '.'.
///
/// Member calls on the implicit this pointer match as called with '->'.
///
/// Given
/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// template <class T> void f() { this->f<T>(); f<T>(); }
/// int a;
/// static int b;
/// };
/// template <class T>
/// class Z {
/// void x() { this->m; }
/// };
/// \endcode
/// memberExpr(isArrow())
/// matches this->x, x, y.x, a, this->b
/// cxxDependentScopeMemberExpr(isArrow())
/// matches this->m
/// unresolvedMemberExpr(isArrow())
/// matches this->f<T>, f<T>
AST_POLYMORPHIC_MATCHER(
    isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                             CXXDependentScopeMemberExpr)) {
  // All three supported node types expose isArrow().
  const bool UsesArrow = Node.isArrow();
  return UsesArrow;
}
/// Matches QualType nodes that are of integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isInteger())))
/// matches "a(int)", "b(long)", but not "c(double)".
AST_MATCHER(QualType, isInteger) { return Node->isIntegerType(); }
/// Matches QualType nodes that are of unsigned integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(unsigned long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isUnsignedInteger())))
/// matches "b(unsigned long)", but not "a(int)" and "c(double)".
AST_MATCHER(QualType, isUnsignedInteger) {
  const bool IsUnsigned = Node->isUnsignedIntegerType();
  return IsUnsigned;
}
/// Matches QualType nodes that are of signed integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(unsigned long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isSignedInteger())))
/// matches "a(int)", but not "b(unsigned long)" and "c(double)".
AST_MATCHER(QualType, isSignedInteger) {
  const bool IsSigned = Node->isSignedIntegerType();
  return IsSigned;
}
/// Matches QualType nodes that are of character type.
///
/// Given
/// \code
/// void a(char);
/// void b(wchar_t);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isAnyCharacter())))
/// matches "a(char)", "b(wchar_t)", but not "c(double)".
AST_MATCHER(QualType, isAnyCharacter) { return Node->isAnyCharacterType(); }
/// Matches QualType nodes that are of any pointer type; this includes
/// the Objective-C object pointer type, which is different despite being
/// syntactically similar.
///
/// Given
/// \code
/// int *i = nullptr;
///
/// @interface Foo
/// @end
/// Foo *f;
///
/// int j;
/// \endcode
/// varDecl(hasType(isAnyPointer()))
/// matches "int *i" and "Foo *f", but not "int j".
AST_MATCHER(QualType, isAnyPointer) {
  // "Any" pointer includes Objective-C object pointers, unlike pointerType().
  return Node->isAnyPointerType();
}
/// Matches QualType nodes that are const-qualified, i.e., that
/// include "top-level" const.
///
/// Given
/// \code
/// void a(int);
/// void b(int const);
/// void c(const int);
/// void d(const int*);
/// void e(int const) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isConstQualified())))
/// matches "void b(int const)", "void c(const int)" and
/// "void e(int const) {}". It does not match d as there
/// is no top-level const on the parameter type "const int *".
AST_MATCHER(QualType, isConstQualified) {
  // Checks the QualType itself (top-level const), not qualifiers on a pointee.
  return Node.isConstQualified();
}
/// Matches QualType nodes that are volatile-qualified, i.e., that
/// include "top-level" volatile.
///
/// Given
/// \code
/// void a(int);
/// void b(int volatile);
/// void c(volatile int);
/// void d(volatile int*);
/// void e(int volatile) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isVolatileQualified())))
/// matches "void b(int volatile)", "void c(volatile int)" and
/// "void e(int volatile) {}". It does not match d as there
/// is no top-level volatile on the parameter type "volatile int *".
AST_MATCHER(QualType, isVolatileQualified) {
  // Checks the QualType itself (top-level volatile), not a pointee's
  // qualifiers.
  return Node.isVolatileQualified();
}
/// Matches QualType nodes that have local CV-qualifiers attached to
/// the node, not hidden within a typedef.
///
/// Given
/// \code
/// typedef const int const_int;
/// const_int i;
/// int *const j;
/// int *volatile k;
/// int m;
/// \endcode
/// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k.
/// \c i is const-qualified but the qualifier is not local.
AST_MATCHER(QualType, hasLocalQualifiers) {
  // "Local" means qualifiers written directly on this QualType node, not
  // ones introduced via a typedef (see the documented const_int example).
  return Node.hasLocalQualifiers();
}
/// Matches a member expression where the member is matched by a
/// given matcher.
///
/// Given
/// \code
/// struct { int first, second; } first, second;
/// int i(second.first);
/// int j(first.second);
/// \endcode
/// memberExpr(member(hasName("first")))
/// matches second.first
/// but not first.second (because the member name there is "second").
AST_MATCHER_P(MemberExpr, member,
              internal::Matcher<ValueDecl>, InnerMatcher) {
  // Runs the inner matcher on the declaration the member expression refers to.
  return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder);
}
/// Matches a member expression where the object expression is matched by a
/// given matcher. Implicit object expressions are included; that is, it matches
/// use of implicit `this`.
///
/// Given
/// \code
/// struct X {
/// int m;
/// int f(X x) { x.m; return m; }
/// };
/// \endcode
/// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X")))))
/// matches `x.m`, but not `m`; however,
/// memberExpr(hasObjectExpression(hasType(pointsTo(
///     cxxRecordDecl(hasName("X"))))))
/// matches `m` (aka. `this->m`), but not `x.m`.
AST_POLYMORPHIC_MATCHER_P(
    hasObjectExpression,
    AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                    CXXDependentScopeMemberExpr),
    internal::Matcher<Expr>, InnerMatcher) {
  // For the dependent member-expression kinds, an implicit access has no
  // base expression to hand to the inner matcher; reject those up front.
  if (const auto *UME = dyn_cast<UnresolvedMemberExpr>(&Node)) {
    if (UME->isImplicitAccess())
      return false;
  }
  if (const auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(&Node)) {
    if (DSME->isImplicitAccess())
      return false;
  }
  // Otherwise match against the object ("base") expression.
  return InnerMatcher.matches(*Node.getBase(), Finder, Builder);
}
/// Matches any using shadow declaration.
///
/// Given
/// \code
/// namespace X { void b(); }
/// using X::b;
/// \endcode
///   usingDecl(hasAnyUsingShadowDecl(hasName("b")))
/// matches \code using X::b \endcode
AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // Succeeds if any shadow declaration introduced by this using-declaration
  // satisfies the inner matcher.
  return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(),
                                    Node.shadow_end(), Finder, Builder);
}
/// Matches a using shadow declaration where the target declaration is
/// matched by the given matcher.
///
/// Given
/// \code
/// namespace X { int a; void b(); }
/// using X::a;
/// using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl())))
/// matches \code using X::b \endcode
/// but not \code using X::a \endcode
AST_MATCHER_P(UsingShadowDecl, hasTargetDecl,
              internal::Matcher<NamedDecl>, InnerMatcher) {
  // Matches against the declaration the shadow declaration points at.
  return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder);
}
/// Matches template instantiations of function, class, or static
/// member variable template instantiations.
///
/// Given
/// \code
/// template <typename T> class X {}; class A {}; X<A> x;
/// \endcode
/// or
/// \code
/// template <typename T> class X {}; class A {}; template class X<A>;
/// \endcode
/// or
/// \code
/// template <typename T> class X {}; class A {}; extern template class X<A>;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
/// matches the template instantiation of X<A>.
///
/// But given
/// \code
/// template <typename T> class X {}; class A {};
/// template <> class X<A> {}; X<A> x;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
/// does not match, as X<A> is an explicit template specialization.
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isTemplateInstantiation,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
                                                        CXXRecordDecl)) {
  // An instantiation is either implicit or one of the two explicit
  // instantiation forms; explicit *specializations* are deliberately
  // excluded (see the documentation above).
  const TemplateSpecializationKind Kind = Node.getTemplateSpecializationKind();
  return Kind == TSK_ImplicitInstantiation ||
         Kind == TSK_ExplicitInstantiationDefinition ||
         Kind == TSK_ExplicitInstantiationDeclaration;
}
/// Matches declarations that are template instantiations or are inside
/// template instantiations.
///
/// Given
/// \code
/// template<typename T> void A(T t) { T i; }
/// A(0);
/// A(0U);
/// \endcode
/// functionDecl(isInstantiated())
/// matches 'A(int) {...};' and 'A(unsigned) {...}'.
AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) {
  // A declaration counts if it is itself a class or function template
  // instantiation, or has such an instantiation among its ancestors.
  auto Instantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                                  functionDecl(isTemplateInstantiation())));
  return decl(anyOf(Instantiation, hasAncestor(Instantiation)));
}
/// Matches statements inside of a template instantiation.
///
/// Given
/// \code
/// int j;
/// template<typename T> void A(T t) { T i; j += 42;}
/// A(0);
/// A(0U);
/// \endcode
/// declStmt(isInTemplateInstantiation())
/// matches 'int i;' and 'unsigned i'.
/// unless(stmt(isInTemplateInstantiation()))
/// will NOT match j += 42; as it's shared between the template definition and
/// instantiation.
AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) {
  // A statement is "in" an instantiation when one of its ancestor
  // declarations is a class or function template instantiation.
  auto Instantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                                  functionDecl(isTemplateInstantiation())));
  return stmt(hasAncestor(Instantiation));
}
/// Matches explicit template specializations of function, class, or
/// static member variable template instantiations.
///
/// Given
/// \code
/// template<typename T> void A(T t) { }
/// template<> void A(int N) { }
/// \endcode
/// functionDecl(isExplicitTemplateSpecialization())
/// matches the specialization A<int>().
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
                                                        CXXRecordDecl)) {
  // Only TSK_ExplicitSpecialization qualifies; instantiations are handled by
  // isTemplateInstantiation() instead.
  return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization);
}
/// Matches \c TypeLocs for which the given inner
/// QualType-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc,
                                internal::Matcher<QualType>, InnerMatcher, 0) {
  // Wraps the QualType matcher so it can be applied to the TypeLoc's type.
  return internal::BindableMatcher<TypeLoc>(
      new internal::TypeLocTypeMatcher(InnerMatcher));
}
/// Matches type \c bool.
///
/// Given
/// \code
/// struct S { bool func(); };
/// \endcode
/// functionDecl(returns(booleanType()))
/// matches "bool func();"
AST_MATCHER(Type, booleanType) {
  // Direct delegation to Type's boolean classification.
  return Node.isBooleanType();
}
/// Matches type \c void.
///
/// Given
/// \code
/// struct S { void func(); };
/// \endcode
/// functionDecl(returns(voidType()))
/// matches "void func();"
AST_MATCHER(Type, voidType) {
  // Direct delegation to Type's void classification.
  return Node.isVoidType();
}
/// Shorthand for a variadic matcher over \c Type nodes that can be
/// dyn_cast to \c NodeType (used by the extern matcher declarations below).
template <typename NodeType>
using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>;
/// Matches builtin Types.
///
/// Given
/// \code
/// struct A {};
/// A a;
/// int b;
/// float c;
/// bool d;
/// \endcode
/// builtinType()
/// matches "int b", "float c" and "bool d"
extern const AstTypeMatcher<BuiltinType> builtinType;
/// Matches all kinds of arrays.
///
/// Given
/// \code
/// int a[] = { 2, 3 };
/// int b[4];
/// void f() { int c[a[0]]; }
/// \endcode
/// arrayType()
/// matches "int a[]", "int b[4]" and "int c[a[0]]";
extern const AstTypeMatcher<ArrayType> arrayType;
/// Matches C99 complex types.
///
/// Given
/// \code
/// _Complex float f;
/// \endcode
/// complexType()
/// matches "_Complex float f"
extern const AstTypeMatcher<ComplexType> complexType;
/// Matches any real floating-point type (float, double, long double).
///
/// Given
/// \code
/// int i;
/// float f;
/// \endcode
/// realFloatingPointType()
/// matches "float f" but not "int i"
AST_MATCHER(Type, realFloatingPointType) {
  // "Real" excludes complex floating types; see the documented example.
  return Node.isRealFloatingType();
}
/// Matches arrays and C99 complex types that have a specific element
/// type.
///
/// Given
/// \code
/// struct A {};
/// A a[7];
/// int b[7];
/// \endcode
/// arrayType(hasElementType(builtinType()))
/// matches "int b[7]"
///
/// Usable as: Matcher<ArrayType>, Matcher<ComplexType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement,
AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType,
ComplexType));
/// Matches C arrays with a specified constant size.
///
/// Given
/// \code
///   void f() {
/// int a[2];
/// int b[] = { 2, 3 };
/// int c[b[0]];
/// }
/// \endcode
/// constantArrayType()
/// matches "int a[2]"
extern const AstTypeMatcher<ConstantArrayType> constantArrayType;
/// Matches nodes that have the specified size.
///
/// Given
/// \code
/// int a[42];
/// int b[2 * 21];
/// int c[41], d[43];
/// char *s = "abcd";
/// wchar_t *ws = L"abcd";
/// char *w = "a";
/// \endcode
/// constantArrayType(hasSize(42))
/// matches "int a[42]" and "int b[2 * 21]"
/// stringLiteral(hasSize(4))
/// matches "abcd", L"abcd"
AST_POLYMORPHIC_MATCHER_P(hasSize,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType,
                                                          StringLiteral),
                          unsigned, N) {
  // The meaning of "size" (array extent vs. string length) is defined per
  // node type by the HasSizeMatcher specialization.
  return internal::HasSizeMatcher<NodeType>::hasSize(Node, N);
}
/// Matches C++ arrays whose size is a value-dependent expression.
///
/// Given
/// \code
/// template<typename T, int Size>
/// class array {
/// T data[Size];
/// };
/// \endcode
/// dependentSizedArrayType
/// matches "T data[Size]"
extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType;
/// Matches C arrays with unspecified size.
///
/// Given
/// \code
/// int a[] = { 2, 3 };
/// int b[42];
/// void f(int c[]) { int d[a[0]]; };
/// \endcode
/// incompleteArrayType()
/// matches "int a[]" and "int c[]"
extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType;
/// Matches C arrays with a specified size that is not an
/// integer-constant-expression.
///
/// Given
/// \code
/// void f() {
/// int a[] = { 2, 3 }
/// int b[42];
/// int c[a[0]];
/// }
/// \endcode
/// variableArrayType()
/// matches "int c[a[0]]"
extern const AstTypeMatcher<VariableArrayType> variableArrayType;
/// Matches \c VariableArrayType nodes that have a specific size
/// expression.
///
/// Given
/// \code
/// void f(int b) {
/// int a[b];
/// }
/// \endcode
/// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to(
/// varDecl(hasName("b")))))))
/// matches "int a[b]"
AST_MATCHER_P(VariableArrayType, hasSizeExpr,
              internal::Matcher<Expr>, InnerMatcher) {
  // Matches the VLA's size expression against the inner matcher.
  return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder);
}
/// Matches atomic types.
///
/// Given
/// \code
/// _Atomic(int) i;
/// \endcode
/// atomicType()
/// matches "_Atomic(int) i"
extern const AstTypeMatcher<AtomicType> atomicType;
/// Matches atomic types with a specific value type.
///
/// Given
/// \code
/// _Atomic(int) i;
/// _Atomic(float) f;
/// \endcode
/// atomicType(hasValueType(isInteger()))
/// matches "_Atomic(int) i"
///
/// Usable as: Matcher<AtomicType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue,
AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType));
/// Matches types nodes representing C++11 auto types.
///
/// Given:
/// \code
/// auto n = 4;
/// int v[] = { 2, 3 }
/// for (auto i : v) { }
/// \endcode
/// autoType()
/// matches "auto n" and "auto i"
extern const AstTypeMatcher<AutoType> autoType;
/// Matches types nodes representing C++11 decltype(<expr>) types.
///
/// Given:
/// \code
/// short i = 1;
/// int j = 42;
/// decltype(i + j) result = i + j;
/// \endcode
/// decltypeType()
/// matches "decltype(i + j)"
extern const AstTypeMatcher<DecltypeType> decltypeType;
/// Matches \c AutoType nodes where the deduced type is a specific type.
///
/// Note: There is no \c TypeLoc for the deduced type and thus no
/// \c getDeducedLoc() matcher.
///
/// Given
/// \code
/// auto a = 1;
/// auto b = 2.0;
/// \endcode
/// autoType(hasDeducedType(isInteger()))
/// matches "auto a"
///
/// Usable as: Matcher<AutoType>
AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType,
AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType));
/// Matches \c DecltypeType nodes to find out the underlying type.
///
/// Given
/// \code
/// decltype(1) a = 1;
/// decltype(2.0) b = 2.0;
/// \endcode
/// decltypeType(hasUnderlyingType(isInteger()))
/// matches the type of "a"
///
/// Usable as: Matcher<DecltypeType>
AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType,
AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType));
/// Matches \c FunctionType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionType()
/// matches "int (*f)(int)" and the type of "g".
extern const AstTypeMatcher<FunctionType> functionType;
/// Matches \c FunctionProtoType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionProtoType()
/// matches "int (*f)(int)" and the type of "g" in C++ mode.
/// In C mode, "g" is not matched because it does not contain a prototype.
extern const AstTypeMatcher<FunctionProtoType> functionProtoType;
/// Matches \c ParenType nodes.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int *array_of_ptrs[4];
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not
/// \c array_of_ptrs.
extern const AstTypeMatcher<ParenType> parenType;
/// Matches \c ParenType nodes where the inner type is a specific type.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int (*ptr_to_func)(int);
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches
/// \c ptr_to_func but not \c ptr_to_array.
///
/// Usable as: Matcher<ParenType>
AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType,
AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType));
/// Matches block pointer types, i.e. types syntactically represented as
/// "void (^)(int)".
///
/// The \c pointee is always required to be a \c FunctionType.
extern const AstTypeMatcher<BlockPointerType> blockPointerType;
/// Matches member pointer types.
/// Given
/// \code
///   struct A { int i; };
///   int A::* ptr = &A::i;
/// \endcode
/// memberPointerType()
///   matches "int A::* ptr"
extern const AstTypeMatcher<MemberPointerType> memberPointerType;
/// Matches pointer types, but does not match Objective-C object pointer
/// types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int c = 5;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// pointerType()
/// matches "int *a", but does not match "Foo *f".
extern const AstTypeMatcher<PointerType> pointerType;
/// Matches an Objective-C object pointer type, which is different from
/// a pointer type, despite being syntactically similar.
///
/// Given
/// \code
/// int *a;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// pointerType()
/// matches "Foo *f", but does not match "int *a".
extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType;
/// Matches both lvalue and rvalue reference types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f.
extern const AstTypeMatcher<ReferenceType> referenceType;
/// Matches lvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is
/// matched since the type is deduced as int& by reference collapsing rules.
extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType;
/// Matches rvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not
/// matched as it is deduced to int& by reference collapsing rules.
extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType;
/// Narrows PointerType (and similar) matchers to those where the
/// \c pointee matches a given matcher.
///
/// Given
/// \code
/// int *a;
/// int const *b;
/// float const *f;
/// \endcode
/// pointerType(pointee(isConstQualified(), isInteger()))
/// matches "int const *b"
///
/// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>,
/// Matcher<PointerType>, Matcher<ReferenceType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(
pointee, getPointee,
AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType,
PointerType, ReferenceType));
/// Matches typedef types.
///
/// Given
/// \code
/// typedef int X;
/// \endcode
/// typedefType()
/// matches "typedef int X"
extern const AstTypeMatcher<TypedefType> typedefType;
/// Matches enum types.
///
/// Given
/// \code
/// enum C { Green };
/// enum class S { Red };
///
/// C c;
/// S s;
/// \endcode
///
/// \c enumType() matches the type of the variable declarations of both \c c and
/// \c s.
extern const AstTypeMatcher<EnumType> enumType;
/// Matches template specialization types.
///
/// Given
/// \code
/// template <typename T>
/// class C { };
///
/// template class C<int>; // A
/// C<char> var; // B
/// \endcode
///
/// \c templateSpecializationType() matches the type of the explicit
/// instantiation in \c A and the type of the variable declaration in \c B.
extern const AstTypeMatcher<TemplateSpecializationType>
templateSpecializationType;
/// Matches C++17 deduced template specialization types, e.g. deduced class
/// template types.
///
/// Given
/// \code
/// template <typename T>
/// class C { public: C(T); };
///
/// C c(123);
/// \endcode
/// \c deducedTemplateSpecializationType() matches the type in the declaration
/// of the variable \c c.
extern const AstTypeMatcher<DeducedTemplateSpecializationType>
deducedTemplateSpecializationType;
/// Matches types nodes representing unary type transformations.
///
/// Given:
/// \code
/// typedef __underlying_type(T) type;
/// \endcode
/// unaryTransformType()
/// matches "__underlying_type(T)"
extern const AstTypeMatcher<UnaryTransformType> unaryTransformType;
/// Matches record types (e.g. structs, classes).
///
/// Given
/// \code
/// class C {};
/// struct S {};
///
/// C c;
/// S s;
/// \endcode
///
/// \c recordType() matches the type of the variable declarations of both \c c
/// and \c s.
extern const AstTypeMatcher<RecordType> recordType;
/// Matches tag types (record and enum types).
///
/// Given
/// \code
/// enum E {};
/// class C {};
///
/// E e;
/// C c;
/// \endcode
///
/// \c tagType() matches the type of the variable declarations of both \c e
/// and \c c.
extern const AstTypeMatcher<TagType> tagType;
/// Matches types specified with an elaborated type keyword or with a
/// qualified name.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// class C {};
///
/// class C c;
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType() matches the type of the variable declarations of both
/// \c c and \c d.
extern const AstTypeMatcher<ElaboratedType> elaboratedType;
/// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier,
/// matches \c InnerMatcher if the qualifier exists.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N"))))
/// matches the type of the variable declaration of \c d.
AST_MATCHER_P(ElaboratedType, hasQualifier,
              internal::Matcher<NestedNameSpecifier>, InnerMatcher) {
  // Types written without a qualifier never match.
  const NestedNameSpecifier *Qualifier = Node.getQualifier();
  if (!Qualifier)
    return false;
  return InnerMatcher.matches(*Qualifier, Finder, Builder);
}
/// Matches ElaboratedTypes whose named type matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(namesType(recordType(
/// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable
/// declaration of \c d.
AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>,
              InnerMatcher) {
  // Matches the type the elaborated type refers to (its "named" type).
  return InnerMatcher.matches(Node.getNamedType(), Finder, Builder);
}
/// Matches types that represent the result of substituting a type for a
/// template type parameter.
///
/// Given
/// \code
/// template <typename T>
/// void F(T t) {
/// int i = 1 + t;
/// }
/// \endcode
///
/// \c substTemplateTypeParmType() matches the type of 't' but not '1'
extern const AstTypeMatcher<SubstTemplateTypeParmType>
substTemplateTypeParmType;
/// Matches template type parameter substitutions that have a replacement
/// type that matches the provided matcher.
///
/// Given
/// \code
/// template <typename T>
/// double F(T t);
/// int i;
/// double j = F(i);
/// \endcode
///
/// \c substTemplateTypeParmType(hasReplacementType(type())) matches int
AST_TYPE_TRAVERSE_MATCHER(
hasReplacementType, getReplacementType,
AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType));
/// Matches template type parameter types.
///
/// Example matches T, but not int.
/// (matcher = templateTypeParmType())
/// \code
/// template <typename T> void f(int i);
/// \endcode
extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType;
/// Matches injected class name types.
///
/// Example matches S s, but not S<T> s.
/// (matcher = parmVarDecl(hasType(injectedClassNameType())))
/// \code
/// template <typename T> struct S {
/// void f(S s);
/// void g(S<T> s);
/// };
/// \endcode
extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType;
/// Matches decayed type
/// Example matches i[] in declaration of f.
/// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType())))))
/// Example matches i[1].
/// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType())))))
/// \code
/// void f(int i[]) {
/// i[1] = 0;
/// }
/// \endcode
extern const AstTypeMatcher<DecayedType> decayedType;
/// Matches the decayed type, whose decayed type matches \c InnerMatcher
AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>,
              InnerType) {
  // Matches the result of the decay (e.g. the pointer type an array
  // parameter decays to) against the inner matcher.
  return InnerType.matches(Node.getDecayedType(), Finder, Builder);
}
/// Matches declarations whose declaration context, interpreted as a
/// Decl, matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// \endcode
///
/// \c cxxRecordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the
/// declaration of \c class \c D.
AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) {
  // Declarations without a context (e.g. the TU itself) cannot match; the
  // context is reinterpreted as a Decl before running the inner matcher.
  const DeclContext *Context = Node.getDeclContext();
  return Context != nullptr &&
         InnerMatcher.matches(*Decl::castFromDeclContext(Context), Finder,
                              Builder);
}
/// Matches nested name specifiers.
///
/// Given
/// \code
/// namespace ns {
/// struct A { static void f(); };
/// void A::f() {}
/// void g() { A::f(); }
/// }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier()
/// matches "ns::" and both "A::"
extern const internal::VariadicAllOfMatcher<NestedNameSpecifier>
nestedNameSpecifier;
/// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc.
extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc>
nestedNameSpecifierLoc;
/// Matches \c NestedNameSpecifierLocs for which the given inner
/// NestedNameSpecifier-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(
    internal::BindableMatcher<NestedNameSpecifierLoc>, loc,
    internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) {
  // Adapts a NestedNameSpecifier matcher so it can run on the
  // corresponding source-location node.
  return internal::BindableMatcher<NestedNameSpecifierLoc>(
      new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>(
          InnerMatcher));
}
/// Matches nested name specifiers that specify a type matching the
/// given \c QualType matcher without qualifiers.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(specifiesType(
/// hasDeclaration(cxxRecordDecl(hasName("A")))
/// ))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifier, specifiesType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Non-type specifiers (namespaces, the global scope) never match.
  const Type *SpecifiedType = Node.getAsType();
  return SpecifiedType != nullptr &&
         InnerMatcher.matches(QualType(SpecifiedType, 0), Finder, Builder);
}
/// Matches nested name specifier locs that specify a type matching the
/// given \c TypeLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type(
/// hasDeclaration(cxxRecordDecl(hasName("A")))))))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc,
              internal::Matcher<TypeLoc>, InnerMatcher) {
  // Short-circuits on an invalid loc and on specifiers that do not name a
  // type (e.g. namespace specifiers) before matching the TypeLoc.
  return Node && Node.getNestedNameSpecifier()->getAsType() &&
         InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifier.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A")))) and
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix,
                       internal::Matcher<NestedNameSpecifier>, InnerMatcher,
                       0) {
  // A specifier with no prefix (e.g. a top-level name) cannot match.
  if (const NestedNameSpecifier *Prefix = Node.getPrefix())
    return InnerMatcher.matches(*Prefix, Finder, Builder);
  return false;
}
/// Matches on the prefix of a \c NestedNameSpecifierLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A")))))
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix,
                       internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher,
                       1) {
  // An invalid (empty) prefix loc cannot match.
  if (NestedNameSpecifierLoc Prefix = Node.getPrefix())
    return InnerMatcher.matches(Prefix, Finder, Builder);
  return false;
}
/// Matches nested name specifiers that specify a namespace matching the
/// given namespace matcher.
///
/// Given
/// \code
/// namespace ns { struct A {}; }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier(specifiesNamespace(hasName("ns")))
/// matches "ns::"
AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace,
              internal::Matcher<NamespaceDecl>, InnerMatcher) {
  // Specifiers that do not name a namespace (types, the global scope)
  // never match.
  const NamespaceDecl *Namespace = Node.getAsNamespace();
  return Namespace != nullptr &&
         InnerMatcher.matches(*Namespace, Finder, Builder);
}
/// Overloads for the \c equalsNode matcher.
/// FIXME: Implement for other node types.
/// @{
/// Matches if a node equals another node.
///
/// \c Decl has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) {
  // Address comparison is sufficient: Decl nodes have pointer identity.
  return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Stmt has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) {
  // Address comparison is sufficient: Stmt nodes have pointer identity.
  return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Type has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) {
  // Address comparison is sufficient: Type nodes have pointer identity.
  return &Node == Other;
}
/// @}
/// Matches each case or default statement belonging to the given switch
/// statement. This matcher may produce multiple matches.
///
/// Given
/// \code
/// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } }
/// \endcode
/// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s")
/// matches four times, with "c" binding each of "case 1:", "case 2:",
/// "case 3:" and "case 4:", and "s" respectively binding "switch (1)",
/// "switch (1)", "switch (2)" and "switch (2)".
AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>,
              InnerMatcher) {
  BoundNodesTreeBuilder Matches;
  // FIXME: getSwitchCaseList() does not necessarily guarantee a stable
  // iteration order. We should use the more general iterating matchers once
  // they are capable of expressing this matcher (for example, it should ignore
  // case statements belonging to nested switch statements).
  bool FoundAny = false;
  // Walk the singly linked list of cases hanging off the switch statement,
  // collecting a separate binding set for every case the inner matcher
  // accepts.
  for (const SwitchCase *Case = Node.getSwitchCaseList(); Case != nullptr;
       Case = Case->getNextSwitchCase()) {
    BoundNodesTreeBuilder CaseBindings(*Builder);
    if (InnerMatcher.matches(*Case, Finder, &CaseBindings)) {
      FoundAny = true;
      Matches.addMatch(CaseBindings);
    }
  }
  *Builder = std::move(Matches);
  return FoundAny;
}
/// Matches each constructor initializer in a constructor definition.
///
/// Given
/// \code
/// class A { A() : i(42), j(42) {} int i; int j; };
/// \endcode
/// cxxConstructorDecl(forEachConstructorInitializer(
/// forField(decl().bind("x"))
/// ))
/// will trigger two matches, binding for 'i' and 'j' respectively.
AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer,
              internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
  BoundNodesTreeBuilder Matches;
  bool FoundAny = false;
  // Each initializer is matched against a copy of the incoming bindings so
  // every successful match yields its own binding set.
  for (const auto *Init : Node.inits()) {
    BoundNodesTreeBuilder InitBindings(*Builder);
    if (InnerMatcher.matches(*Init, Finder, &InitBindings)) {
      FoundAny = true;
      Matches.addMatch(InitBindings);
    }
  }
  *Builder = std::move(Matches);
  return FoundAny;
}
/// Matches constructor declarations that are copy constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3.
AST_MATCHER(CXXConstructorDecl, isCopyConstructor) {
  // Direct delegation to the declaration's own classification.
  return Node.isCopyConstructor();
}
/// Matches constructor declarations that are move constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2.
AST_MATCHER(CXXConstructorDecl, isMoveConstructor) {
  // Direct delegation to the declaration's own classification.
  return Node.isMoveConstructor();
}
/// Matches constructor declarations that are default constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3.
AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) {
  // Direct delegation to the declaration's own classification.
  return Node.isDefaultConstructor();
}
/// Matches constructors that delegate to another constructor.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(int) {} // #2
/// S(S &&) : S() {} // #3
/// };
/// S::S() : S(0) {} // #4
/// \endcode
/// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not
/// #1 or #2.
AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) {
  // True for constructors whose single initializer delegates to another
  // constructor of the same class (see the documented examples).
  return Node.isDelegatingConstructor();
}
/// Matches constructor, conversion function, and deduction guide declarations
/// that have an explicit specifier if this explicit specifier is resolved to
/// true.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
/// explicit(false) S(bool) // # 7
/// explicit(true) S(char) // # 8
/// explicit(b) S(S) // # 9
/// };
/// S(int) -> S<true> // #5
/// explicit S(double) -> S<false> // #6
/// \endcode
/// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9.
/// cxxConversionDecl(isExplicit()) will match #4, but not #3.
/// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5.
AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES(
                                        CXXConstructorDecl, CXXConversionDecl,
                                        CXXDeductionGuideDecl)) {
  // Resolved explicitness: per the examples above, explicit(expr) only
  // counts when the expression evaluates to true.
  const bool ResolvedExplicit = Node.isExplicit();
  return ResolvedExplicit;
}
/// Matches the expression in an explicit specifier if present in the given
/// declaration.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
/// explicit(false) S(bool) // # 7
/// explicit(true) S(char) // # 8
/// explicit(b) S(S) // # 9
/// };
/// S(int) -> S<true> // #5
/// explicit S(double) -> S<false> // #6
/// \endcode
/// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2.
/// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4.
/// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6.
AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>,
              InnerMatcher) {
  // Only declarations whose explicit-specifier carries an expression
  // (i.e. `explicit(cond)`) can possibly match.
  ExplicitSpecifier ES = ExplicitSpecifier::getFromDecl(&Node);
  const Expr *ExplicitExpr = ES.getExpr();
  return ExplicitExpr && InnerMatcher.matches(*ExplicitExpr, Finder, Builder);
}
/// Matches function and namespace declarations that are marked with
/// the inline keyword.
///
/// Given
/// \code
/// inline void f();
/// void g();
/// namespace n {
/// inline namespace m {}
/// }
/// \endcode
/// functionDecl(isInline()) will match ::f().
/// namespaceDecl(isInline()) will match n::m.
AST_POLYMORPHIC_MATCHER(isInline,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl,
                                                        FunctionDecl)) {
  // The two supported node kinds expose "inline-ness" through different
  // member functions, so dispatch on the dynamic type of Node.  A decl is
  // never both a namespace and a function, so the check order is irrelevant.
  if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node))
    return NSD->isInline();
  if (const auto *FD = dyn_cast<FunctionDecl>(&Node))
    return FD->isInlineSpecified();
  llvm_unreachable("Not a valid polymorphic type");
}
/// Matches anonymous namespace declarations.
///
/// Given
/// \code
/// namespace n {
/// namespace {} // #1
/// }
/// \endcode
/// namespaceDecl(isAnonymous()) will match #1 but not ::n.
AST_MATCHER(NamespaceDecl, isAnonymous) {
  // A namespace declared without a name, i.e. `namespace { ... }`.
  const bool IsUnnamed = Node.isAnonymousNamespace();
  return IsUnnamed;
}
/// Matches declarations in the namespace `std`, but not in nested namespaces.
///
/// Given
/// \code
/// class vector {};
/// namespace foo {
/// class vector {};
/// namespace std {
/// class vector {};
/// }
/// }
/// namespace std {
/// inline namespace __1 {
/// class vector {}; // #1
/// namespace experimental {
/// class vector {};
/// }
/// }
/// }
/// \endcode
/// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1.
AST_MATCHER(Decl, isInStdNamespace) {
  // Per the example above, inline namespaces directly inside `std` are
  // transparent, but named nested namespaces (e.g. std::experimental) are not.
  const bool InStd = Node.isInStdNamespace();
  return InStd;
}
/// If the given case statement does not use the GNU case range
/// extension, matches the constant given in the statement.
///
/// Given
/// \code
/// switch (1) { case 1: case 1+1: case 3 ... 4: ; }
/// \endcode
/// caseStmt(hasCaseConstant(integerLiteral()))
/// matches "case 1:"
AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>,
              InnerMatcher) {
  // A GNU case range ("case lo ... hi") carries an RHS; those never match.
  // Otherwise the LHS is the single case constant to test.
  return !Node.getRHS() &&
         InnerMatcher.matches(*Node.getLHS(), Finder, Builder);
}
/// Matches declaration that has a given attribute.
///
/// Given
/// \code
/// __attribute__((device)) void f() { ... }
/// \endcode
/// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of
/// f. If the matcher is used from clang-query, attr::Kind parameter should be
/// passed as a quoted string. e.g., hasAttr("attr::CUDADevice").
AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) {
  // Succeed as soon as any attribute attached to the decl has the wanted kind.
  return llvm::any_of(Node.attrs(), [&](const auto *A) {
    return A->getKind() == AttrKind;
  });
}
/// Matches the return value expression of a return statement
///
/// Given
/// \code
/// return a + b;
/// \endcode
/// hasReturnValue(binaryOperator())
/// matches 'return a + b'
/// with binaryOperator()
/// matching 'a + b'
AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>,
              InnerMatcher) {
  // A bare `return;` has no value expression and therefore never matches.
  const auto *RetValue = Node.getRetValue();
  return RetValue && InnerMatcher.matches(*RetValue, Finder, Builder);
}
/// Matches CUDA kernel call expression.
///
/// Example matches,
/// \code
/// kernel<<<i,j>>>();
/// \endcode
// Declared extern: the single matcher instance is defined out of line.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr>
    cudaKernelCallExpr;
/// Matches expressions that resolve to a null pointer constant, such as
/// GNU's __null, C++11's nullptr, or C's NULL macro.
///
/// Given:
/// \code
/// void *v1 = NULL;
/// void *v2 = nullptr;
/// void *v3 = __null; // GNU extension
/// char *cp = (char *)0;
/// int *ip = 0;
/// int i = 0;
/// \endcode
/// expr(nullPointerConstant())
/// matches the initializer for v1, v2, v3, cp, and ip. Does not match the
/// initializer for i.
AST_MATCHER(Expr, nullPointerConstant) {
  // Value-dependent expressions (inside templates) are treated as null
  // pointer constants, per NPC_ValueDependentIsNull.
  return Node.isNullPointerConstant(Finder->getASTContext(),
                                    Expr::NPC_ValueDependentIsNull);
}
/// Matches declaration of the function the statement belongs to
///
/// Given:
/// \code
/// F& operator=(const F& o) {
/// std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; });
/// return *this;
/// }
/// \endcode
/// returnStmt(forFunction(hasName("operator=")))
/// matches 'return *this'
/// but does not match 'return v > 0'
AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>,
              InnerMatcher) {
  // Depth-first walk over all ancestors of the statement; getParents may
  // report more than one parent, so a worklist is used instead of a simple
  // upward loop.
  const auto &Parents = Finder->getASTContext().getParents(Node);
  llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end());
  while(!Stack.empty()) {
    const auto &CurNode = Stack.back();
    Stack.pop_back();
    if(const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
      // Reached an enclosing function declaration: test it directly.
      if(InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) {
        return true;
      }
    } else if(const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
      // A statement inside a lambda belongs to the lambda's call operator,
      // not to the function lexically containing the lambda.
      if(InnerMatcher.matches(*LambdaExprNode->getCallOperator(),
                              Finder, Builder)) {
        return true;
      }
    } else {
      // Neither a function nor a lambda: keep climbing the parent chain.
      for(const auto &Parent: Finder->getASTContext().getParents(CurNode))
        Stack.push_back(Parent);
    }
  }
  return false;
}
/// Matches a declaration that has external formal linkage.
///
/// Example matches only z (matcher = varDecl(hasExternalFormalLinkage()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
///
/// Example matches f() because it has external formal linkage despite being
/// unique to the translation unit as though it has internal linkage
/// (matcher = functionDecl(hasExternalFormalLinkage()))
///
/// \code
/// namespace {
/// void f() {}
/// }
/// \endcode
AST_MATCHER(NamedDecl, hasExternalFormalLinkage) {
  // Formal (language-level) linkage, as opposed to effective linkage.
  const bool HasExternalLinkage = Node.hasExternalFormalLinkage();
  return HasExternalLinkage;
}
/// Matches a declaration that has default arguments.
///
/// Example matches y (matcher = parmVarDecl(hasDefaultArgument()))
/// \code
/// void x(int val) {}
/// void y(int val = 0) {}
/// \endcode
///
/// Deprecated. Use hasInitializer() instead to be able to
/// match on the contents of the default argument. For example:
///
/// \code
/// void x(int val = 7) {}
/// void y(int val = 42) {}
/// \endcode
/// parmVarDecl(hasInitializer(integerLiteral(equals(42))))
/// matches the parameter of y
///
/// A matcher such as
/// parmVarDecl(hasInitializer(anything()))
/// is equivalent to parmVarDecl(hasDefaultArgument()).
AST_MATCHER(ParmVarDecl, hasDefaultArgument) {
  // Deprecated in favor of hasInitializer(); see the documentation above.
  const bool HasDefault = Node.hasDefaultArg();
  return HasDefault;
}
/// Matches array new expressions.
///
/// Given:
/// \code
/// MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(isArray())
/// matches the expression 'new MyClass[10]'.
AST_MATCHER(CXXNewExpr, isArray) {
  // True for `new T[n]`, false for scalar `new T`.
  const bool IsArrayNew = Node.isArray();
  return IsArrayNew;
}
/// Matches placement new expression arguments.
///
/// Given:
/// \code
/// MyClass *p1 = new (Storage, 16) MyClass();
/// \endcode
/// cxxNewExpr(hasPlacementArg(1, integerLiteral(equals(16))))
/// matches the expression 'new (Storage, 16) MyClass()'.
AST_MATCHER_P2(CXXNewExpr, hasPlacementArg, unsigned, Index,
               internal::Matcher<Expr>, InnerMatcher) {
  // Out-of-range indices simply fail to match instead of asserting.
  if (Node.getNumPlacementArgs() <= Index)
    return false;
  return InnerMatcher.matches(*Node.getPlacementArg(Index), Finder, Builder);
}
/// Matches any placement new expression arguments.
///
/// Given:
/// \code
/// MyClass *p1 = new (Storage) MyClass();
/// \endcode
/// cxxNewExpr(hasAnyPlacementArg(anything()))
/// matches the expression 'new (Storage) MyClass()'.
AST_MATCHER_P(CXXNewExpr, hasAnyPlacementArg, internal::Matcher<Expr>,
              InnerMatcher) {
  // Succeed on the first placement argument the inner matcher accepts.
  for (const Expr *Arg : Node.placement_arguments()) {
    if (InnerMatcher.matches(*Arg, Finder, Builder))
      return true;
  }
  return false;
}
/// Matches array new expressions with a given array size.
///
/// Given:
/// \code
/// MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(hasArraySize(integerLiteral(equals(10))))
/// matches the expression 'new MyClass[10]'.
AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) {
  // Scalar news never match; array news match only when a size expression
  // is actually present.
  if (!Node.isArray())
    return false;
  const Expr *SizeExpr = *Node.getArraySize();
  return SizeExpr && InnerMatcher.matches(*SizeExpr, Finder, Builder);
}
/// Matches a class declaration that is defined.
///
/// Example matches x (matcher = cxxRecordDecl(hasDefinition()))
/// \code
/// class x {};
/// class y;
/// \endcode
AST_MATCHER(CXXRecordDecl, hasDefinition) {
  // Distinguishes defined classes from mere forward declarations.
  const bool IsDefined = Node.hasDefinition();
  return IsDefined;
}
/// Matches C++11 scoped enum declaration.
///
/// Example matches Y (matcher = enumDecl(isScoped()))
/// \code
/// enum X {};
/// enum class Y {};
/// \endcode
AST_MATCHER(EnumDecl, isScoped) {
  // True for `enum class`/`enum struct`, false for unscoped enums.
  const bool IsScopedEnum = Node.isScoped();
  return IsScopedEnum;
}
/// Matches a function declared with a trailing return type.
///
/// Example matches Y (matcher = functionDecl(hasTrailingReturn()))
/// \code
/// int X() {}
/// auto Y() -> int {}
/// \endcode
AST_MATCHER(FunctionDecl, hasTrailingReturn) {
  // Only prototyped function types can carry a trailing return type.
  const auto *Proto = Node.getType()->getAs<FunctionProtoType>();
  return Proto && Proto->hasTrailingReturn();
}
/// Matches expressions that match InnerMatcher that are possibly wrapped in an
/// elidable constructor and other corresponding bookkeeping nodes.
///
/// In C++17, elidable copy constructors are no longer being generated in the
/// AST as it is not permitted by the standard. They are, however, part of the
/// AST in C++14 and earlier. So, a matcher must abstract over these differences
/// to work in all language modes. This matcher skips elidable constructor-call
/// AST nodes, `ExprWithCleanups` nodes wrapping elidable constructor-calls and
/// various implicit nodes inside the constructor calls, all of which will not
/// appear in the C++17 AST.
///
/// Given
///
/// \code
/// struct H {};
/// H G();
/// void f() {
/// H D = G();
/// }
/// \endcode
///
/// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))``
/// matches ``H D = G()`` in C++11 through C++17 (and beyond).
AST_MATCHER_P(Expr, ignoringElidableConstructorCall,
              ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  // E tracks the node that we are examining.
  const Expr *E = &Node;
  // If present, remove an outer `ExprWithCleanups` corresponding to the
  // underlying `CXXConstructExpr`. This check won't cover all cases of added
  // `ExprWithCleanups` corresponding to `CXXConstructExpr` nodes (because the
  // EWC is placed on the outermost node of the expression, which this may not
  // be), but, it still improves the coverage of this matcher.
  if (const auto *CleanupsExpr = dyn_cast<ExprWithCleanups>(&Node))
    E = CleanupsExpr->getSubExpr();
  // An elidable construct-expression wraps the interesting expression in a
  // MaterializeTemporaryExpr as its first argument; match on its sub-expr.
  if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(E)) {
    if (CtorExpr->isElidable()) {
      if (const auto *MaterializeTemp =
              dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) {
        return InnerMatcher.matches(*MaterializeTemp->getSubExpr(), Finder,
                                    Builder);
      }
    }
  }
  // Fallback matches the original Node, not the EWC-stripped E.
  // NOTE(review): confirm this asymmetry is intended.
  return InnerMatcher.matches(Node, Finder, Builder);
}
//----------------------------------------------------------------------------//
// OpenMP handling.
//----------------------------------------------------------------------------//
/// Matches any ``#pragma omp`` executable directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective()`` matches ``omp parallel``,
/// ``omp parallel default(none)`` and ``omp taskyield``.
// Declared extern: the single matcher instance is defined out of line.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective>
    ompExecutableDirective;
/// Matches standalone OpenMP directives,
/// i.e., directives that can't have a structured block.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// {}
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective(isStandaloneDirective()))`` matches
/// ``omp taskyield``.
AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) {
  // Standalone OpenMP directives are those that cannot have an associated
  // structured block (e.g. `#pragma omp taskyield`).
  const bool IsStandalone = Node.isStandaloneDirective();
  return IsStandalone;
}
/// Matches the structured-block of the OpenMP executable directive
///
/// Prerequisite: the executable directive must not be standalone directive.
/// If it is, it will never match.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// ;
/// #pragma omp parallel
/// {}
/// \endcode
///
/// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;``
AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock,
              internal::Matcher<Stmt>, InnerMatcher) {
  // Standalone directives carry no structured block, so they never match;
  // otherwise delegate to the inner matcher on the block itself.
  return !Node.isStandaloneDirective() &&
         InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder);
}
/// Matches any clause in an OpenMP directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// \endcode
///
/// ``ompExecutableDirective(hasAnyClause(anything()))`` matches
/// ``omp parallel default(none)``.
AST_MATCHER_P(OMPExecutableDirective, hasAnyClause,
              internal::Matcher<OMPClause>, InnerMatcher) {
  // Test each clause of the directive in order; succeeds on the first clause
  // the inner matcher accepts (see matchesFirstInPointerRange).
  ArrayRef<OMPClause *> Clauses = Node.clauses();
  return matchesFirstInPointerRange(InnerMatcher, Clauses.begin(),
                                    Clauses.end(), Finder, Builder);
}
/// Matches OpenMP ``default`` clause.
///
/// Given
///
/// \code
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel
/// \endcode
///
/// ``ompDefaultClause()`` matches ``default(none)`` and ``default(shared)``.
// Declared extern: the single matcher instance is defined out of line.
extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause>
    ompDefaultClause;
/// Matches if the OpenMP ``default`` clause has ``none`` kind specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// \endcode
///
/// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``.
AST_MATCHER(OMPDefaultClause, isNoneKind) {
  // Compare against the `none` enumerator of the default-kind enumeration.
  const bool IsNone = Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_none;
  return IsNone;
}
/// Matches if the OpenMP ``default`` clause has ``shared`` kind specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// \endcode
///
/// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``.
AST_MATCHER(OMPDefaultClause, isSharedKind) {
  // Compare against the `shared` enumerator of the default-kind enumeration.
  const bool IsShared = Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_shared;
  return IsShared;
}
/// Matches if the OpenMP directive is allowed to contain the specified OpenMP
/// clause kind.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel for
/// #pragma omp for
/// \endcode
///
/// ``ompExecutableDirective(isAllowedToContainClauseKind(OMPC_default))`` matches
/// ``omp parallel`` and ``omp parallel for``.
///
/// If the matcher is used from clang-query, ``OpenMPClauseKind`` parameter
/// should be passed as a quoted string. e.g.,
/// ``isAllowedToContainClauseKind("OMPC_default").``
AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind,
              OpenMPClauseKind, CKind) {
  // Whether a clause is allowed depends on the active OpenMP version,
  // which is taken from the language options of the ASTContext.
  return llvm::omp::isAllowedClauseForDirective(
      Node.getDirectiveKind(), CKind,
      Finder->getASTContext().getLangOpts().OpenMP);
}
//----------------------------------------------------------------------------//
// End OpenMP handling.
//----------------------------------------------------------------------------//
} // namespace ast_matchers
} // namespace clang
#endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval` values.
 * y is normalized in place so that the microsecond difference lands in
 * [0, 1000000); callers must not rely on *y afterwards.
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y when x has fewer microseconds than y. */
    if (x->tv_usec < y->tv_usec) {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    /* Carry excess microseconds (more than one second's worth) into y. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization tv_usec is non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/* Driver: allocates a double-buffered Nz x Ny x Nx grid, runs the tiled
 * order-1 3D 7-point stencil TESTS times, and reports the best wall time. */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  /* Grid sizes from the command line; +2 adds a one-cell boundary layer on
   * each side.  NOTE(review): Nx/Ny/Nz (when argc <= 3) and Nt (when
   * argc <= 4) are left uninitialized and read below -- undefined behavior;
   * confirm the benchmark harness always passes all four arguments. */
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  /* Two time planes A[0]/A[1], each an Nz x Ny x Nx grid of row pointers.
   * NOTE(review): malloc return values are not checked. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  // (tile sizes 24 x 24 x 24 x 2048; the trailing -1 terminates the list)
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 24;
  tile_size[3] = 2048;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  /* Stencil coefficients: center weight alpha, neighbor weight beta. */
  const double alpha = 0.0876;
  const double beta = 0.0765;
  // initialize variables
  //
  /* Fixed seed for reproducible input data.  NOTE(review): indices start at
   * 1, so the plane/row/column at index 0 is never initialized yet may be
   * read by the stencil at the boundary -- confirm this is intended. */
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  /* Run the kernel TESTS times and keep the minimum elapsed time. */
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
       This file is part of the GNU C Library.
       The GNU C Library is free software; you can redistribute it and/or
       modify it under the terms of the GNU Lesser General Public
       License as published by the Free Software Foundation; either
       version 2.1 of the License, or (at your option) any later version.
       The GNU C Library is distributed in the hope that it will be useful,
       but WITHOUT ANY WARRANTY; without even the implied warranty of
       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
       Lesser General Public License for more details.
       You should have received a copy of the GNU Lesser General Public
       License along with the GNU C Library; if not, see
       <http://www.gnu.org/licenses/>. */
    /* This header is separate from features.h so that the compiler can
       include it implicitly at the start of every compilation.  It must
       not itself include <features.h> or any other header that includes
       <features.h> because the implicit include comes before any feature
       test macros that may be defined in a source file before it first
       explicitly includes a system header.  GCC knows the name of this
       header in order to preinclude it. */
    /* glibc's intent is to support the IEC 559 math functionality, real
       and complex.  If the GCC (4.9 and later) predefined macros
       specifying compiler intent are available, use them to determine
       whether the overall intent is to support these features; otherwise,
       presume an older compiler has intent to support these features and
       define these macros by default. */
    /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
       Unicode 6.0. */
    /* We do not support C11 <threads.h>.  */
    /* Auto-generated (PLUTO/CLooG) time-tiled loop nest below.  t1..t4 are
       tile coordinates, t5 is time, t6/t7/t8 are z/y/x; A[(t5+1)%2] is
       written from A[t5%2] (double buffering). */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,12);t1++) {
        lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24));
        ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24));
        /* Parallelize over the second tile dimension. */
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-1,2)),ceild(24*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(12*t1+Ny+21,24)),floord(24*t2+Ny+20,24)),floord(24*t1-24*t2+Nz+Ny+19,24));t3++) {
            for (t4=max(max(max(0,ceild(3*t1-511,512)),ceild(24*t2-Nz-2044,2048)),ceild(24*t3-Ny-2044,2048));t4<=min(min(min(min(floord(Nt+Nx-4,2048),floord(12*t1+Nx+21,2048)),floord(24*t2+Nx+20,2048)),floord(24*t3+Nx+20,2048)),floord(24*t1-24*t2+Nz+Nx+19,2048));t4++) {
              for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),24*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),24*t3+22),2048*t4+2046),24*t1-24*t2+Nz+21);t5++) {
                for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) {
                    lbv=max(2048*t4,t5+1);
                    ubv=min(2048*t4+2047,t5+Nx-2);
                    /* Innermost x-loop: vectorization hints for icc. */
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  /* Freeing the grid is disabled: it caused measurable performance
   * degradation in the original benchmark runs. */
  /*  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
*/
  return 0;
}
|
GB_binop__second_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__second_int32)
// A.*B function (eWiseMult): GB (_AemultB_01__second_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__second_int32)
// A.*B function (eWiseMult): GB (_AemultB_03__second_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__second_int32)
// A*D function (colscale): GB (_AxD__second_int32)
// D*A function (rowscale): GB (_DxB__second_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__second_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__second_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_int32)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = bij
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = y ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
1
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_INT32 || GxB_NO_SECOND_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with the SECOND operator (cij = bij); all 3 matrices dense.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_ewise3_noaccum__second_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the actual loop lives in the shared template
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, using the SECOND
// operator.  B is sliced by B_ek_slicing into B_ntasks tasks over B_nthreads.
GrB_Info GB (_Cdense_accumB__second_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // the actual loop lives in the shared template
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix, using the SECOND operator.
// p_bwork points to the scalar b, typecast here to int32_t.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumb__second_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        // the template falls through; report success here
        return (GrB_SUCCESS) ;
    }
    // the duplicated, unreachable `return (GrB_SUCCESS)` that followed this
    // block in the generated code has been removed (dead code)
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D (SECOND operator).
// A is sliced by A_ek_slicing into A_ntasks tasks over A_nthreads threads.
GrB_Info GB (_AxD__second_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // direct access to the numeric values of C, filled by the template
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D (SECOND operator).
GrB_Info GB (_DxB__second_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // direct access to the numeric values of C, filled by the template
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, with the SECOND operator (cij = bij).
// The TaskList/C_ntasks/C_nthreads arguments describe the parallel schedule
// computed by the caller; C_to_M/C_to_A/C_to_B map C's vectors to M, A, B.
GrB_Info GB (_AaddB__second_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace for slicing M, A, and B, used by the add template;
    // presumably released by GB_FREE_WORK below -- see GB_ek_slice.h
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B with the SECOND operator.
GrB_Info GB (_AemultB_01__second_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the actual work lives in the shared meta-template
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full, with the SECOND operator.
GrB_Info GB (_AemultB_02__second_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is defined as 0 earlier in this file, so only the #else
    // branch below is compiled for the SECOND operator.
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full, with the SECOND operator.
GrB_Info GB (_AemultB_03__second_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the actual work lives in the shared template
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Wrapper around GB_bitmap_emult_template.c; ewise_method selects which of
// the three masked/unmasked forms the template executes.
GrB_Info GB (_AemultB_bitmap__second_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Disabled (#if 0): this variant is not compiled for this operator.  The
// body shows why no dedicated kernel is needed: SECOND(x,bij) = bij, i.e.
// the bound scalar x is never used and the loop is a plain copy.
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries absent from the bitmap (GBB tests Bb, or is a no-op)
if (!GBB (Bb, p)) continue ;
int32_t bij = GBX (Bx, p, false) ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Disabled (#if 0): this variant is not compiled for this operator.  For
// SECOND(aij,y) = y the matrix values are never read (note the `; ;`
// placeholder where the aij load would be) and every entry becomes y.
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries absent from the bitmap
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = y ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// Disabled (#if 0): not compiled for this operator.  Would transpose A while
// applying SECOND(x,aij) = aij via the GB_unop_transpose.c template.
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any subsequent use
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// Disabled (#if 0): not compiled for this operator.  Would transpose A while
// applying SECOND(aij,y) = y (the aij load is elided, hence the `; ;`).
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
// closes the compile-time guard opened near the top of this generated file
// (GBCOMPACT-style conditional -- confirm against the file header)
#endif
|
sphkmeans.c | /*!
\file
\brief A parallel spherical k-means program
\date Started 4/20/2013
\author George
*/
#include <GKlib.h>
#include <bdmpi.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <unistd.h>
/**************************************************************************/
/* data structures */
/**************************************************************************/
// Run-wide parameters and per-process state shared by all k-means phases.
typedef struct {
// process grid: #processes, this process's rank, OpenMP threads per process
int npes, mype, nthreads;
BDMPI_Comm comm;
// clustering controls: #clusters, random restarts, refinement iterations
int nclusters;
int ntrials, niters;
char *filename;
/* the total number of rows and their overall distribution */
int nrows, ncols;
/* temporary files */
// m*: local matrix; c*: current centers; cp*: current partition;
// bp*: best partition seen across trials
char mfile[8192];
char cfile[8192];
char cpfile[8192];
char bpfile[8192];
/* timers */
double totalTmr;
double compTmr;
double commTmr;
} params_t;
/**************************************************************************/
/* prototypes */
/**************************************************************************/
int LoadData(params_t *params);
void WriteClustering(params_t *params);
void PreprocessData(params_t *params);
void ClusterData(params_t *params, int nrows);
void ComputeClusteringStatistics(params_t *params);
/**************************************************************************/
/**************************************************************************/
/*! Program entry point: parses the command line, runs the parallel spherical
    k-means pipeline (load, preprocess, cluster, write), and reports the
    per-process-max timers on rank 0.
    Fix: the three BDMPI_Reduce calls read `&current`; the source had been
    mangled by an HTML-entity corruption (`&curren` rendered as the currency
    sign), which did not compile. */
int main(int argc, char **argv)
{
  params_t *params;
  int nrows;
  double max, current;

  /* unbuffered stdio so the progress lines below appear immediately */
  setbuf(stdout, NULL);
  setbuf(stderr, NULL);

  BDMPI_Init(&argc, &argv);

  params = (params_t *)gk_malloc(sizeof(params_t), "params");
  memset(params, 0, sizeof(params_t));

  params->comm = BDMPI_COMM_WORLD;
  BDMPI_Comm_size(params->comm, &(params->npes));
  BDMPI_Comm_rank(params->comm, &(params->mype));

  if (argc != 6) {
    if (params->mype == 0)
      fprintf(stderr, "Usage: %s filename #clusters #trials #iters #threads\n", argv[0]);
    BDMPI_Finalize();
    return EXIT_FAILURE;
  }

  params->filename  = strdup(argv[1]);
  params->nclusters = atoi(argv[2]);
  params->ntrials   = atoi(argv[3]);
  params->niters    = atoi(argv[4]);
  params->nthreads  = atoi(argv[5]);

  /* per-process scratch files, keyed by pid+rank to avoid collisions */
  sprintf(params->mfile, "m%d.%d", (int)getpid(), params->mype);
  sprintf(params->cfile, "c%d.%d", (int)getpid(), params->mype);
  sprintf(params->cpfile, "cp%d.%d", (int)getpid(), params->mype);
  sprintf(params->bpfile, "bp%d.%d", (int)getpid(), params->mype);

  omp_set_num_threads(params->nthreads);

  gk_clearwctimer(params->totalTmr);
  gk_clearwctimer(params->compTmr);
  gk_clearwctimer(params->commTmr);

  BDMPI_Barrier(params->comm);
  BDMPI_Barrier(params->comm);
  gk_startwctimer(params->totalTmr);

  nrows = LoadData(params);
  PreprocessData(params);

  /* rank-dependent seed so each process draws different trial seeds */
  srand(params->mype+101);

  printf("[%03d] timestamp01: %zu\n", params->mype, (size_t)time(NULL));
  ClusterData(params, nrows);
  printf("[%03d] timestamp02: %zu\n", params->mype, (size_t)time(NULL));

  WriteClustering(params);

  BDMPI_Barrier(params->comm);
  BDMPI_Barrier(params->comm);
  gk_stopwctimer(params->totalTmr);

  /* print timing stats: max across processes, reported by rank 0 */
  current = gk_getwctimer(params->compTmr);
  BDMPI_Reduce(&current, &max, 1, BDMPI_DOUBLE, BDMPI_MAX, 0, params->comm);
  if (params->mype == 0)
    printf(" compTmr: %10.4lf\n", max);

  current = gk_getwctimer(params->commTmr);
  BDMPI_Reduce(&current, &max, 1, BDMPI_DOUBLE, BDMPI_MAX, 0, params->comm);
  if (params->mype == 0)
    printf(" commTmr: %10.4lf\n", max);

  current = gk_getwctimer(params->totalTmr);
  BDMPI_Reduce(&current, &max, 1, BDMPI_DOUBLE, BDMPI_MAX, 0, params->comm);
  if (params->mype == 0)
    printf(" totalTmr: %10.4lf\n", max);

  BDMPI_Finalize();
  return EXIT_SUCCESS;
}
/**************************************************************************/
/*! Reads a sparse matrix in headerless CSR format. The same matrix is read
by everybody in order to simulate a large file clustering.
\returns the local portion of the matrix.
*/
/**************************************************************************/
/*! Reads a sparse matrix in headerless text CSR format (col val col val ...
    per row, '%' comment lines skipped).  Every process reads the WHOLE file
    (the turn-taking Send/Recv of fpos is vestigial: fpos is forced to 0) to
    simulate clustering a file npes times larger, then caches its copy in
    params->mfile as binary CSR.
    Fixes: `&params->nrows` in the Bcast had been mangled by an HTML-entity
    corruption; the 1.2x nnz growth now adds a constant so it cannot stall
    when the initial estimate is tiny; unused locals removed.
    \returns the number of locally stored rows. */
int LoadData0(params_t *params)
{
  int mype = params->mype, npes = params->npes;
  size_t i, lnlen;
  int ncols, lnrows;
  size_t nrows, nnz, lnnz, cnnz;
  ssize_t *rowptr;
  int *rowind, ival;
  float *rowval=NULL, fval;
  char *line=NULL, *head, *tail;
  FILE *fpin;
  gk_csr_t *mat=NULL;
  BDMPI_Status status;
  long int fpos;

  mat = gk_csr_Create();

  if (mype == 0) {
    if (!gk_fexists(params->filename))
      gk_errexit(SIGERR, "File %s does not exist!\n", params->filename);
    /* token count = nnz*2 because every column index is followed by a value */
    gk_getfilestats(params->filename, &nrows, &nnz, NULL, NULL);
    if (nnz%2 == 1)
      gk_errexit(SIGERR, "LoadData: The number of non-zeros [%zu] is not an even number.\n", nnz);
    params->nrows = nrows;
    nnz = nnz/2; /* account for the values */
  }

  /* send size info to everybody */
  BDMPI_Bcast(&params->nrows, 1, BDMPI_INT, 0, params->comm);
  BDMPI_Bcast(&nnz, sizeof(size_t), BDMPI_BYTE, 0, params->comm);

  /* wait your turn (serializes the file reads across ranks) */
  if (mype != 0)
    BDMPI_Recv(&fpos, sizeof(long int), BDMPI_BYTE, mype-1, 1, params->comm, &status);
  else
    fpos = 0;

  lnnz   = nnz;
  lnrows = params->nrows;
  if (mype == 0)
    printf("[%3d] lnrows: %d, lnnz: %zu\n", mype, lnrows, lnnz);

  rowptr = gk_zmalloc(lnrows+1, "LoadData: rowptr");
  rowind = gk_imalloc(lnnz, "LoadData: rowind");
  rowval = gk_fsmalloc(lnnz, 1.0, "LoadData: rowval");

  params->nrows *= npes; /* duplicate the data to each pe */
  fpos = 0; /* in order to read everything */

  fpin = gk_fopen(params->filename, "r", "LoadData: fpin");
  fseek(fpin, fpos, SEEK_SET);

  /* read the file, one row at a time */
  cnnz = rowptr[0] = ncols = 0;
  for (i=0; i<lnrows; i++) {
    do {
      if (gk_getline(&line, &lnlen, fpin) == -1)
        gk_errexit(SIGERR, "Premature end of input file: file while reading row %zd\n", i);
    } while (line[0] == '%');

    head = line;
    tail = NULL;

    /* parse the row: alternating "column-index value" tokens until EOL */
    while (1) {
      ival = (int)strtol(head, &tail, 0);
      if (tail == head)
        break;
      head = tail;

      fval = strtof(head, &tail);
      if (tail == head)
        gk_errexit(SIGERR, "Value could not be found for column! Row:%zd, NNZ:%zd\n", i, cnnz);
      head = tail;

      if (cnnz == lnnz) { /* adjust the memory */
        /* +1024 guards against a zero/near-zero estimate where 1.2*lnnz
           would not grow the buffer */
        lnnz = 1.2*lnnz + 1024;
        rowind = gk_irealloc(rowind, lnnz, "LoadData: rowind");
        rowval = gk_frealloc(rowval, lnnz, "LoadData: rowval");
      }
      rowind[cnnz] = ival;
      rowval[cnnz] = fval;
      cnnz++;
      ncols = (ncols < ival ? ival : ncols);
    }
    rowptr[i+1] = cnnz;
  }

  mat->nrows  = lnrows;
  mat->ncols  = ncols+1;
  mat->rowptr = rowptr;
  mat->rowind = rowind;
  mat->rowval = rowval;

  fpos = ftell(fpin);
  gk_fclose(fpin);
  gk_free((void **)&line, LTERM);

  /* cache the local matrix in binary form for the later phases */
  gk_csr_Write(mat, params->mfile, GK_CSR_FMT_BINROW, 1, 0);
  gk_csr_Free(&mat);

  /* pass the token (and file position) to the next rank */
  if (mype != npes-1)
    BDMPI_Send(&fpos, sizeof(long int), BDMPI_BYTE, mype+1, 1, params->comm);

  BDMPI_Allreduce(&ncols, &ncols, 1, BDMPI_INT, BDMPI_MAX, params->comm);
  params->ncols = ncols+1;

  if (mype == 0)
    printf("[%3d] params->nrows: %d, params->ncols: %d, cnnz: %zu\n",
        mype, params->nrows, params->ncols, cnnz);

  return lnrows;
}
/**************************************************************************/
/*! Reads a sparse matrix in binary CSR format. The same matrix is read
by everybody in order to simulate a large file clustering.
\returns the local portion of the matrix.
*/
/**************************************************************************/
// Reads the matrix in binary CSR format; every process reads the same file
// (simulating a npes-times-larger dataset) and caches its copy in
// params->mfile.  Reads are serialized across local ranks with a
// Recv-then-Send token chain, and writes are wrapped in Enter/Exitcritical.
// Returns the number of locally stored rows.
int LoadData(params_t *params)
{
int mype=params->mype, npes=params->npes;
int lrank, lsize;
int lnrows, flag;
size_t lnnz;
gk_csr_t *mat=NULL;
BDMPI_Status status;
BDMPI_Comm_lrank(params->comm, &lrank);
BDMPI_Comm_lsize(params->comm, &lsize);
if (mype == 0) {
if (!gk_fexists(params->filename))
gk_errexit(SIGERR, "File %s does not exist!\n", params->filename);
}
/* wait your turn */
// NOTE(review): `flag` is sent uninitialized by the first local rank; its
// value is never inspected (pure turn-taking token), so this is benign.
if (lrank != 0)
BDMPI_Recv(&flag, 1, BDMPI_INT, mype-1, 1, params->comm, &status);
mat = gk_csr_Read(params->filename, GK_CSR_FMT_BINROW, 1, 0);
lnrows = mat->nrows;
lnnz = mat->rowptr[mat->nrows];
// global row count pretends each rank holds a distinct copy
params->nrows = lnrows*npes;
params->ncols = mat->ncols;
if (mype == 0)
printf("[%3d] lnrows: %d, lnnz: %zu\n", mype, mat->nrows, lnnz);
BDMPI_Entercritical();
gk_csr_Write(mat, params->mfile, GK_CSR_FMT_BINROW, 1, 0);
BDMPI_Exitcritical();
gk_csr_Free(&mat);
// pass the token to the next local rank
if (lrank != lsize-1)
BDMPI_Send(&flag, 1, BDMPI_INT, mype+1, 1, params->comm);
if (mype == 0)
printf("[%3d] params->nrows: %d, params->ncols: %d, tnnz: %zu\n",
mype, params->nrows, params->ncols, npes*lnnz);
return lnrows;
}
/**************************************************************************/
/*! Writes a clustering vector. It just let each process write its portion
to the file in a round-robin fashion.
*/
/**************************************************************************/
/*! Writes the clustering vector: each process appends its portion of the
    best partition (bpfile) to <filename>.part.<nclusters> in rank order,
    using a Recv/Send token chain.  Rank 0 truncates the file; everyone else
    appends.
    Fix: the two branches were near-duplicates and the non-root branch sent
    `&mype` as the token while the root sent `&dummy`; the payload is never
    read, but the merged version sends a consistent token. */
void WriteClustering(params_t *params)
{
  int npes=params->npes, mype=params->mype, dummy=0, *cvec;
  size_t i, nrows;
  BDMPI_Status status;
  FILE *fpout;
  char outfile[1024];

  sprintf(outfile, "%s.part.%d", params->filename, params->nclusters);

  /* wait for the previous rank to finish writing */
  if (mype != 0)
    BDMPI_Recv(&dummy, 1, BDMPI_INT, mype-1, 1, params->comm, &status);

  BDMPI_Entercritical();
  cvec = gk_i32readfilebin(params->bpfile, &nrows);
  BDMPI_Exitcritical();
  unlink(params->bpfile);

  /* rank 0 truncates the output file; all others append */
  fpout = gk_fopen(outfile, (mype == 0 ? "w" : "a"), "outfile");
  for (i=0; i<nrows; i++)
    fprintf(fpout, "%d\n", cvec[i]);
  gk_fclose(fpout);
  gk_free((void **)&cvec, LTERM);

  /* pass the token to the next rank */
  if (mype+1 < npes)
    BDMPI_Send(&dummy, 1, BDMPI_INT, mype+1, 1, params->comm);
}
/**************************************************************************/
/*! This function performs various pre-processing steps on the matrix.
*/
/**************************************************************************/
/*! Re-reads the locally cached matrix, L2-normalizes each row (unit-length
    rows, as spherical k-means requires), and writes it back in place.
    File accesses are bracketed by BDMPI critical sections. */
void PreprocessData(params_t *params)
{
  gk_csr_t *lmat;

  BDMPI_Entercritical();
  lmat = gk_csr_Read(params->mfile, GK_CSR_FMT_BINROW, 1, 0);
  BDMPI_Exitcritical();

  gk_csr_Normalize(lmat, GK_CSR_ROW, 2);

  BDMPI_Entercritical();
  gk_csr_Write(lmat, params->mfile, GK_CSR_FMT_BINROW, 1, 0);
  BDMPI_Exitcritical();

  gk_csr_Free(&lmat);
}
/**************************************************************************/
/*! This function computes the k-way clustering solution.
*/
/**************************************************************************/
/*! Computes the k-way clustering: ntrials random restarts, each refined for
    up to niters iterations.  Per iteration: the elected root broadcasts the
    centers, every rank assigns its rows to the most similar center (dot
    product; rows and centers are unit length), partial centers are summed at
    the root and renormalized, and the best-quality partition (largest sum of
    center norms, crval) is kept in bpfile.  Matrix/partition/centers live in
    scratch files between steps to keep the resident footprint small.
    Fixes: five occurrences of `&centers` had been mangled by an HTML-entity
    corruption; the omp-for iteration variable `i` was listed in shared()
    (an OpenMP data-sharing violation -- it is predetermined private);
    gnmoves is now initialized for the niters==1 report; unused bcpart
    removed. */
void ClusterData(params_t *params, int nrows)
{
  int npes=params->npes, mype=params->mype;
  size_t trial, iter, i, j, k, offset, nelmnts;
  int ncols, nclusters, *rowind, *cpart;
  int myrnum, grnum, p;
  int lnmoves, gnmoves=0;
  ssize_t *rowptr;
  float *rowval, *centers;
  float dnorms[params->nclusters], crval=0.0, bcrval;
  gk_csr_t *mat;
  BDMPI_Status status;

  nclusters = params->nclusters;
  ncols     = params->ncols;

  /* perform a number of random trials */
  for (bcrval=0.0, trial=0; trial<params->ntrials; trial++) {
    /* elect a root pe (grnum): holders of the global max random value map
       to their own rank, everyone else to npes; the min of that wins */
    myrnum = rand();
    BDMPI_Allreduce(&myrnum, &grnum, 1, BDMPI_INT, BDMPI_MAX, params->comm);
    if (myrnum == grnum)
      myrnum = mype;
    else
      myrnum = npes;
    BDMPI_Allreduce(&myrnum, &grnum, 1, BDMPI_INT, BDMPI_MIN, params->comm);

    if (grnum == mype) { /* this pe will be selecting the initial centers */
      /* load the data */
      BDMPI_Entercritical();
      mat = gk_csr_Read(params->mfile, GK_CSR_FMT_BINROW, 1, 0);
      BDMPI_Exitcritical();
      rowptr = mat->rowptr;
      rowind = mat->rowind;
      rowval = mat->rowval;

      centers = gk_fsmalloc(nclusters*ncols, 0.0, "centers");

      /* pick nclusters local rows as seeds; centers are stored transposed:
         centers[col*nclusters + k] */
      myrnum = RandomInRange(nrows/nclusters);
      for (k=0; k<nclusters; k++) {
        i = ((k+1)*myrnum)%nrows;
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          centers[rowind[j]*nclusters+k] = rowval[j];
      }
      gk_csr_Free(&mat);

      BDMPI_Entercritical();
      gk_fwritefilebin(params->cfile, nclusters*ncols, centers);
      BDMPI_Exitcritical();
      gk_free((void **)&centers, LTERM);
    }

    /* get into the iterative refinement; partition starts all-unassigned */
    cpart = gk_ismalloc(nrows, -1, "cpart");
    BDMPI_Entercritical();
    gk_i32writefilebin(params->cpfile, nrows, cpart);
    BDMPI_Exitcritical();
    gk_free((void **)&cpart, LTERM);

    for (iter=0; iter<params->niters; iter++) {
      if (grnum == mype) { /* root sends the centers to everybody else */
        BDMPI_Entercritical();
        centers = gk_freadfilebin(params->cfile, &nelmnts);
        BDMPI_Exitcritical();
        for (p=0; p<npes; p++) {
          if (p != grnum)
            BDMPI_Send(centers, ncols*nclusters, BDMPI_FLOAT, p, 1, params->comm);
        }
      }
      else { /* everybody else receives the data */
        centers = gk_fmalloc(nclusters*ncols, "centers");
        BDMPI_Recv(centers, ncols*nclusters, BDMPI_FLOAT, grnum, 1, params->comm, &status);
      }

      printf("[%03d]%04zu.%04zu.0 ts: %d\n", mype, trial, iter, (int)time(NULL));

      /* assign each local row to the closest cluster */
      gk_startwctimer(params->compTmr);
      BDMPI_Entercritical();
      mat = gk_csr_Read(params->mfile, GK_CSR_FMT_BINROW, 1, 0);
      rowptr = mat->rowptr;
      rowind = mat->rowind;
      rowval = mat->rowval;
      cpart = gk_i32readfilebin(params->cpfile, &nelmnts);
      BDMPI_Exitcritical();

      lnmoves = 0;
      /* i is the omp-for loop variable (predetermined private); with
         default(none) it is declared private explicitly, NOT shared */
      #pragma omp parallel default(none),\
          shared(nrows, nclusters, rowptr, rowind, rowval, centers, cpart),\
          private(i, j, k, offset),\
          reduction(+:lnmoves)
      {
        float sims[nclusters];

        #pragma omp for schedule(dynamic,32)
        for (i=0; i<nrows; i++) {
          for (k=0; k<nclusters; k++)
            sims[k] = 0.0;
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            offset = rowind[j]*nclusters;
            for (k=0; k<nclusters; k++)
              sims[k] += rowval[j]*centers[offset+k];
          }
          k = gk_fargmax(nclusters, sims, 1);
          if (k != cpart[i])
            lnmoves++;
          cpart[i] = k;
        }
      }

      /* compute the new local centers */
      gk_fset(nclusters*ncols, 0.0, centers);
      for (i=0; i<nrows; i++) {
        k = cpart[i];
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          centers[rowind[j]*nclusters+k] += rowval[j];
      }

      BDMPI_Entercritical();
      gk_i32writefilebin(params->cpfile, nrows, cpart);
      BDMPI_Exitcritical();
      gk_free((void **)&cpart, LTERM);
      gk_csr_Free(&mat); /* done with the matrix */

      printf("[%03d]%04zu.%04zu.1 ts: %d\n", mype, trial, iter, (int)time(NULL));

      if (mype == grnum) {
        float *rcenters;

        /* write the centers */
        BDMPI_Entercritical();
        gk_fwritefilebin(params->cfile, nclusters*ncols, centers);
        BDMPI_Exitcritical();
        gk_free((void **)&centers, LTERM);

        /* accumulate the partial centers from everybody else */
        for (p=0; p<npes-1; p++) {
          rcenters = gk_fmalloc(nclusters*ncols, "rcenters");
          BDMPI_Recv(rcenters, ncols*nclusters, BDMPI_FLOAT, BDMPI_ANY_SOURCE, 2, params->comm,
              &status);
          BDMPI_Entercritical();
          centers = gk_freadfilebin(params->cfile, &nelmnts);
          BDMPI_Exitcritical();
          for (i=0; i<nclusters*ncols; i++)
            centers[i] += rcenters[i];
          BDMPI_Entercritical();
          gk_fwritefilebin(params->cfile, nclusters*ncols, centers);
          BDMPI_Exitcritical();
          gk_free((void **)&rcenters, &centers, LTERM);
        }

        /* compute the new global centers: crval accumulates the
           pre-normalization norms; each center is rescaled to unit length */
        for (k=0; k<nclusters; k++)
          dnorms[k] = 0.0;
        BDMPI_Entercritical();
        centers = gk_freadfilebin(params->cfile, &nelmnts);
        BDMPI_Exitcritical();
        for (i=0; i<ncols; i++) {
          offset = i*nclusters;
          for (k=0; k<nclusters; k++)
            dnorms[k] += centers[offset+k]*centers[offset+k];
        }
        for (crval=0.0, k=0; k<nclusters; k++) {
          if (dnorms[k] > 0) {
            crval += sqrt(dnorms[k]);
            dnorms[k] = 1.0/sqrt(dnorms[k]);
          }
        }
        for (i=0; i<ncols; i++) {
          offset = i*nclusters;
          for (k=0; k<nclusters; k++)
            centers[offset+k] *= dnorms[k];
        }
        BDMPI_Entercritical();
        gk_fwritefilebin(params->cfile, nclusters*ncols, centers);
        BDMPI_Exitcritical();
        gk_free((void **)&centers, LTERM);
      }
      else {
        BDMPI_Send(centers, ncols*nclusters, BDMPI_FLOAT, grnum, 2, params->comm);
        gk_free((void **)&centers, LTERM);
      }
      gk_stopwctimer(params->compTmr);

      /* see if you are done refining */
      if (iter > 0) {
        BDMPI_Allreduce(&lnmoves, &gnmoves, 1, BDMPI_INT, BDMPI_SUM, params->comm);
        if (gnmoves == 0)
          break;
      }

      /* send the new crval to everybody; track the best partition so far */
      BDMPI_Bcast(&crval, 1, BDMPI_FLOAT, grnum, params->comm);
      if (crval > bcrval) {
        char cmd[8192];
        sprintf(cmd, "cp %s %s", params->cpfile, params->bpfile);
        GKASSERT(system(cmd) != -1);
        bcrval = crval;
      }
    }

    if (mype == 0)
      printf("[%3zu:%3zu] gnmoves: %8d; crval: %8.4e; bcrval: %8.4e\n",
          trial, iter, gnmoves, crval, bcrval);

    if (mype == grnum)
      unlink(params->cfile);
    unlink(params->cpfile);
  }

  /* compute the clustering statistics */
  //ComputeClusteringStatistics(params);
  unlink(params->mfile);
}
/**************************************************************************/
/*! This function prints final statistics for the clustering solution.
*/
/**************************************************************************/
/*! Prints final per-cluster statistics (size and cohesion, i.e. the norm of
    the summed member rows) and their overall sum on rank 0.
    Fixes: `&centers` in the final gk_free had been mangled by an HTML-entity
    corruption; gk_ismalloc was given a float fill value (0.0) for an int
    allocator; unused `npes` removed.
    NOTE(review): the two BDMPI_Reduce calls use the same buffer for send
    and receive -- confirm BDMPI permits in-place reduction. */
void ComputeClusteringStatistics(params_t *params)
{
  int mype=params->mype, nclusters=params->nclusters, ncols=params->ncols;
  int nrows;
  size_t i, j, k, offset, nelmnts;
  int *cpart, *rowind, *pwgts;
  ssize_t *rowptr;
  float *rowval, *centers, *dnorms;
  float crval, tcrval;
  gk_csr_t *mat;

  centers = gk_fsmalloc(nclusters*ncols, 0.0, "centers");
  pwgts   = gk_ismalloc(nclusters, 0, "pwgts");

  /* load the data */
  mat = gk_csr_Read(params->mfile, GK_CSR_FMT_BINROW, 1, 0);
  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  cpart = gk_i32readfilebin(params->bpfile, &nelmnts);

  /* compute the local centers and local partition weights */
  for (i=0; i<nrows; i++) {
    k = cpart[i];
    pwgts[k]++;
    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
      centers[rowind[j]*nclusters+k] += rowval[j];
    }
  }

  gk_csr_Free(&mat);
  gk_free((void **)&cpart, LTERM);

  /* compute global centroids and partition weights */
  BDMPI_Reduce(pwgts, pwgts, nclusters, BDMPI_INT, BDMPI_SUM, 0, params->comm);
  BDMPI_Reduce(centers, centers, nclusters*ncols, BDMPI_FLOAT, BDMPI_SUM, 0, params->comm);

  if (mype == 0) {
    dnorms = gk_fsmalloc(nclusters, 0.0, "dnorms");
    for (i=0; i<ncols; i++) {
      offset = i*nclusters;
      for (k=0; k<nclusters; k++)
        dnorms[k] += centers[offset+k]*centers[offset+k];
    }
    for (tcrval=0.0, k=0; k<nclusters; k++) {
      crval = (dnorms[k] > 0 ? sqrt(dnorms[k]) : 0.0);
      tcrval += crval;
      printf("Cluster: %4zu %6d %.4e\n", k, pwgts[k], crval);
    }
    printf("Overall: %.4e\n", tcrval);
    gk_free((void **)&dnorms, LTERM);
  }

  gk_free((void **)&centers, &pwgts, LTERM);
}
|
watchpoint_support.c | //
// WatchPointDriver.cpp
//
//
// Created by Milind Chabbi on 2/21/17.
//
//
#if !defined(_GNU_SOURCE)
#define _GNU_SOURCE
#endif
#include <asm/unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/kernel.h>
#include <signal.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <ucontext.h>
#include <unistd.h>
#include <sys/mman.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <strings.h>
#include <asm/prctl.h>
#include <sys/prctl.h>
#include "common.h"
#include <hpcrun/main.h>
#include <hpcrun/hpcrun_options.h>
#include <hpcrun/write_data.h>
#include <hpcrun/safe-sampling.h>
#include <hpcrun/hpcrun_stats.h>
#include <hpcrun/memory/mmap.h>
#include <hpcrun/cct/cct.h>
#include <hpcrun/metrics.h>
#include <hpcrun/sample_event.h>
#include <hpcrun/sample_sources_registered.h>
#include <hpcrun/thread_data.h>
#include <hpcrun/trace.h>
#include <lush/lush-backtrace.h>
#include <messages/messages.h>
#include <utilities/tokenize.h>
#include <utilities/arch/context-pc.h>
#include <unwind/common/unwind.h>
#include "watchpoint_support.h"
#include <unwind/x86-family/x86-misc.h>
#define MAX_WP_SLOTS (5)
#define IS_ALIGNED(address, alignment) (! ((size_t)(address) & (alignment-1)))
#define ADDRESSES_OVERLAP(addr1, len1, addr2, len2) (((addr1)+(len1) > (addr2)) && ((addr2)+(len2) > (addr1) ))
#define CACHE_LINE_SIZE (64)
//#define ALT_STACK_SZ (4 * SIGSTKSZ)
#define ALT_STACK_SZ ((1L<<20) > 4 * SIGSTKSZ? (1L<<20): 4* SIGSTKSZ)
//#define TEST
#ifdef TEST
#define EMSG(...) fprintf(stderr, __VA_ARGS__)
#define hpcrun_abort() abort()
#define hpcrun_safe_exit() (1)
#define hpcrun_safe_enter() (1)
#define hpcrun_context_pc(context) (0)
#define get_previous_instruction(ip, pip) (0)
#define get_mem_access_length_and_type(a, b, c) (0)
#endif
#if defined(PERF_EVENT_IOC_UPDATE_BREAKPOINT)
#define FAST_BP_IOC_FLAG (PERF_EVENT_IOC_UPDATE_BREAKPOINT)
#elif defined(PERF_EVENT_IOC_MODIFY_ATTRIBUTES)
#define FAST_BP_IOC_FLAG (PERF_EVENT_IOC_MODIFY_ATTRIBUTES)
#else
#endif
#define CHECK(x) ({int err = (x); \
if (err) { \
EMSG("%s: Failed with %d on line %d of file %s\n", strerror(errno), err, __LINE__, __FILE__); \
monitor_real_abort(); }\
err;})
#define HANDLE_ERROR_IF_ANY(val, expected, errstr) {if (val != expected) {perror(errstr); abort();}}
#define SAMPLES_POST_FULL_RESET_VAL (1)
WPConfig_t wpConfig;
//const WatchPointInfo_t dummyWPInfo = {.sample = {}, .startTime =0, .fileHandle= -1, .isActive= false, .mmapBuffer=0};
//const struct DUMMY_WATCHPOINT dummyWP[MAX_WP_SLOTS];
// Data structure that is given by clients to set a WP
// Per-thread bookkeeping for the watchpoint machinery; one instance per
// thread via the __thread tData below.
// NOTE(review): alignment uses CACHE_LINE_SZ while this file defines
// CACHE_LINE_SIZE -- presumably CACHE_LINE_SZ comes from common.h; confirm.
typedef struct ThreadData{
// dummy perf fd, cache-line aligned to keep per-thread data apart
int lbrDummyFD __attribute__((aligned(CACHE_LINE_SZ)));
// this thread's alternate signal stack (see IsAltStackAddress)
stack_t ss;
// cached FS/GS segment bases (see IsFSorGS)
void * fs_reg_val;
void * gs_reg_val;
// per-thread statistics counters
long numWatchpointTriggers;
long numWatchpointImpreciseIP;
long numWatchpointImpreciseAddressArbitraryLength;
long numWatchpointImpreciseAddress8ByteLength;
long numSampleTriggeringWatchpoints;
long numWatchpointDropped;
long numInsaneIP;
// reentrant PRNG state for replacement decisions
struct drand48_data randBuffer;
// the hardware watchpoint slots and the client callback
WatchPointInfo_t watchPointArray[MAX_WP_SLOTS];
WatchPointUpCall_t fptr;
// padding to avoid false sharing with whatever follows
char dummy[CACHE_LINE_SZ];
} ThreadData_t;
// one instance per thread
static __thread ThreadData_t tData;
// Returns true iff addr lies within this thread's alternate signal stack
// [ss_sp, ss_sp + ss_size).
bool IsAltStackAddress(void *addr){
    void *lo = tData.ss.ss_sp;
    void *hi = tData.ss.ss_sp + tData.ss.ss_size;
    return (addr >= lo) && (addr < hi);
}
// Returns true when addr falls within the first page of this thread's FS or
// GS segment base (the TLS/thread-control area), false otherwise.
bool IsFSorGS(void * addr) {
// lazily fetch the FS/GS bases once per thread
// NOTE(review): the sentinel tested here is (void*)-1, but tData is
// zero-initialized thread-local storage and nothing in this chunk sets
// fs_reg_val to -1 -- confirm an init path elsewhere does, otherwise this
// syscall pair never executes.
if (tData.fs_reg_val == (void *) -1) {
syscall(SYS_arch_prctl, ARCH_GET_FS, &tData.fs_reg_val);
syscall(SYS_arch_prctl, ARCH_GET_GS, &tData.gs_reg_val);
}
// 4096 smallest one page size
if ( (tData.fs_reg_val <= addr) && (addr < tData.fs_reg_val + 4096))
return true;
if ( (tData.gs_reg_val <= addr) && (addr < tData.gs_reg_val + 4096))
return true;
return false;
}
/********* OS SUPPORT ****************/
// perf-util.h has it
// Thin wrapper for the perf_event_open(2) syscall (glibc provides no
// wrapper for it).
static long perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu, int group_fd, unsigned long flags) {
return syscall(__NR_perf_event_open, hw_event, pid, cpu, group_fd, flags);
}
// Returns the kernel thread id of the calling thread.
// NOTE(review): glibc >= 2.30 declares its own gettid(); this definition may
// clash on newer toolchains -- confirm the build's glibc version.
pid_t gettid() {
return syscall(__NR_gettid);
}
// Arms the perf event behind fd; aborts (via CHECK) if the ioctl fails.
static inline void EnableWatchpoint(int fd) {
// Start the event
CHECK(ioctl(fd, PERF_EVENT_IOC_ENABLE, 0));
}
// Disarms the perf event behind wpi and marks the slot inactive; the fd
// must already be open (asserted).  Aborts (via CHECK) if the ioctl fails.
static inline void DisableWatchpoint(WatchPointInfo_t *wpi) {
// Stop the event
assert(wpi->fileHandle != -1);
CHECK(ioctl(wpi->fileHandle, PERF_EVENT_IOC_DISABLE, 0));
wpi->isActive = false;
}
// Maps the perf-event sample buffer (metadata page + data page, i.e.
// 2 * page size) for the given perf fd; aborts the process on failure.
static void * MAPWPMBuffer(int fd){
    size_t len = 2 * wpConfig.pgsz;
    void * buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (MAP_FAILED == buf) {
        EMSG("Failed to mmap : %s\n", strerror(errno));
        monitor_real_abort();
    }
    return buf;
}
// Unmaps a buffer previously returned by MAPWPMBuffer (same 2-page length);
// aborts (via CHECK) if munmap fails.
static void UNMAPWPMBuffer(void * buf){
CHECK(munmap(buf, 2 * wpConfig.pgsz));
}
static int OnWatchPoint(int signum, siginfo_t *info, void *context);
__attribute__((constructor))
static void InitConfig(){
tData.fptr = NULL;
volatile int dummyWP[MAX_WP_SLOTS];
wpConfig.isLBREnabled = true;
struct perf_event_attr peLBR = {
.type = PERF_TYPE_BREAKPOINT,
.size = sizeof(struct perf_event_attr),
.bp_type = HW_BREAKPOINT_W,
.bp_len = HW_BREAKPOINT_LEN_1,
.bp_addr = (uintptr_t)&dummyWP[0],
.sample_period = 1,
.precise_ip = 0 /* arbitraty skid */,
.sample_type = 0,
.exclude_user = 0,
.exclude_kernel = 1,
.exclude_hv = 1,
.disabled = 0, /* enabled */
};
int fd = perf_event_open(&peLBR, 0, -1, -1 /*group*/, 0);
if (fd != -1) {
wpConfig.isLBREnabled = true;
} else {
wpConfig.isLBREnabled = false;
}
CHECK(close(fd));
#if defined(FAST_BP_IOC_FLAG)
wpConfig.isWPModifyEnabled = true;
#else
wpConfig.isWPModifyEnabled = false;
#endif
//wpConfig.signalDelivered = SIGTRAP;
//wpConfig.signalDelivered = SIGIO;
//wpConfig.signalDelivered = SIGUSR1;
wpConfig.signalDelivered = SIGRTMIN + 3;
// Setup the signal handler
sigset_t block_mask;
sigfillset(&block_mask);
// Set a signal handler for SIGUSR1
struct sigaction sa1 = {
.sa_sigaction = OnWatchPoint,
.sa_mask = block_mask,
.sa_flags = SA_SIGINFO | SA_RESTART | SA_NODEFER | SA_ONSTACK
};
if(monitor_sigaction(wpConfig.signalDelivered, OnWatchPoint, 0 /*flags*/, &sa1) == -1) {
fprintf(stderr, "Failed to set WHICH_SIG handler: %s\n", strerror(errno));
monitor_real_abort();
}
wpConfig.pgsz = sysconf(_SC_PAGESIZE);
// identify max WP supported by the architecture
volatile int wpHandles[MAX_WP_SLOTS];
int i = 0;
for(; i < MAX_WP_SLOTS; i++){
struct perf_event_attr pe = {
.type = PERF_TYPE_BREAKPOINT,
.size = sizeof(struct perf_event_attr),
.bp_type = HW_BREAKPOINT_W,
.bp_len = HW_BREAKPOINT_LEN_1,
.bp_addr = (uintptr_t)&dummyWP[i],
.sample_period = 1,
.precise_ip = 0 /* arbitraty skid */,
.sample_type = 0,
.exclude_user = 0,
.exclude_kernel = 1,
.exclude_hv = 1,
.disabled = 0, /* enabled */
};
wpHandles[i] = perf_event_open(&pe, 0, -1, -1 /*group*/, 0);
if (wpHandles[i] == -1) {
break;
}
}
if(i == 0) {
fprintf(stderr, "Cannot create a single watch point\n");
monitor_real_abort();
}
for (int j = 0 ; j < i; j ++) {
CHECK(close(wpHandles[j]));
}
wpConfig.maxWP = i;
// Should we get the floating point type in an access?
wpConfig.getFloatType = false;
// Get the replacement scheme
char * replacementScheme = getenv("HPCRUN_WP_REPLACEMENT_SCHEME");
if(replacementScheme){
if(0 == strcasecmp(replacementScheme, "AUTO")) {
wpConfig.replacementPolicy = AUTO;
} else if (0 == strcasecmp(replacementScheme, "OLDEST")) {
wpConfig.replacementPolicy = OLDEST;
} else if (0 == strcasecmp(replacementScheme, "NEWEST")) {
wpConfig.replacementPolicy = NEWEST;
} else {
// default;
wpConfig.replacementPolicy = AUTO;
}
} else {
// default;
wpConfig.replacementPolicy = AUTO;
}
// Should we fix IP off by one?
char * fixIP = getenv("HPCRUN_WP_DONT_FIX_IP");
if(fixIP){
if(0 == strcasecmp(fixIP, "1")) {
wpConfig.dontFixIP = true;
} if (0 == strcasecmp(fixIP, "true")) {
wpConfig.dontFixIP = true;
} else {
// default;
wpConfig.dontFixIP = false;
}
} else {
// default;
wpConfig.dontFixIP = false;
}
// Should we get the address in a WP trigger?
char * disassembleWPAddress = getenv("HPCRUN_WP_DONT_DISASSEMBLE_TRIGGER_ADDRESS");
if(disassembleWPAddress){
if(0 == strcasecmp(disassembleWPAddress, "1")) {
wpConfig.dontDisassembleWPAddress = true;
} if (0 == strcasecmp(disassembleWPAddress, "true")) {
wpConfig.dontDisassembleWPAddress = true;
} else {
// default;
wpConfig.dontDisassembleWPAddress = false;
}
} else {
// default;
wpConfig.dontDisassembleWPAddress = false;
}
}
// Per-client overrides of the global wpConfig, invoked by the respective
// tools before arming watchpoints.  The void* argument is unused.
// RedSpy/LoadSpy need the FP type of the access.
void RedSpyWPConfigOverride(void *v){
wpConfig.getFloatType = true;
}
void LoadSpyWPConfigOverride(void *v){
wpConfig.getFloatType = true;
}
void FalseSharingWPConfigOverride(void *v){
// replacement policy is OLDEST forced.
wpConfig.replacementPolicy = OLDEST;
}
void TrueSharingWPConfigOverride(void *v){
// replacement policy is OLDEST forced.
wpConfig.replacementPolicy = OLDEST;
}
void AllSharingWPConfigOverride(void *v){
// replacement policy is OLDEST forced.
wpConfig.replacementPolicy = OLDEST;
}
void IPCFalseSharingWPConfigOverride(void *v){
// replacement policy is OLDEST forced.
wpConfig.replacementPolicy = OLDEST;
}
void IPCTrueSharingWPConfigOverride(void *v){
// replacement policy is OLDEST forced.
wpConfig.replacementPolicy = OLDEST;
}
void IPCAllSharingWPConfigOverride(void *v){
// replacement policy is OLDEST forced.
wpConfig.replacementPolicy = OLDEST;
}
// Reuse clients skip IP fixup and trigger-address disassembly entirely.
void TemporalReuseWPConfigOverride(void *v){
// dont fix IP
wpConfig.dontFixIP = true;
wpConfig.dontDisassembleWPAddress = true;
}
void SpatialReuseWPConfigOverride(void *v){
// dont fix IP
wpConfig.dontFixIP = true;
wpConfig.dontDisassembleWPAddress = true;
}
// Create a hardware-breakpoint perf event for sampleData and record it in
// *wpi.  When modify==true (and the kernel supports FAST_BP_IOC_FLAG) the
// existing event is reprogrammed in place instead of being recreated.
// Aborts the process on unrecoverable perf/fcntl failures.
static void CreateWatchPoint(WatchPointInfo_t * wpi, SampleData_t * sampleData, bool modify) {
    // Perf event settings
    struct perf_event_attr pe = {
        .type = PERF_TYPE_BREAKPOINT,
        .size = sizeof(struct perf_event_attr),
        // .bp_type = HW_BREAKPOINT_W,
        // .bp_len = HW_BREAKPOINT_LEN_4,
        .sample_period = 1,
        .precise_ip = wpConfig.isLBREnabled? 2 /*precise_ip 0 skid*/ : 0 /* arbitrary skid */,
        .sample_type = (PERF_SAMPLE_IP),
        .exclude_user = 0,
        .exclude_kernel = 1,
        .exclude_hv = 1,
        .disabled = 0, /* enabled */
    };

    switch (sampleData->wpLength) {
        case 1: pe.bp_len = HW_BREAKPOINT_LEN_1; break;
        case 2: pe.bp_len = HW_BREAKPOINT_LEN_2; break;
        case 4: pe.bp_len = HW_BREAKPOINT_LEN_4; break;
        case 8: pe.bp_len = HW_BREAKPOINT_LEN_8; break;
        default:
            // BUGFIX: report the length we switched on.  wpi->sample is only
            // assigned at the end of this function, so the old message read
            // stale wpi->sample.wpLength data.
            EMSG("Unsupported .bp_len %d: %s\n", sampleData->wpLength, strerror(errno));
            monitor_real_abort();
    }
    pe.bp_addr = (uintptr_t)sampleData->va;

    switch (sampleData->type) {
        case WP_READ: pe.bp_type = HW_BREAKPOINT_R; break;
        case WP_WRITE: pe.bp_type = HW_BREAKPOINT_W; break;
        default: pe.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
    }

#if defined(FAST_BP_IOC_FLAG)
    if(modify) {
        // modification: reprogram the already-open event in place.
        assert(wpi->fileHandle != -1);
        assert(wpi->mmapBuffer != 0);
        //DisableWatchpoint(wpi);
        CHECK(ioctl(wpi->fileHandle, FAST_BP_IOC_FLAG, (unsigned long) (&pe)));
        //if(wpi->isActive == false) {
        //EnableWatchpoint(wpi->fileHandle);
        //}
    } else
#endif
    {
        // fresh creation
        // Create the perf_event for this thread on all CPUs with no event group
        int perf_fd = perf_event_open(&pe, 0, -1, -1 /*group*/, 0);
        if (perf_fd == -1) {
            EMSG("Failed to open perf event file: %s\n",strerror(errno));
            monitor_real_abort();
        }
        // Set the perf_event file to async mode
        CHECK(fcntl(perf_fd, F_SETFL, fcntl(perf_fd, F_GETFL, 0) | O_ASYNC));
        // Tell the file to send a signal when an event occurs
        CHECK(fcntl(perf_fd, F_SETSIG, wpConfig.signalDelivered));
        // Deliver the signal to this thread
        struct f_owner_ex fown_ex;
        fown_ex.type = F_OWNER_TID;
        fown_ex.pid  = gettid();
        int ret = fcntl(perf_fd, F_SETOWN_EX, &fown_ex);
        if (ret == -1){
            EMSG("Failed to set the owner of the perf event file: %s\n", strerror(errno));
            // BUGFIX: the old code returned here without closing perf_fd,
            // leaking the descriptor and leaving *wpi untouched/inconsistent.
            CHECK(close(perf_fd));
            return;
        }
        // CHECK(fcntl(perf_fd, F_SETOWN, gettid()));
        wpi->fileHandle = perf_fd;
        // mmap the file if lbr is enabled
        if(wpConfig.isLBREnabled) {
            wpi->mmapBuffer = MAPWPMBuffer(perf_fd);
        }
    }

    wpi->isActive = true;
    wpi->va = (void *) pe.bp_addr;
    wpi->sample = *sampleData;      // snapshot the request that armed this WP
    wpi->startTime = rdtsc();       // used by NEWEST/OLDEST victim policies
}
/* create a dummy PERF_TYPE_HARDWARE event that will never fire */
static void CreateDummyHardwareEvent(void) {
// Perf event settings
struct perf_event_attr pe = {
.type = PERF_TYPE_HARDWARE,
.size = sizeof(struct perf_event_attr),
.config = PERF_COUNT_HW_CACHE_MISSES,
.sample_period = 0x7fffffffffffffff, /* some insanely large sample period */
.precise_ip = 2,
.sample_type = PERF_SAMPLE_BRANCH_STACK,
.exclude_user = 0,
.exclude_kernel = 1,
.exclude_hv = 1,
.branch_sample_type = PERF_SAMPLE_BRANCH_ANY,
};
// Create the perf_event for this thread on all CPUs with no event group
int perf_fd = perf_event_open(&pe, 0, -1, -1, 0);
if (perf_fd == -1) {
EMSG("Failed to open perf event file: %s\n", strerror(errno));
monitor_real_abort();
}
tData.lbrDummyFD = perf_fd;
}
// Close the never-firing dummy event opened by CreateDummyHardwareEvent().
// CHECK() presumably validates close()'s return value — see its definition.
static void CloseDummyHardwareEvent(int perf_fd){
    CHECK(close(perf_fd));
}
/*********** Client interfaces *******/
// Fully tear down a watchpoint slot: unmap its perf ring buffer (if mapped),
// close its perf fd, and mark the slot free.  Order matters: the buffer is
// unmapped before the fd backing it is closed.
static void DisArm(WatchPointInfo_t * wpi){
    // assert(wpi->isActive);
    assert(wpi->fileHandle != -1);
    if(wpi->mmapBuffer)
        UNMAPWPMBuffer(wpi->mmapBuffer);
    wpi->mmapBuffer = 0;
    CHECK(close(wpi->fileHandle));
    wpi->fileHandle = -1;
    wpi->isActive = false;
}
// Arm (or re-arm) the slot *wpi for sampleData.  If the kernel supports
// in-place modification and the slot already has an open perf fd, the event
// is reprogrammed directly; otherwise any active watchpoint is torn down and
// a fresh one is created.  Always reports success.
static bool ArmWatchPoint(WatchPointInfo_t * wpi, SampleData_t * sampleData) {
    bool reprogramInPlace = wpConfig.isWPModifyEnabled && (wpi->fileHandle != -1);
    if (reprogramInPlace) {
        // Works whether or not the slot is currently active.
        CreateWatchPoint(wpi, sampleData, true);
        return true;
    }
    // Slow path: release the old watchpoint first if it is armed.
    if (wpi->isActive) {
        DisArm(wpi);
    }
    CreateWatchPoint(wpi, sampleData, false);
    return true;
}
// Per thread initialization
// Prepares this thread for watchpoint use: installs an alternate signal
// stack (the WP signal may arrive on a nearly-exhausted stack), records the
// client upcall, seeds a per-thread PRNG for the AUTO victim policy, zeroes
// the statistics counters, resets every WP slot, and — when LBR is enabled —
// opens the dummy hardware event workaround.
void WatchpointThreadInit(WatchPointUpCall_t func){
    tData.ss.ss_sp = malloc(ALT_STACK_SZ);
    if (tData.ss.ss_sp == NULL){
        EMSG("Failed to malloc ALT_STACK_SZ");
        monitor_real_abort();
    }
    tData.ss.ss_size = ALT_STACK_SZ;
    tData.ss.ss_flags = 0;
    if (sigaltstack(&tData.ss, NULL) == -1){
        EMSG("Failed sigaltstack");
        monitor_real_abort();
    }
    tData.lbrDummyFD = -1;
    tData.fptr = func;
    // (void*)-1 marks the fs/gs register values as "not captured yet".
    tData.fs_reg_val = (void*)-1;
    tData.gs_reg_val = (void*)-1;
    // NOTE(review): time(NULL) seeds threads started in the same second with
    // identical sequences — confirm that is acceptable for the AUTO policy.
    srand48_r(time(NULL), &tData.randBuffer);
    // Zero the per-thread trigger statistics.
    tData.numWatchpointTriggers = 0;
    tData.numWatchpointImpreciseIP = 0;
    tData.numWatchpointImpreciseAddressArbitraryLength = 0;
    tData.numWatchpointImpreciseAddress8ByteLength = 0;
    tData.numWatchpointDropped = 0;
    tData.numSampleTriggeringWatchpoints = 0;
    tData.numInsaneIP = 0;
    // Every slot starts unarmed with a fresh replacement probability.
    for (int i=0; i<wpConfig.maxWP; i++) {
        tData.watchPointArray[i].isActive = false;
        tData.watchPointArray[i].fileHandle = -1;
        tData.watchPointArray[i].startTime = 0;
        tData.watchPointArray[i].samplePostFull = SAMPLES_POST_FULL_RESET_VAL;
    }
    //if LBR is supported create a dummy PERF_TYPE_HARDWARE for Linux workaround
    if(wpConfig.isLBREnabled) {
        CreateDummyHardwareEvent();
    }
}
// Per-thread teardown: disarm every open watchpoint, close the LBR dummy
// event, and flush this thread's trigger statistics into the global hpcrun
// counters.  The alternate signal stack is intentionally leaked (see #if 0).
void WatchpointThreadTerminate(){
    for (int i = 0; i < wpConfig.maxWP; i++) {
        if(tData.watchPointArray[i].fileHandle != -1) {
            DisArm(&tData.watchPointArray[i]);
        }
    }
    if(tData.lbrDummyFD != -1) {
        CloseDummyHardwareEvent(tData.lbrDummyFD);
        tData.lbrDummyFD = -1;
    }
    // Reset fs/gs sentinels back to "not captured".
    tData.fs_reg_val = (void*)-1;
    tData.gs_reg_val = (void*)-1;
    // Publish this thread's statistics to the process-wide counters.
    hpcrun_stats_num_watchpoints_triggered_inc(tData.numWatchpointTriggers);
    hpcrun_stats_num_watchpoints_imprecise_inc(tData.numWatchpointImpreciseIP);
    hpcrun_stats_num_watchpoints_imprecise_address_inc(tData.numWatchpointImpreciseAddressArbitraryLength);
    hpcrun_stats_num_watchpoints_imprecise_address_8_byte_inc(tData.numWatchpointImpreciseAddress8ByteLength);
    hpcrun_stats_num_insane_ip_inc(tData.numInsaneIP);
    hpcrun_stats_num_watchpoints_dropped_inc(tData.numWatchpointDropped);
    hpcrun_stats_num_sample_triggering_watchpoints_inc(tData.numSampleTriggeringWatchpoints);
#if 0
    tData.ss.ss_flags = SS_DISABLE;
    if (sigaltstack(&tData.ss, NULL) == -1){
        EMSG("Failed sigaltstack WatchpointThreadTerminate");
        // no need to abort , just leak the memory
        // monitor_real_abort();
    } else {
        if(tData.ss.ss_sp)
            free(tData.ss.ss_sp);
    }
#endif
}
// Finds a victim slot to set a new WP
// Returns EMPTY_SLOT when an unarmed slot exists, NON_EMPTY_SLOT when an
// armed slot was selected for eviction under `policy`, and NONE_AVAILABLE
// when nothing should be replaced (for AUTO, *location still holds a random
// candidate so a client may force replacement anyway).
static VictimType GetVictim(int * location, ReplacementPolicy policy){
    // If any WP slot is inactive, return it;
    for(int i = 0; i < wpConfig.maxWP; i++){
        if(!tData.watchPointArray[i].isActive) {
            *location = i;
            // Increase samplePostFull for those who survived.
            for(int rest = 0; rest < wpConfig.maxWP; rest++){
                if (tData.watchPointArray[rest].isActive) {
                    tData.watchPointArray[rest].samplePostFull++;
                }
            }
            return EMPTY_SLOT;
        }
    }
    switch (policy) {
        case AUTO:{
            // Shuffle the visit order
            int slots[MAX_WP_SLOTS];
            for(int i = 0; i < wpConfig.maxWP; i++)
                slots[i] = i;
            // Shuffle: swap each position with a uniformly random partner.
            for(int i = 0; i < wpConfig.maxWP; i++){
                long int randVal;
                lrand48_r(&tData.randBuffer, &randVal);
                randVal = randVal % wpConfig.maxWP;
                int tmp = slots[i];
                slots[i] = slots[randVal];
                slots[randVal] = tmp;
            }
            // attempt to replace each WP with its own probability:
            // a slot that survived k samples is evicted with probability
            // 1/(1+k), so long-lived watchpoints become progressively
            // "stickier".
            for(int i = 0; i < wpConfig.maxWP; i++) {
                int loc = slots[i];
                double probabilityToReplace =  1.0/(1.0 + (double)tData.watchPointArray[loc].samplePostFull);
                double randValue;
                drand48_r(&tData.randBuffer, &randValue);
                // update tData.samplePostFull
                tData.watchPointArray[loc].samplePostFull++;
                if(randValue <= probabilityToReplace) {
                    *location = loc;
                    // TODO: Milind: Not sure whether I should increment samplePostFull of the remainiing slots.
                    // In Qingsen's experiments, doing this not hurt.
                    for(int rest = i+1; rest < wpConfig.maxWP; rest++){
                        tData.watchPointArray[slots[rest]].samplePostFull++;
                    }
                    return NON_EMPTY_SLOT;
                }
            }
            // this is an indication not to replace, but if the client chooses to force, they can
            *location = slots[0] /*random value*/;
            return NONE_AVAILABLE;
        }
            break;
        case NEWEST:{
            // Always replace the newest (largest startTime wins).
            int64_t newestTime = 0;
            for(int i = 0; i < wpConfig.maxWP; i++){
                if(newestTime < tData.watchPointArray[i].startTime) {
                    *location = i;
                    newestTime = tData.watchPointArray[i].startTime;
                }
            }
            return NON_EMPTY_SLOT;
        }
            break;
        case OLDEST:{
            // Always replace the oldest (smallest startTime wins).
            int64_t oldestTime = INT64_MAX;
            for(int i = 0; i < wpConfig.maxWP; i++){
                if(oldestTime > tData.watchPointArray[i].startTime) {
                    *location = i;
                    oldestTime = tData.watchPointArray[i].startTime;
                }
            }
            return NON_EMPTY_SLOT;
        }
            break;
        case EMPTY_SLOT_ONLY:{
            // All slots are armed and only empty slots may be used.
            return NONE_AVAILABLE;
        }
            break;
        default:
            return NONE_AVAILABLE;
    }
    // No unarmed WP slot found. (Unreachable: every switch case returns.)
}
// Read memory barrier for consuming the perf mmap ring buffer: orders the
// load of data_head before subsequent loads of the payload (x86 lfence).
static inline void
rmb(void) {
    asm volatile("lfence":::"memory");
}
// Drop every pending record in a perf mmap ring buffer by advancing the
// consumer pointer (data_tail) up to the producer pointer (data_head), so
// stale records are not misread on the next watchpoint trigger.
// CLEANUP: the old code also computed `data`, `tail`, and `pgmsk` locals that
// were never used; they are removed.
static void ConsumeAllRingBufferData(void *mbuf) {
    struct perf_event_mmap_page *hdr = (struct perf_event_mmap_page *)mbuf;
    /*
     * size of what is available
     *
     * data_head, data_tail never wrap around
     */
    size_t avail_sz = hdr->data_head - hdr->data_tail;
    rmb();
#if 0
    if(avail_sz == 0 )
        EMSG("\n avail_sz = %d\n", avail_sz);
    else
        EMSG("\n EEavail_sz = %d\n", avail_sz);
#endif
    (void) avail_sz;  // only consumed by the disabled diagnostics above
    // reset tail to head
    hdr->data_tail = hdr->data_head;
}
// Consume `sz` bytes from the perf mmap ring buffer `mbuf` into `buf`,
// handling wrap-around of the circular payload.  Returns 0 on success and
// -1 when fewer than `sz` bytes are available (nothing is consumed then).
static int ReadMampBuffer(void *mbuf, void *buf, size_t sz) {
    struct perf_event_mmap_page *hdr = (struct perf_event_mmap_page *)mbuf;
    void *data;
    unsigned long tail;
    size_t avail_sz, m, c;
    size_t pgmsk = wpConfig.pgsz - 1;
    /*
     * data points to beginning of buffer payload
     */
    data = ((void *)hdr) + wpConfig.pgsz;
    /*
     * position of tail within the buffer payload
     */
    tail = hdr->data_tail & pgmsk;
    /*
     * size of what is available
     *
     * data_head, data_tail never wrap around
     */
    avail_sz = hdr->data_head - hdr->data_tail;
    if (sz > avail_sz) {
        // CONSISTENCY/SAFETY FIX: use EMSG like every other error path in
        // this file; printf is not async-signal-safe and this function runs
        // from the watchpoint signal handler (via CollectWatchPointTriggerInfo).
        EMSG("\n sz > avail_sz: sz = %lu, avail_sz = %lu\n", sz, avail_sz);
        rmb();
        return -1;
    }
    /* From perf_event_open() manpage */
    rmb();
    /*
     * sz <= avail_sz, we can satisfy the request
     */
    /*
     * c = size till end of buffer
     *
     * buffer payload size is necessarily
     * a power of two, so we can do:
     */
    c = pgmsk + 1 - tail;
    /*
     * min with requested size
     */
    m = c < sz ? c : sz;
    /* copy beginning */
    memcpy(buf, data + tail, m);
    /*
     * copy wrapped around leftover
     */
    if (sz > m)
        memcpy(buf + m, data, sz - m);
    // Publish consumption: the kernel may now overwrite these bytes.
    hdr->data_tail += sz;
    return 0;
}
// Advance the ring buffer's consumer pointer past `sz` bytes without copying
// them out; the skip is clamped so data_tail never passes data_head.
void
SkipBuffer(struct perf_event_mmap_page *hdr, size_t sz){
    if ((hdr->data_tail + sz) > hdr->data_head)
        sz = hdr->data_head - hdr->data_tail;
    rmb();
    hdr->data_tail += sz;
}
// A candidate PC is "sane" only if it is non-NULL, does not lie after the
// context PC, and is at most 15 bytes (the longest x86 instruction) before it.
static inline bool IsPCSane(void * contextPC, void *possiblePC){
    if (possiblePC == 0)
        return false;           // NULL can never be a valid instruction address
    if (possiblePC > contextPC)
        return false;           // candidate must not be past the context PC
    return (contextPC - possiblePC) <= 15;
}
// Weight of one watchpoint among all active watchpoints sharing the same
// calling context.  The share-counting implementation is disabled (#if 0);
// the function currently always reports full weight (1.0).
double ProportionOfWatchpointAmongOthersSharingTheSameContext(WatchPointInfo_t *wpi){
#if 0
    int share = 0;
    for(int i = 0; i < wpConfig.maxWP; i++) {
        if(tData.watchPointArray[i].isActive && tData.watchPointArray[i].sample.node == wpi->sample.node) {
            share ++;
        }
    }
    assert(share > 0);
    return 1.0/share;
#else
    return 1.0;
#endif
}
// Compute the address of the instruction preceding contextIP, excluding the
// addresses currently being watched (so the disassembler does not trip over
// them), and return it as the patched trigger PC.
static inline void * GetPatchedIP(void * contextIP) {
    void * priorInsn;
    void * activeAddrs[MAX_WP_SLOTS] = {0};
    int numActive = 0;
    for (int slot = 0; slot < wpConfig.maxWP; slot++) {
        if (!tData.watchPointArray[slot].isActive)
            continue;
        activeAddrs[numActive] = tData.watchPointArray[slot].va;
        numActive++;
    }
    get_previous_instruction(contextIP, &priorInsn, activeAddrs, numActive);
    return priorInsn;
}
// Gather all useful data when a WP triggers
// Reads the pending PERF_RECORD_SAMPLE from the watchpoint's ring buffer and
// fills *wpt with a reliable PC, the accessed address/length/type, and the
// signal context.  Returns true on success.  On any failure (unpatchable PC,
// non-load/store instruction, unexpected record type) the ring buffer is
// drained and false is returned so the caller can drop the trigger.
static bool CollectWatchPointTriggerInfo(WatchPointInfo_t * wpi, WatchPointTrigger_t *wpt, void * context){
    //struct perf_event_mmap_page * b = wpi->mmapBuffer;
    struct perf_event_header hdr;
    if (ReadMampBuffer(wpi->mmapBuffer, &hdr, sizeof(struct perf_event_header)) < 0) {
        EMSG("Failed to ReadMampBuffer: %s\n", strerror(errno));
        monitor_real_abort();
    }
    switch(hdr.type) {
        case PERF_RECORD_SAMPLE:
            // NOTE(review): hdr.type holds a PERF_RECORD_* value, not a
            // PERF_SAMPLE_* bitmask; "hdr.type & PERF_SAMPLE_IP" is nonzero
            // here only by numeric accident, so the else branch below is
            // effectively dead — confirm the intended check was on the
            // event's attr.sample_type.
            assert (hdr.type & PERF_SAMPLE_IP);
            void * contextIP = hpcrun_context_pc(context);
            void * preciseIP = (void *)-1;
            void * patchedIP = (void *)-1;
            void * reliableIP = (void *)-1;
            void * addr = (void *)-1;
            if (hdr.type & PERF_SAMPLE_IP){
                if (ReadMampBuffer(wpi->mmapBuffer, &preciseIP, sizeof(uint64_t)) < 0) {
                    EMSG("Failed to ReadMampBuffer: %s\n", strerror(errno));
                    monitor_real_abort();
                }
                if(! (hdr.misc & PERF_RECORD_MISC_EXACT_IP)){
                    //EMSG("PERF_SAMPLE_IP imprecise\n");
                    tData.numWatchpointImpreciseIP ++;
                    if(wpConfig.dontFixIP == false) {
                        patchedIP = GetPatchedIP(contextIP);
                        if(!IsPCSane(contextIP, patchedIP)) {
                            //EMSG("get_previous_instruction failed \n");
                            tData.numInsaneIP ++;
                            goto ErrExit;
                        }
                        reliableIP = patchedIP;
                    } else {
                        // Fake as requested by Xu for reuse clients
                        reliableIP = contextIP-1;
                    }
                    //EMSG("PERF_SAMPLE_IP imprecise: %p patched to %p in WP handler\n", tmpIP, patchedIP);
                } else {
#if 0 // Precise PC can be far away in jump/call instructions.
                    // Ensure the "precise" PC is within one instruction from context pc
                    if(!IsPCSane(contextIP, preciseIP)) {
                        tData.numInsaneIP ++;
                        //EMSG("get_previous_instruction failed \n");
                        goto ErrExit;
                    }
#endif
                    reliableIP = preciseIP;
                    //if(! ((ip <= tmpIP) && (tmpIP-ip < 20))) ConsumeAllRingBufferData(wpi->mmapBuffer);
                    //assert( (ip <= tmpIP) && (tmpIP-ip < 20));
                }
            } else {
                // Should happen only for wpConfig.isLBREnabled==false
                assert(wpConfig.isLBREnabled==false);
                // Fall back to old scheme of disassembling and capturing the info
                if(wpConfig.dontFixIP == false) {
                    patchedIP = GetPatchedIP(contextIP);
                    if(!IsPCSane(contextIP, patchedIP)) {
                        tData.numInsaneIP ++;
                        //EMSG("PERF_SAMPLE_IP imprecise: %p failed to patch in WP handler, WP dropped\n", tmpIP);
                        goto ErrExit;
                    }
                    reliableIP = patchedIP;
                }else {
                    // Fake as requested by Xu for reuse clients
                    reliableIP = contextIP-1;
                }
            }
            wpt->pc = reliableIP;

            if(wpConfig.dontDisassembleWPAddress == false){
                FloatType * floatType = wpConfig.getFloatType? &wpt->floatType : 0;
                if(false == get_mem_access_length_and_type_address(wpt->pc, (uint32_t*) &(wpt->accessLength), &(wpt->accessType), floatType, context, &addr)){
                    //EMSG("WP triggered on a non Load/Store add = %p\n", wpt->pc);
                    goto ErrExit;
                }
                if (wpt->accessLength == 0) {
                    //EMSG("WP triggered 0 access length! at pc=%p\n", wpt->pc);
                    goto ErrExit;
                }
                void * patchedAddr = (void *)-1;
                // Stack affecting addresses will be off by 8
                // Some instructions affect the address computing register: mov (%rax),%eax
                // Hence, if the addresses do NOT overlap, merely use the Sample address!
                if(false == ADDRESSES_OVERLAP(addr, wpt->accessLength, wpi->va, wpi->sample.wpLength)) {
                    // BUGFIX: a second, unconditional increment of
                    // numWatchpointImpreciseAddressArbitraryLength followed
                    // this if/else, double-counting every mismatch (and
                    // polluting the 8-byte case); the duplicate is removed.
                    if ((wpt->accessLength == sizeof(void *)) && (wpt->accessLength == wpi->sample.wpLength) && (((addr - wpi->va) == sizeof(void *)) || ((wpi->va - addr) == sizeof(void *))))
                        tData.numWatchpointImpreciseAddress8ByteLength ++;
                    else
                        tData.numWatchpointImpreciseAddressArbitraryLength ++;
                    patchedAddr = wpi->va;
                } else {
                    patchedAddr = addr;
                }
                wpt->va = patchedAddr;
            } else {
                wpt->va = (void *)-1;
            }
            wpt->ctxt = context;
            // We must cleanup the mmap buffer if there is any data left
            ConsumeAllRingBufferData(wpi->mmapBuffer);
            return true;
        case PERF_RECORD_EXIT:
            EMSG("PERF_RECORD_EXIT sample type %d sz=%d\n", hdr.type, hdr.size);
            //SkipBuffer(wpi->mmapBuffer , hdr.size - sizeof(hdr));
            goto ErrExit;
        case PERF_RECORD_LOST:
            EMSG("PERF_RECORD_LOST sample type %d sz=%d\n", hdr.type, hdr.size);
            //SkipBuffer(wpi->mmapBuffer , hdr.size - sizeof(hdr));
            goto ErrExit;
        case PERF_RECORD_THROTTLE:
            EMSG("PERF_RECORD_THROTTLE sample type %d sz=%d\n", hdr.type, hdr.size);
            //SkipBuffer(wpi->mmapBuffer , hdr.size - sizeof(hdr));
            goto ErrExit;
        case PERF_RECORD_UNTHROTTLE:
            EMSG("PERF_RECORD_UNTHROTTLE sample type %d sz=%d\n", hdr.type, hdr.size);
            //SkipBuffer(wpi->mmapBuffer , hdr.size - sizeof(hdr));
            goto ErrExit;
        default:
            EMSG("unknown sample type %d sz=%d\n", hdr.type, hdr.size);
            //SkipBuffer(wpi->mmapBuffer , hdr.size - sizeof(hdr));
            goto ErrExit;
    }

ErrExit:
    // We must cleanup the mmap buffer if there is any data left
    ConsumeAllRingBufferData(wpi->mmapBuffer);
    return false;
}
// Disable a watchpoint via the cheapest mechanism available: an in-place
// kernel disable when modification is supported, a full teardown otherwise.
void DisableWatchpointWrapper(WatchPointInfo_t *wpi){
    if(!wpConfig.isWPModifyEnabled) {
        DisArm(wpi);
        return;
    }
    DisableWatchpoint(wpi);
}
// Signal handler invoked when a hardware watchpoint fires.
// Pauses sampling, matches the signal's fd to an active slot, runs the
// slot's pre-trigger action, collects trigger details, invokes the client
// upcall, then applies the action the client returned.  Always returns 0.
static int OnWatchPoint(int signum, siginfo_t *info, void *context){
    //volatile int x;
    //fprintf(stderr, "OnWatchPoint=%p\n", &x);
    // Disable HPCRUN sampling
    // if the trap is already in hpcrun, return
    // If the interrupt came from inside our code, then drop the sample
    // and return and avoid any MSG.
    void* pc = hpcrun_context_pc(context);
    if (!hpcrun_safe_enter_async(pc)) return 0;
    linux_perf_events_pause();
    tData.numWatchpointTriggers++;
    //fprintf(stderr, " numWatchpointTriggers = %lu, \n", tData.numWatchpointTriggers);
    //find which watchpoint fired
    int location = -1;
    for(int i = 0 ; i < wpConfig.maxWP; i++) {
        // si_fd identifies the perf event whose signal we are handling.
        if((tData.watchPointArray[i].isActive) && (info->si_fd == tData.watchPointArray[i].fileHandle)) {
            location = i;
            break;
        }
    }
    // Ensure it is an active WP
    if(location == -1) {
        EMSG("\n WP trigger did not match any known active WP\n");
        //monitor_real_abort();
        hpcrun_safe_exit();
        linux_perf_events_resume();
        //fprintf("\n WP trigger did not match any known active WP\n");
        return 0;
    }
    WatchPointTrigger_t wpt;
    WPTriggerActionType retVal;
    WatchPointInfo_t *wpi = &tData.watchPointArray[location];
    // Perform Pre watchpoint action
    switch (wpi->sample.preWPAction) {
        case DISABLE_WP:
            DisableWatchpointWrapper(wpi);
            break;
        case DISABLE_ALL_WP:
            for(int i = 0; i < wpConfig.maxWP; i++) {
                if(tData.watchPointArray[i].isActive){
                    DisableWatchpointWrapper(&tData.watchPointArray[i]);
                }
            }
            break;
        default:
            assert(0 && "NYI");
            monitor_real_abort();
            break;
    }
    if( false == CollectWatchPointTriggerInfo(wpi, &wpt, context)) {
        tData.numWatchpointDropped++;
        retVal = DISABLE_WP; // disable if unable to collect any info.
    } else {
        // Hand the trigger to the client; its return value selects the
        // post-trigger action below.
        retVal = tData.fptr(wpi, 0, wpt.accessLength/* invalid*/, &wpt);
    }
    // Let the client take action.
    switch (retVal) {
        case DISABLE_WP: {
            if(wpi->isActive){
                DisableWatchpointWrapper(wpi);
            }
            // Reset per WP probability
            wpi->samplePostFull = SAMPLES_POST_FULL_RESET_VAL;
        }
            break;
        case DISABLE_ALL_WP: {
            for(int i = 0; i < wpConfig.maxWP; i++) {
                if(tData.watchPointArray[i].isActive){
                    DisableWatchpointWrapper(&tData.watchPointArray[i]);
                }
                // Reset per WP probability
                tData.watchPointArray[i].samplePostFull = SAMPLES_POST_FULL_RESET_VAL;
            }
        }
            break;
        case ALREADY_DISABLED: { // Already disabled, perhaps in pre-WP action
            assert(wpi->isActive == false);
            // Reset per WP probability
            wpi->samplePostFull = SAMPLES_POST_FULL_RESET_VAL;
        }
            break;
        case RETAIN_WP: { // resurrect this wp
            if(!wpi->isActive){
                EnableWatchpoint(wpi->fileHandle);
                wpi->isActive = true;
            }
        }
            break;
        default: // Retain the state
            break;
    }
    //    hpcrun_all_sources_start();
    linux_perf_events_resume();
    hpcrun_safe_exit();
    return 0;
}
// Validate a watchpoint request: the length must be 1/2/4/8 and the address
// must be aligned to that length (x86 hardware-breakpoint requirement).
// Aborts on length 0 or an unsupported length; returns false when merely
// misaligned.
static bool ValidateWPData(SampleData_t * sampleData){
    // Check alignment
#if defined(__x86_64__) || defined(__amd64__) || defined(__x86_64) || defined(__amd64)
    switch (sampleData->wpLength) {
        case 0: EMSG("\nValidateWPData: 0 length WP never allowed"); monitor_real_abort();
        case 1:
        case 2:
        case 4:
        case 8:
            if(IS_ALIGNED(sampleData->va, sampleData->wpLength))
                return true; // properly aligned
            else
                return false; // unaligned
            break;
        default:
            EMSG("Unsuppported WP length %d", sampleData->wpLength);
            monitor_real_abort();
            return false; // unsupported length (unreachable after abort)
    }
#else
#error "unknown architecture"
#endif
}
// Report whether the requested watchpoint range overlaps the range of any
// currently armed watchpoint.
static bool IsOveralpped(SampleData_t * sampleData){
    for (int slot = 0; slot < wpConfig.maxWP; slot++) {
        WatchPointInfo_t * wp = &tData.watchPointArray[slot];
        if (!wp->isActive)
            continue;
        if (ADDRESSES_OVERLAP(wp->sample.va, wp->sample.wpLength, sampleData->va, sampleData->wpLength))
            return true;
    }
    return false;
}
// Snapshot the current contents of the to-be-watched location into the WP
// slot's value buffer, so a later trigger can detect value changes.
// Note: `default` deliberately falls through to `case 1`, so an unexpected
// length is recorded as a 1-byte read.
void CaptureValue(SampleData_t * sampleData, WatchPointInfo_t * wpi){
    void * valLoc = & (wpi->value[0]);
    switch(sampleData->wpLength) {
        default: // force 1 length
        case 1: *((uint8_t*)valLoc) = *(uint8_t*)(sampleData->va); break;
        case 2: *((uint16_t*)valLoc) = *(uint16_t*)(sampleData->va); break;
        case 4: *((uint32_t*)valLoc) = *(uint32_t*)(sampleData->va); break;
        case 8: *((uint64_t*)valLoc) = *(uint64_t*)(sampleData->va); break;
    }
}
// Client entry point: try to arm a watchpoint for sampleData.
// The request is rejected (false) when it is invalid, overlaps an armed
// watchpoint, or no victim slot is available under the configured
// replacement policy.  When captureValue is set, the location's current
// value is snapshotted BEFORE arming.  Returns true once armed.
bool SubscribeWatchpoint(SampleData_t * sampleData, OverwritePolicy overwritePolicy, bool captureValue){
    if(!ValidateWPData(sampleData))
        return false;
    if(IsOveralpped(sampleData))
        return false; // drop the sample if it overlaps an existing address
    // No overlap, look for a victim slot
    int victimLocation = -1;
    if(GetVictim(&victimLocation, wpConfig.replacementPolicy) == NONE_AVAILABLE)
        return false;
    WatchPointInfo_t * victim = &tData.watchPointArray[victimLocation];
    // VV IMP: Capture value before arming the WP.
    if(captureValue)
        CaptureValue(sampleData, victim);
    // I know the error case that we have captured the value but ArmWatchPoint fails.
    // I am not handling that corner case because ArmWatchPoint() will fail with a monitor_real_abort().
    if(!ArmWatchPoint(victim, sampleData)){
        //LOG to hpcrun log
        EMSG("ArmWatchPoint failed for address %p", sampleData->va);
        return false;
    }
    return true;
}
#ifdef TEST
#include<omp.h>
__thread volatile int cnt;
// TEST-only upcall: logs the trigger address, sanity-checks it when LBR is
// on, counts the trigger, and asks for the watchpoint to be disabled.
// NOTE(review): this 2-argument signature and the WPUpCallTRetType/DISABLE
// return do not match how tData.fptr is invoked in OnWatchPoint (4 arguments,
// WPTriggerActionType values such as DISABLE_WP) — this TEST code appears
// stale; confirm before building with -DTEST.
WPUpCallTRetType Test1UpCall(WatchPointInfo_t * wp, WatchPointTrigger_t * wt) {
    printf("\n Test1UpCall %p\n", wt->va);
    if(wpConfig.isLBREnabled)
        assert(wp->sample.va == wt->va);
    cnt ++;
    return DISABLE;
}
void TestBasic(){
tData.fptr = Test1UpCall;
sigset_t block_mask;
sigemptyset (&block_mask);
// Set a signal handler for SIGUSR1
struct sigaction sa1 = {
.sa_sigaction = OnWatchPoint,
// .sa_mask = block_mask,
.sa_flags = SA_SIGINFO | SA_RESTART | SA_NODEFER
};
if(sigaction(wpConfig.signalDelivered, &sa1, NULL) == -1) {
fprintf(stderr, "Failed to set WHICH_SIG handler: %s\n", strerror(errno));
monitor_real_abort();
}
WatchpointThreadInit();
int N = 10000;
volatile int dummyWPLocation[10000];
cnt = 0;
for(int i = 0 ; i < N; i++) {
SampleData_t s = {.va = &dummyWPLocation[i], .wpLength = sizeof(int), .type = WP_WRITE};
SubscribeWatchpoint(&s, AUTO);
}
for(int i = 0 ; i < N; i++) {
dummyWPLocation[i]++;
}
printf("\n cnt = %d\n", cnt);
assert(cnt == wpConfig.maxWP);
WatchpointThreadTerminate();
}
// TEST-only driver: forever re-runs TestBasic() on every OpenMP thread
// (an intentional infinite stress loop; terminate the process externally).
int main() {
    printf("\n Test 1: single threaded");
    while(1) {
#pragma omp parallel
        {
            TestBasic();
        }
    }
    return 0;
}
#endif
/* ======== end of watchpoint source; next file: omp_array.c ======== */
/******************************************************************************
* * FILE: omp_array.c
* * DESCRIPTION:
* * Array addition - C/C++ Version
* * This is a simple array adition running with omp
* * AUTHOR: Victor Rodriguez
* * LAST REVISED: 04/06/05
* ******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
/**
 * Array addition demo: myarray_1[x] = myarray_2[x] + myarray_3[x] over ~2e9
 * elements, with the iterations divided among OpenMP threads.
 * Note: the source arrays are never initialized, so the sums are computed
 * over indeterminate values — presumably only the timing matters here.
 */
int main (int argc, char *argv[])
{
    int x = 0;
    long int sz2G=2000000000; // elements per array (~16 GB each of long int)
    // c dynamic array
    long int * myarray_1 = malloc(sizeof(long int) *sz2G);
    long int * myarray_2 = malloc(sizeof(long int) *sz2G);
    long int * myarray_3 = malloc(sizeof(long int) *sz2G);
    // ROBUSTNESS FIX: ~48 GB of allocations can easily fail; the original
    // dereferenced the pointers without checking.
    if (myarray_1 == NULL || myarray_2 == NULL || myarray_3 == NULL) {
        fprintf(stderr, "malloc failed\n");
        free(myarray_1);
        free(myarray_2);
        free(myarray_3);
        return 1;
    }

    // BUGFIX: the original used "#pragma omp parallel" around the loop, which
    // makes EVERY thread execute the FULL iteration range — redundant work
    // and racing stores to the same elements.  "parallel for" shares the
    // iterations out across the team (x, the loop variable, is private).
#pragma omp parallel for shared(myarray_1,myarray_2,myarray_3) private(x)
    for ( x = 0; x < sz2G ; x++ ) {
        myarray_1[x]= myarray_2[x] + myarray_3[x];
    }

    free(myarray_1);
    free(myarray_2);
    free(myarray_3);
    return 0;
}
/* ======== next file: lu.c ======== */
/*--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - LU
This benchmark is an OpenMP C version of the NPB LU code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: S. Weeratunga
V. Venkatakrishnan
E. Barszcz
M. Yarrow
OpenMP C version: S. Satoh
--------------------------------------------------------------------*/
#include "npb-C.h"
/* global variables */
#include "applu.h"
#if defined(_OPENMP)
/* for thread synchronization */
static boolean flag[ISIZ1/2*2+1];
#endif /* _OPENMP */
/* function declarations */
static void blts (int nx, int ny, int nz, int k,
double omega,
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double ldz[ISIZ1][ISIZ2][5][5],
double ldy[ISIZ1][ISIZ2][5][5],
double ldx[ISIZ1][ISIZ2][5][5],
double d[ISIZ1][ISIZ2][5][5],
int ist, int iend, int jst, int jend,
int nx0, int ny0 );
static void buts(int nx, int ny, int nz, int k,
double omega,
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double tv[ISIZ1][ISIZ2][5],
double d[ISIZ1][ISIZ2][5][5],
double udx[ISIZ1][ISIZ2][5][5],
double udy[ISIZ1][ISIZ2][5][5],
double udz[ISIZ1][ISIZ2][5][5],
int ist, int iend, int jst, int jend,
int nx0, int ny0 );
static void domain(void);
static void erhs(void);
static void error(void);
static void exact( int i, int j, int k, double u000ijk[5] );
static void jacld(int k);
static void jacu(int k);
static void l2norm (int nx0, int ny0, int nz0,
int ist, int iend,
int jst, int jend,
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double sum[5]);
static void pintgr(void);
static void read_input(void);
static void rhs(void);
static void setbv(void);
static void setcoeff(void);
static void setiv(void);
static void ssor(void);
static void verify(double xcr[5], double xce[5], double xci,
char *cclass, boolean *verified);
/*--------------------------------------------------------------------
program applu
--------------------------------------------------------------------*/
int main(int argc, char **argv) {
/*--------------------------------------------------------------------
c
c   driver for the performance evaluation of the solver for
c   five coupled parabolic/elliptic partial differential equations.
c
--------------------------------------------------------------------*/
    char cclass;
    boolean verified;
    double mflops;
    int nthreads = 1;
/*--------------------------------------------------------------------
c   read input data
--------------------------------------------------------------------*/
    read_input();
/*--------------------------------------------------------------------
c   set up domain sizes
--------------------------------------------------------------------*/
    domain();
/*--------------------------------------------------------------------
c   set up coefficients
--------------------------------------------------------------------*/
    setcoeff();
    // Initialization runs inside one parallel region; each worksharing
    // routine below contains its own omp for loops.
#pragma omp parallel
{
/*--------------------------------------------------------------------
c   set the boundary values for dependent variables
--------------------------------------------------------------------*/
    setbv();
/*--------------------------------------------------------------------
c   set the initial values for dependent variables
--------------------------------------------------------------------*/
    setiv();
/*--------------------------------------------------------------------
c   compute the forcing term based on prescribed exact solution
--------------------------------------------------------------------*/
    erhs();
#if defined(_OPENMP)
    // Record the team size once (master thread only) for the final report.
#pragma omp master
    nthreads = omp_get_num_threads();
#endif /* _OPENMP */
}
/*--------------------------------------------------------------------
c   perform the SSOR iterations
--------------------------------------------------------------------*/
    ssor();
/*--------------------------------------------------------------------
c   compute the solution error
--------------------------------------------------------------------*/
    error();
/*--------------------------------------------------------------------
c   compute the surface integral
--------------------------------------------------------------------*/
    pintgr();
/*--------------------------------------------------------------------
c   verification test
--------------------------------------------------------------------*/
    verify ( rsdnm, errnm, frc, &cclass, &verified );
    // Standard NPB LU MFLOPS model evaluated at the verified problem size.
    mflops = (double)itmax*(1984.77*(double)nx0
        *(double)ny0
        *(double)nz0
        -10923.3*pow2((double)( nx0+ny0+nz0 )/3.0)
        +27770.9* (double)( nx0+ny0+nz0 )/3.0
        -144010.0)
        / (maxtime*1000000.0);
    c_print_results("LU", cclass, nx0,
        ny0, nz0, itmax, nthreads,
        maxtime, mflops, "          floating point", verified,
        NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6,
        "(none)");
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c blts: block lower-triangular solve for one k-plane of the SSOR
c iteration: v <-- (L-inv) * v.  The 5x5 diagonal block is inverted
c in place by Gaussian elimination WITHOUT pivoting (the LU blocks
c are diagonally dominant by construction, so no pivoting is needed).
c
c Under OpenMP the second i-loop is a software pipeline: both loops
c use identical static schedules, and adjacent rows are synchronized
c point-to-point through the shared flag[] array (file-scope, declared
c elsewhere; presumably zero-initialized by the caller -- confirm).
--------------------------------------------------------------------*/
static void blts (int nx, int ny, int nz, int k,
double omega,
/*--------------------------------------------------------------------
c To improve cache performance, second two dimensions padded by 1
c for even number sizes only. Only needed in v.
--------------------------------------------------------------------*/
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double ldz[ISIZ1][ISIZ2][5][5],
double ldy[ISIZ1][ISIZ2][5][5],
double ldx[ISIZ1][ISIZ2][5][5],
double d[ISIZ1][ISIZ2][5][5],
int ist, int iend, int jst, int jend,
int nx0, int ny0 ) {
/*--------------------------------------------------------------------
c
c compute the regular-sparse, block lower triangular solution:
c
c v <-- ( L-inv ) * v
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, m;
double tmp, tmp1;
/* tmat is a scratch copy of the 5x5 diagonal block, destroyed by the
elimination below so d[][] itself is left untouched */
double tmat[5][5];
/* first sweep: subtract the k-direction (ldz) coupling with the
already-solved k-1 plane; rows are independent here */
#pragma omp for nowait schedule(static)
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
for (m = 0; m < 5; m++) {
v[i][j][k][m] = v[i][j][k][m]
- omega * ( ldz[i][j][m][0] * v[i][j][k-1][0]
+ ldz[i][j][m][1] * v[i][j][k-1][1]
+ ldz[i][j][m][2] * v[i][j][k-1][2]
+ ldz[i][j][m][3] * v[i][j][k-1][3]
+ ldz[i][j][m][4] * v[i][j][k-1][4] );
}
}
}
/* second sweep: i-1 and j-1 dependences make this a wavefront in i;
the static schedule matches the loop above so each thread owns the
same rows in both sweeps */
#pragma omp for nowait schedule(static)
for (i = ist; i <= iend; i++) {
#if defined(_OPENMP)
/* spin until the thread owning row i-1 has published it
(flag[i-1] set to 1 at the bottom of its iteration) */
if (i != ist) {
while (flag[i-1] == 0) {
#pragma omp flush(flag)
;
}
}
/* and until the downstream consumer has released this row's slot
from the previous k-plane (flag[i] back to 0) */
if (i != iend) {
while (flag[i] == 1) {
#pragma omp flush(flag)
;
}
}
#endif /* _OPENMP */
for (j = jst; j <= jend; j++) {
/* subtract the j-1 (ldy) and i-1 (ldx) couplings */
for (m = 0; m < 5; m++) {
v[i][j][k][m] = v[i][j][k][m]
- omega * ( ldy[i][j][m][0] * v[i][j-1][k][0]
+ ldx[i][j][m][0] * v[i-1][j][k][0]
+ ldy[i][j][m][1] * v[i][j-1][k][1]
+ ldx[i][j][m][1] * v[i-1][j][k][1]
+ ldy[i][j][m][2] * v[i][j-1][k][2]
+ ldx[i][j][m][2] * v[i-1][j][k][2]
+ ldy[i][j][m][3] * v[i][j-1][k][3]
+ ldx[i][j][m][3] * v[i-1][j][k][3]
+ ldy[i][j][m][4] * v[i][j-1][k][4]
+ ldx[i][j][m][4] * v[i-1][j][k][4] );
}
/*--------------------------------------------------------------------
c diagonal block inversion
c
c forward elimination
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
tmat[m][0] = d[i][j][m][0];
tmat[m][1] = d[i][j][m][1];
tmat[m][2] = d[i][j][m][2];
tmat[m][3] = d[i][j][m][3];
tmat[m][4] = d[i][j][m][4];
}
/* eliminate column 0 from rows 1..4 (fully unrolled) */
tmp1 = 1.0 / tmat[0][0];
tmp = tmp1 * tmat[1][0];
tmat[1][1] = tmat[1][1]
- tmp * tmat[0][1];
tmat[1][2] = tmat[1][2]
- tmp * tmat[0][2];
tmat[1][3] = tmat[1][3]
- tmp * tmat[0][3];
tmat[1][4] = tmat[1][4]
- tmp * tmat[0][4];
v[i][j][k][1] = v[i][j][k][1]
- v[i][j][k][0] * tmp;
tmp = tmp1 * tmat[2][0];
tmat[2][1] = tmat[2][1]
- tmp * tmat[0][1];
tmat[2][2] = tmat[2][2]
- tmp * tmat[0][2];
tmat[2][3] = tmat[2][3]
- tmp * tmat[0][3];
tmat[2][4] = tmat[2][4]
- tmp * tmat[0][4];
v[i][j][k][2] = v[i][j][k][2]
- v[i][j][k][0] * tmp;
tmp = tmp1 * tmat[3][0];
tmat[3][1] = tmat[3][1]
- tmp * tmat[0][1];
tmat[3][2] = tmat[3][2]
- tmp * tmat[0][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[0][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[0][4];
v[i][j][k][3] = v[i][j][k][3]
- v[i][j][k][0] * tmp;
tmp = tmp1 * tmat[4][0];
tmat[4][1] = tmat[4][1]
- tmp * tmat[0][1];
tmat[4][2] = tmat[4][2]
- tmp * tmat[0][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[0][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[0][4];
v[i][j][k][4] = v[i][j][k][4]
- v[i][j][k][0] * tmp;
/* eliminate column 1 from rows 2..4 */
tmp1 = 1.0 / tmat[ 1][1];
tmp = tmp1 * tmat[ 2][1];
tmat[2][2] = tmat[2][2]
- tmp * tmat[1][2];
tmat[2][3] = tmat[2][3]
- tmp * tmat[1][3];
tmat[2][4] = tmat[2][4]
- tmp * tmat[1][4];
v[i][j][k][2] = v[i][j][k][2]
- v[i][j][k][1] * tmp;
tmp = tmp1 * tmat[3][1];
tmat[3][2] = tmat[3][2]
- tmp * tmat[1][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[1][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[1][4];
v[i][j][k][3] = v[i][j][k][3]
- v[i][j][k][1] * tmp;
tmp = tmp1 * tmat[4][1];
tmat[4][2] = tmat[4][2]
- tmp * tmat[1][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[1][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[1][4];
v[i][j][k][4] = v[i][j][k][4]
- v[i][j][k][1] * tmp;
/* eliminate column 2 from rows 3..4 */
tmp1 = 1.0 / tmat[2][2];
tmp = tmp1 * tmat[3][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[2][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[2][4];
v[i][j][k][3] = v[i][j][k][3]
- v[i][j][k][2] * tmp;
tmp = tmp1 * tmat[4][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[2][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[2][4];
v[i][j][k][4] = v[i][j][k][4]
- v[i][j][k][2] * tmp;
/* eliminate column 3 from row 4 */
tmp1 = 1.0 / tmat[3][3];
tmp = tmp1 * tmat[4][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[3][4];
v[i][j][k][4] = v[i][j][k][4]
- v[i][j][k][3] * tmp;
/*--------------------------------------------------------------------
c back substitution
--------------------------------------------------------------------*/
v[i][j][k][4] = v[i][j][k][4]
/ tmat[4][4];
v[i][j][k][3] = v[i][j][k][3]
- tmat[3][4] * v[i][j][k][4];
v[i][j][k][3] = v[i][j][k][3]
/ tmat[3][3];
v[i][j][k][2] = v[i][j][k][2]
- tmat[2][3] * v[i][j][k][3]
- tmat[2][4] * v[i][j][k][4];
v[i][j][k][2] = v[i][j][k][2]
/ tmat[2][2];
v[i][j][k][1] = v[i][j][k][1]
- tmat[1][2] * v[i][j][k][2]
- tmat[1][3] * v[i][j][k][3]
- tmat[1][4] * v[i][j][k][4];
v[i][j][k][1] = v[i][j][k][1]
/ tmat[1][1];
v[i][j][k][0] = v[i][j][k][0]
- tmat[0][1] * v[i][j][k][1]
- tmat[0][2] * v[i][j][k][2]
- tmat[0][3] * v[i][j][k][3]
- tmat[0][4] * v[i][j][k][4];
v[i][j][k][0] = v[i][j][k][0]
/ tmat[0][0];
}
#if defined(_OPENMP)
/* release row i-1's producer slot and publish this row; the flush
makes the flag stores visible to the neighboring threads */
if (i != ist) flag[i-1] = 0;
if (i != iend) flag[i] = 1;
#pragma omp flush(flag)
#endif /* _OPENMP */
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c buts: block upper-triangular solve for one k-plane of the SSOR
c iteration: v <-- (U-inv) * v.  Mirror image of blts: loops run
c BACKWARD in i and j, coupling is to the k+1, j+1 and i+1 neighbors,
c and the correction is accumulated in the scratch array tv and only
c subtracted from v at the end of each (i,j) cell.
c
c The 5x5 diagonal block is inverted by unpivoted Gaussian
c elimination on a scratch copy (tmat), as in blts.  Under OpenMP the
c second i-loop is a reverse pipeline synchronized through the shared
c flag[] array (file-scope, declared elsewhere).
--------------------------------------------------------------------*/
static void buts(int nx, int ny, int nz, int k,
double omega,
/*--------------------------------------------------------------------
c To improve cache performance, second two dimensions padded by 1
c for even number sizes only. Only needed in v.
--------------------------------------------------------------------*/
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double tv[ISIZ1][ISIZ2][5],
double d[ISIZ1][ISIZ2][5][5],
double udx[ISIZ1][ISIZ2][5][5],
double udy[ISIZ1][ISIZ2][5][5],
double udz[ISIZ1][ISIZ2][5][5],
int ist, int iend, int jst, int jend,
int nx0, int ny0 ) {
/*--------------------------------------------------------------------
c
c compute the regular-sparse, block upper triangular solution:
c
c v <-- ( U-inv ) * v
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, m;
double tmp, tmp1;
double tmat[5][5];
/* first sweep: k-direction (udz) coupling with the already-solved
k+1 plane, accumulated into tv; rows are independent here */
#pragma omp for nowait schedule(static)
for (i = iend; i >= ist; i--) {
for (j = jend; j >= jst; j--) {
for (m = 0; m < 5; m++) {
tv[i][j][m] =
omega * ( udz[i][j][m][0] * v[i][j][k+1][0]
+ udz[i][j][m][1] * v[i][j][k+1][1]
+ udz[i][j][m][2] * v[i][j][k+1][2]
+ udz[i][j][m][3] * v[i][j][k+1][3]
+ udz[i][j][m][4] * v[i][j][k+1][4] );
}
}
}
/* second sweep: i+1 and j+1 dependences -> reverse wavefront in i;
identical static schedule keeps row ownership consistent */
#pragma omp for nowait schedule(static)
for (i = iend; i >= ist; i--) {
#if defined(_OPENMP)
/* wait until the thread owning row i+1 has published it */
if (i != iend) {
while (flag[i+1] == 0) {
#pragma omp flush(flag)
;
}
}
/* and until this row's slot has been released by its consumer */
if (i != ist) {
while (flag[i] == 1) {
#pragma omp flush(flag)
;
}
}
#endif /* _OPENMP */
for (j = jend; j >= jst; j--) {
/* add the j+1 (udy) and i+1 (udx) couplings into tv */
for (m = 0; m < 5; m++) {
tv[i][j][m] = tv[i][j][m]
+ omega * ( udy[i][j][m][0] * v[i][j+1][k][0]
+ udx[i][j][m][0] * v[i+1][j][k][0]
+ udy[i][j][m][1] * v[i][j+1][k][1]
+ udx[i][j][m][1] * v[i+1][j][k][1]
+ udy[i][j][m][2] * v[i][j+1][k][2]
+ udx[i][j][m][2] * v[i+1][j][k][2]
+ udy[i][j][m][3] * v[i][j+1][k][3]
+ udx[i][j][m][3] * v[i+1][j][k][3]
+ udy[i][j][m][4] * v[i][j+1][k][4]
+ udx[i][j][m][4] * v[i+1][j][k][4] );
}
/*--------------------------------------------------------------------
c diagonal block inversion
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
tmat[m][0] = d[i][j][m][0];
tmat[m][1] = d[i][j][m][1];
tmat[m][2] = d[i][j][m][2];
tmat[m][3] = d[i][j][m][3];
tmat[m][4] = d[i][j][m][4];
}
/* forward elimination on tmat/tv, column by column (unrolled) */
tmp1 = 1.0 / tmat[0][0];
tmp = tmp1 * tmat[1][0];
tmat[1][1] = tmat[1][1]
- tmp * tmat[0][1];
tmat[1][2] = tmat[1][2]
- tmp * tmat[0][2];
tmat[1][3] = tmat[1][3]
- tmp * tmat[0][3];
tmat[1][4] = tmat[1][4]
- tmp * tmat[0][4];
tv[i][j][1] = tv[i][j][1]
- tv[i][j][0] * tmp;
tmp = tmp1 * tmat[2][0];
tmat[2][1] = tmat[2][1]
- tmp * tmat[0][1];
tmat[2][2] = tmat[2][2]
- tmp * tmat[0][2];
tmat[2][3] = tmat[2][3]
- tmp * tmat[0][3];
tmat[2][4] = tmat[2][4]
- tmp * tmat[0][4];
tv[i][j][2] = tv[i][j][2]
- tv[i][j][0] * tmp;
tmp = tmp1 * tmat[3][0];
tmat[3][1] = tmat[3][1]
- tmp * tmat[0][1];
tmat[3][2] = tmat[3][2]
- tmp * tmat[0][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[0][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[0][4];
tv[i][j][3] = tv[i][j][3]
- tv[i][j][0] * tmp;
tmp = tmp1 * tmat[4][0];
tmat[4][1] = tmat[4][1]
- tmp * tmat[0][1];
tmat[4][2] = tmat[4][2]
- tmp * tmat[0][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[0][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[0][4];
tv[i][j][4] = tv[i][j][4]
- tv[i][j][0] * tmp;
tmp1 = 1.0 / tmat[1][1];
tmp = tmp1 * tmat[2][1];
tmat[2][2] = tmat[2][2]
- tmp * tmat[1][2];
tmat[2][3] = tmat[2][3]
- tmp * tmat[1][3];
tmat[2][4] = tmat[2][4]
- tmp * tmat[1][4];
tv[i][j][2] = tv[i][j][2]
- tv[i][j][1] * tmp;
tmp = tmp1 * tmat[3][1];
tmat[3][2] = tmat[3][2]
- tmp * tmat[1][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[1][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[1][4];
tv[i][j][3] = tv[i][j][3]
- tv[i][j][1] * tmp;
tmp = tmp1 * tmat[4][1];
tmat[4][2] = tmat[4][2]
- tmp * tmat[1][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[1][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[1][4];
tv[i][j][4] = tv[i][j][4]
- tv[i][j][1] * tmp;
tmp1 = 1.0 / tmat[2][2];
tmp = tmp1 * tmat[3][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[2][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[2][4];
tv[i][j][3] = tv[i][j][3]
- tv[i][j][2] * tmp;
tmp = tmp1 * tmat[4][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[2][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[2][4];
tv[i][j][4] = tv[i][j][4]
- tv[i][j][2] * tmp;
tmp1 = 1.0 / tmat[3][3];
tmp = tmp1 * tmat[4][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[3][4];
tv[i][j][4] = tv[i][j][4]
- tv[i][j][3] * tmp;
/*--------------------------------------------------------------------
c back substitution
--------------------------------------------------------------------*/
tv[i][j][4] = tv[i][j][4]
/ tmat[4][4];
tv[i][j][3] = tv[i][j][3]
- tmat[3][4] * tv[i][j][4];
tv[i][j][3] = tv[i][j][3]
/ tmat[3][3];
tv[i][j][2] = tv[i][j][2]
- tmat[2][3] * tv[i][j][3]
- tmat[2][4] * tv[i][j][4];
tv[i][j][2] = tv[i][j][2]
/ tmat[2][2];
tv[i][j][1] = tv[i][j][1]
- tmat[1][2] * tv[i][j][2]
- tmat[1][3] * tv[i][j][3]
- tmat[1][4] * tv[i][j][4];
tv[i][j][1] = tv[i][j][1]
/ tmat[1][1];
tv[i][j][0] = tv[i][j][0]
- tmat[0][1] * tv[i][j][1]
- tmat[0][2] * tv[i][j][2]
- tmat[0][3] * tv[i][j][3]
- tmat[0][4] * tv[i][j][4];
tv[i][j][0] = tv[i][j][0]
/ tmat[0][0];
/* apply the correction to the solution vector */
v[i][j][k][0] = v[i][j][k][0] - tv[i][j][0];
v[i][j][k][1] = v[i][j][k][1] - tv[i][j][1];
v[i][j][k][2] = v[i][j][k][2] - tv[i][j][2];
v[i][j][k][3] = v[i][j][k][3] - tv[i][j][3];
v[i][j][k][4] = v[i][j][k][4] - tv[i][j][4];
}
#if defined(_OPENMP)
/* release the upstream row and publish this one (reverse order) */
if (i != iend) flag[i+1] = 0;
if (i != ist) flag[i] = 1;
#pragma omp flush(flag)
#endif /* _OPENMP */
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c domain: copy the global problem size (nx0,ny0,nz0) into the working
c sub-domain extents (nx,ny,nz), validate the size against the
c stencil requirements and the static array bounds, and set the
c interior loop limits ist/iend and jst/jend.  Exits the program on
c an invalid configuration.
--------------------------------------------------------------------*/
static void domain(void) {
nx = nx0;
ny = ny0;
nz = nz0;
/* the 5-point dissipation stencils need at least 4 points per axis */
if (!( nx >= 4 && ny >= 4 && nz >= 4 )) {
printf(" SUBDOMAIN SIZE IS TOO SMALL - \n"
" ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n"
" SO THAT NX, NY AND NZ ARE GREATER THAN OR EQUAL\n"
" TO 4 THEY ARE CURRENTLY%3d%3d%3d\n", nx, ny, nz);
exit(1);
}
/* the grid must also fit in the statically allocated arrays */
if (!( nx <= ISIZ1 && ny <= ISIZ2 && nz <= ISIZ3 )) {
printf(" SUBDOMAIN SIZE IS TOO LARGE - \n"
" ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n"
" SO THAT NX, NY AND NZ ARE LESS THAN OR EQUAL TO \n"
" ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY. THEY ARE\n"
" CURRENTLY%4d%4d%4d\n", nx, ny, nz);
exit(1);
}
/* interior extents in i and j: boundary points are excluded */
ist = 1;
iend = nx - 2;
jst = 1;
jend = ny - 2;
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c erhs: build the steady-state forcing term frct from the exact
c solution polynomial.  rsd is used here as scratch storage for the
c exact solution; flux is scratch for the directional fluxes.  Each
c of the three coordinate directions contributes (a) a second-order
c flux difference, (b) viscous terms, and (c) fourth-order artificial
c dissipation with one-sided stencils near the boundaries.
--------------------------------------------------------------------*/
static void erhs(void) {
/*--------------------------------------------------------------------
c
c compute the right hand side based on exact solution
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k, m;
int iglob, jglob;
int L1, L2;
int ist1, iend1;
int jst1, jend1;
double dsspm;
double xi, eta, zeta;
double q;
double u21, u31, u41;
double tmp;
double u21i, u31i, u41i, u51i;
double u21j, u31j, u41j, u51j;
double u21k, u31k, u41k, u51k;
double u21im1, u31im1, u41im1, u51im1;
double u21jm1, u31jm1, u41jm1, u51jm1;
double u21km1, u31km1, u41km1, u51km1;
dsspm = dssp;
/* zero the forcing term everywhere */
#pragma omp for
for (i = 0; i < nx; i++) {
for (j = 0; j < ny; j++) {
for (k = 0; k < nz; k++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = 0.0;
}
}
}
}
/* evaluate the exact-solution polynomial (same form as exact()) into
rsd at every grid point; note xi/eta use the GLOBAL sizes nx0/ny0
while zeta uses the local nz */
#pragma omp for
for (i = 0; i < nx; i++) {
iglob = i;
xi = ( (double)(iglob) ) / ( nx0 - 1 );
for (j = 0; j < ny; j++) {
jglob = j;
eta = ( (double)(jglob) ) / ( ny0 - 1 );
for (k = 0; k < nz; k++) {
zeta = ( (double)(k) ) / ( nz - 1 );
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = ce[m][0]
+ ce[m][1] * xi
+ ce[m][2] * eta
+ ce[m][3] * zeta
+ ce[m][4] * xi * xi
+ ce[m][5] * eta * eta
+ ce[m][6] * zeta * zeta
+ ce[m][7] * xi * xi * xi
+ ce[m][8] * eta * eta * eta
+ ce[m][9] * zeta * zeta * zeta
+ ce[m][10] * xi * xi * xi * xi
+ ce[m][11] * eta * eta * eta * eta
+ ce[m][12] * zeta * zeta * zeta * zeta;
}
}
}
}
/*--------------------------------------------------------------------
c xi-direction flux differences
--------------------------------------------------------------------*/
L1 = 0;
L2 = nx-1;
/* inviscid flux in xi, over the full i-range including boundaries */
#pragma omp for
for (i = L1; i <= L2; i++) {
for (j = jst; j <= jend; j++) {
for (k = 1; k < nz - 1; k++) {
flux[i][j][k][0] = rsd[i][j][k][1];
u21 = rsd[i][j][k][1] / rsd[i][j][k][0];
q = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]
+ rsd[i][j][k][2] * rsd[i][j][k][2]
+ rsd[i][j][k][3] * rsd[i][j][k][3] )
/ rsd[i][j][k][0];
flux[i][j][k][1] = rsd[i][j][k][1] * u21 + C2 *
( rsd[i][j][k][4] - q );
flux[i][j][k][2] = rsd[i][j][k][2] * u21;
flux[i][j][k][3] = rsd[i][j][k][3] * u21;
flux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u21;
}
}
}
#pragma omp for
for (j = jst; j <= jend; j++) {
for (k = 1; k <= nz - 2; k++) {
/* central difference of the inviscid flux */
for (i = ist; i <= iend; i++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );
}
}
/* viscous flux (reuses flux[] as scratch) */
for (i = ist; i <= L2; i++) {
tmp = 1.0 / rsd[i][j][k][0];
u21i = tmp * rsd[i][j][k][1];
u31i = tmp * rsd[i][j][k][2];
u41i = tmp * rsd[i][j][k][3];
u51i = tmp * rsd[i][j][k][4];
tmp = 1.0 / rsd[i-1][j][k][0];
u21im1 = tmp * rsd[i-1][j][k][1];
u31im1 = tmp * rsd[i-1][j][k][2];
u41im1 = tmp * rsd[i-1][j][k][3];
u51im1 = tmp * rsd[i-1][j][k][4];
flux[i][j][k][1] = (4.0/3.0) * tx3 *
( u21i - u21im1 );
flux[i][j][k][2] = tx3 * ( u31i - u31im1 );
flux[i][j][k][3] = tx3 * ( u41i - u41im1 );
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* tx3 * ( ( u21i * u21i + u31i * u31i + u41i * u41i )
- ( u21im1*u21im1 + u31im1*u31im1 + u41im1*u41im1 ) )
+ (1.0/6.0)
* tx3 * ( u21i*u21i - u21im1*u21im1 )
+ C1 * C5 * tx3 * ( u51i - u51im1 );
}
/* add viscous flux difference plus second-difference damping */
for (i = ist; i <= iend; i++) {
frct[i][j][k][0] = frct[i][j][k][0]
+ dx1 * tx1 * ( rsd[i-1][j][k][0]
- 2.0 * rsd[i][j][k][0]
+ rsd[i+1][j][k][0] );
frct[i][j][k][1] = frct[i][j][k][1]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] )
+ dx2 * tx1 * ( rsd[i-1][j][k][1]
- 2.0 * rsd[i][j][k][1]
+ rsd[i+1][j][k][1] );
frct[i][j][k][2] = frct[i][j][k][2]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] )
+ dx3 * tx1 * ( rsd[i-1][j][k][2]
- 2.0 * rsd[i][j][k][2]
+ rsd[i+1][j][k][2] );
frct[i][j][k][3] = frct[i][j][k][3]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][3] - flux[i][j][k][3] )
+ dx4 * tx1 * ( rsd[i-1][j][k][3]
- 2.0 * rsd[i][j][k][3]
+ rsd[i+1][j][k][3] );
frct[i][j][k][4] = frct[i][j][k][4]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] )
+ dx5 * tx1 * ( rsd[i-1][j][k][4]
- 2.0 * rsd[i][j][k][4]
+ rsd[i+1][j][k][4] );
}
/*--------------------------------------------------------------------
c Fourth-order dissipation
--------------------------------------------------------------------*/
/* one-sided stencils at the low-i boundary */
for (m = 0; m < 5; m++) {
frct[1][j][k][m] = frct[1][j][k][m]
- dsspm * ( + 5.0 * rsd[1][j][k][m]
- 4.0 * rsd[2][j][k][m]
+ rsd[3][j][k][m] );
frct[2][j][k][m] = frct[2][j][k][m]
- dsspm * ( - 4.0 * rsd[1][j][k][m]
+ 6.0 * rsd[2][j][k][m]
- 4.0 * rsd[3][j][k][m]
+ rsd[4][j][k][m] );
}
/* full 5-point stencil in the interior */
ist1 = 3;
iend1 = nx - 4;
for (i = ist1; i <=iend1; i++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- dsspm * ( rsd[i-2][j][k][m]
- 4.0 * rsd[i-1][j][k][m]
+ 6.0 * rsd[i][j][k][m]
- 4.0 * rsd[i+1][j][k][m]
+ rsd[i+2][j][k][m] );
}
}
/* one-sided stencils at the high-i boundary */
for (m = 0; m < 5; m++) {
frct[nx-3][j][k][m] = frct[nx-3][j][k][m]
- dsspm * ( rsd[nx-5][j][k][m]
- 4.0 * rsd[nx-4][j][k][m]
+ 6.0 * rsd[nx-3][j][k][m]
- 4.0 * rsd[nx-2][j][k][m] );
frct[nx-2][j][k][m] = frct[nx-2][j][k][m]
- dsspm * ( rsd[nx-4][j][k][m]
- 4.0 * rsd[nx-3][j][k][m]
+ 5.0 * rsd[nx-2][j][k][m] );
}
}
}
/*--------------------------------------------------------------------
c eta-direction flux differences
--------------------------------------------------------------------*/
L1 = 0;
L2 = ny-1;
#pragma omp for
for (i = ist; i <= iend; i++) {
for (j = L1; j <= L2; j++) {
for (k = 1; k <= nz - 2; k++) {
flux[i][j][k][0] = rsd[i][j][k][2];
u31 = rsd[i][j][k][2] / rsd[i][j][k][0];
q = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]
+ rsd[i][j][k][2] * rsd[i][j][k][2]
+ rsd[i][j][k][3] * rsd[i][j][k][3] )
/ rsd[i][j][k][0];
flux[i][j][k][1] = rsd[i][j][k][1] * u31;
flux[i][j][k][2] = rsd[i][j][k][2] * u31 + C2 *
( rsd[i][j][k][4] - q );
flux[i][j][k][3] = rsd[i][j][k][3] * u31;
flux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u31;
}
}
}
#pragma omp for
for (i = ist; i <= iend; i++) {
for (k = 1; k <= nz - 2; k++) {
for (j = jst; j <= jend; j++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );
}
}
for (j = jst; j <= L2; j++) {
tmp = 1.0 / rsd[i][j][k][0];
u21j = tmp * rsd[i][j][k][1];
u31j = tmp * rsd[i][j][k][2];
u41j = tmp * rsd[i][j][k][3];
u51j = tmp * rsd[i][j][k][4];
tmp = 1.0 / rsd[i][j-1][k][0];
u21jm1 = tmp * rsd[i][j-1][k][1];
u31jm1 = tmp * rsd[i][j-1][k][2];
u41jm1 = tmp * rsd[i][j-1][k][3];
u51jm1 = tmp * rsd[i][j-1][k][4];
flux[i][j][k][1] = ty3 * ( u21j - u21jm1 );
flux[i][j][k][2] = (4.0/3.0) * ty3 *
( u31j - u31jm1 );
flux[i][j][k][3] = ty3 * ( u41j - u41jm1 );
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* ty3 * ( ( u21j *u21j + u31j *u31j + u41j *u41j )
- ( u21jm1*u21jm1 + u31jm1*u31jm1 + u41jm1*u41jm1 ) )
+ (1.0/6.0)
* ty3 * ( u31j*u31j - u31jm1*u31jm1 )
+ C1 * C5 * ty3 * ( u51j - u51jm1 );
}
for (j = jst; j <= jend; j++) {
frct[i][j][k][0] = frct[i][j][k][0]
+ dy1 * ty1 * ( rsd[i][j-1][k][0]
- 2.0 * rsd[i][j][k][0]
+ rsd[i][j+1][k][0] );
frct[i][j][k][1] = frct[i][j][k][1]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] )
+ dy2 * ty1 * ( rsd[i][j-1][k][1]
- 2.0 * rsd[i][j][k][1]
+ rsd[i][j+1][k][1] );
frct[i][j][k][2] = frct[i][j][k][2]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] )
+ dy3 * ty1 * ( rsd[i][j-1][k][2]
- 2.0 * rsd[i][j][k][2]
+ rsd[i][j+1][k][2] );
frct[i][j][k][3] = frct[i][j][k][3]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] )
+ dy4 * ty1 * ( rsd[i][j-1][k][3]
- 2.0 * rsd[i][j][k][3]
+ rsd[i][j+1][k][3] );
frct[i][j][k][4] = frct[i][j][k][4]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] )
+ dy5 * ty1 * ( rsd[i][j-1][k][4]
- 2.0 * rsd[i][j][k][4]
+ rsd[i][j+1][k][4] );
}
/*--------------------------------------------------------------------
c fourth-order dissipation
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
frct[i][1][k][m] = frct[i][1][k][m]
- dsspm * ( + 5.0 * rsd[i][1][k][m]
- 4.0 * rsd[i][2][k][m]
+ rsd[i][3][k][m] );
frct[i][2][k][m] = frct[i][2][k][m]
- dsspm * ( - 4.0 * rsd[i][1][k][m]
+ 6.0 * rsd[i][2][k][m]
- 4.0 * rsd[i][3][k][m]
+ rsd[i][4][k][m] );
}
jst1 = 3;
jend1 = ny - 4;
for (j = jst1; j <= jend1; j++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- dsspm * ( rsd[i][j-2][k][m]
- 4.0 * rsd[i][j-1][k][m]
+ 6.0 * rsd[i][j][k][m]
- 4.0 * rsd[i][j+1][k][m]
+ rsd[i][j+2][k][m] );
}
}
for (m = 0; m < 5; m++) {
frct[i][ny-3][k][m] = frct[i][ny-3][k][m]
- dsspm * ( rsd[i][ny-5][k][m]
- 4.0 * rsd[i][ny-4][k][m]
+ 6.0 * rsd[i][ny-3][k][m]
- 4.0 * rsd[i][ny-2][k][m] );
frct[i][ny-2][k][m] = frct[i][ny-2][k][m]
- dsspm * ( rsd[i][ny-4][k][m]
- 4.0 * rsd[i][ny-3][k][m]
+ 5.0 * rsd[i][ny-2][k][m] );
}
}
}
/*--------------------------------------------------------------------
c zeta-direction flux differences
--------------------------------------------------------------------*/
#pragma omp for
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
for (k = 0; k <= nz-1; k++) {
flux[i][j][k][0] = rsd[i][j][k][3];
u41 = rsd[i][j][k][3] / rsd[i][j][k][0];
q = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]
+ rsd[i][j][k][2] * rsd[i][j][k][2]
+ rsd[i][j][k][3] * rsd[i][j][k][3] )
/ rsd[i][j][k][0];
flux[i][j][k][1] = rsd[i][j][k][1] * u41;
flux[i][j][k][2] = rsd[i][j][k][2] * u41;
flux[i][j][k][3] = rsd[i][j][k][3] * u41 + C2 *
( rsd[i][j][k][4] - q );
flux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u41;
}
for (k = 1; k <= nz - 2; k++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] );
}
}
for (k = 1; k <= nz-1; k++) {
tmp = 1.0 / rsd[i][j][k][0];
u21k = tmp * rsd[i][j][k][1];
u31k = tmp * rsd[i][j][k][2];
u41k = tmp * rsd[i][j][k][3];
u51k = tmp * rsd[i][j][k][4];
tmp = 1.0 / rsd[i][j][k-1][0];
u21km1 = tmp * rsd[i][j][k-1][1];
u31km1 = tmp * rsd[i][j][k-1][2];
u41km1 = tmp * rsd[i][j][k-1][3];
u51km1 = tmp * rsd[i][j][k-1][4];
flux[i][j][k][1] = tz3 * ( u21k - u21km1 );
flux[i][j][k][2] = tz3 * ( u31k - u31km1 );
flux[i][j][k][3] = (4.0/3.0) * tz3 * ( u41k
- u41km1 );
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* tz3 * ( ( u21k *u21k + u31k *u31k + u41k *u41k )
- ( u21km1*u21km1 + u31km1*u31km1 + u41km1*u41km1 ) )
+ (1.0/6.0)
* tz3 * ( u41k*u41k - u41km1*u41km1 )
+ C1 * C5 * tz3 * ( u51k - u51km1 );
}
for (k = 1; k <= nz - 2; k++) {
frct[i][j][k][0] = frct[i][j][k][0]
+ dz1 * tz1 * ( rsd[i][j][k+1][0]
- 2.0 * rsd[i][j][k][0]
+ rsd[i][j][k-1][0] );
frct[i][j][k][1] = frct[i][j][k][1]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][1] - flux[i][j][k][1] )
+ dz2 * tz1 * ( rsd[i][j][k+1][1]
- 2.0 * rsd[i][j][k][1]
+ rsd[i][j][k-1][1] );
frct[i][j][k][2] = frct[i][j][k][2]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][2] - flux[i][j][k][2] )
+ dz3 * tz1 * ( rsd[i][j][k+1][2]
- 2.0 * rsd[i][j][k][2]
+ rsd[i][j][k-1][2] );
frct[i][j][k][3] = frct[i][j][k][3]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][3] - flux[i][j][k][3] )
+ dz4 * tz1 * ( rsd[i][j][k+1][3]
- 2.0 * rsd[i][j][k][3]
+ rsd[i][j][k-1][3] );
frct[i][j][k][4] = frct[i][j][k][4]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][4] - flux[i][j][k][4] )
+ dz5 * tz1 * ( rsd[i][j][k+1][4]
- 2.0 * rsd[i][j][k][4]
+ rsd[i][j][k-1][4] );
}
/*--------------------------------------------------------------------
c fourth-order dissipation
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
frct[i][j][1][m] = frct[i][j][1][m]
- dsspm * ( + 5.0 * rsd[i][j][1][m]
- 4.0 * rsd[i][j][2][m]
+ rsd[i][j][3][m] );
frct[i][j][2][m] = frct[i][j][2][m]
- dsspm * (- 4.0 * rsd[i][j][1][m]
+ 6.0 * rsd[i][j][2][m]
- 4.0 * rsd[i][j][3][m]
+ rsd[i][j][4][m] );
}
for (k = 3; k <= nz - 4; k++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- dsspm * ( rsd[i][j][k-2][m]
- 4.0 * rsd[i][j][k-1][m]
+ 6.0 * rsd[i][j][k][m]
- 4.0 * rsd[i][j][k+1][m]
+ rsd[i][j][k+2][m] );
}
}
for (m = 0; m < 5; m++) {
frct[i][j][nz-3][m] = frct[i][j][nz-3][m]
- dsspm * ( rsd[i][j][nz-5][m]
- 4.0 * rsd[i][j][nz-4][m]
+ 6.0 * rsd[i][j][nz-3][m]
- 4.0 * rsd[i][j][nz-2][m] );
frct[i][j][nz-2][m] = frct[i][j][nz-2][m]
- dsspm * ( rsd[i][j][nz-4][m]
- 4.0 * rsd[i][j][nz-3][m]
+ 5.0 * rsd[i][j][nz-2][m] );
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c error: compute the RMS norm of the solution error over the interior
c of the sub-domain, per component, into the global errnm[5]:
c errnm[m] = sqrt( sum (exact - u)^2 / number of interior points ).
--------------------------------------------------------------------*/
static void error(void) {
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k, m;
double diff;
double uexact[5];
for (m = 0; m < 5; m++) {
errnm[m] = 0.0;
}
/* accumulate squared pointwise errors over interior points only
(in this serial version the global index equals the local one) */
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
for (k = 1; k <= nz-2; k++) {
exact( i, j, k, uexact );
for (m = 0; m < 5; m++) {
diff = uexact[m] - u[i][j][k][m];
errnm[m] += diff * diff;
}
}
}
}
/* normalize by the interior point count and take the square root */
for (m = 0; m < 5; m++) {
errnm[m] = sqrt ( errnm[m] / ( (nx0-2)*(ny0-2)*(nz0-2) ) );
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c exact: evaluate the prescribed exact solution at grid point (i,j,k)
c into u000ijk[0..4].  The solution is a polynomial in the normalized
c coordinates (xi,eta,zeta) with coefficients taken from the global
c ce[5][13] table; xi/eta are normalized by the global sizes nx0/ny0,
c zeta by the local nz.
--------------------------------------------------------------------*/
static void exact( int i, int j, int k, double u000ijk[5] ) {
int m;
double xi, eta, zeta;
double acc;
xi = ((double)i) / (nx0 - 1);
eta = ((double)j) / (ny0 - 1);
zeta = ((double)k) / (nz - 1);
for (m = 0; m < 5; m++) {
/* accumulate the polynomial terms strictly left-to-right so the
floating-point result matches the original single expression */
acc = ce[m][0];
acc += ce[m][1] * xi;
acc += ce[m][2] * eta;
acc += ce[m][3] * zeta;
acc += ce[m][4] * xi * xi;
acc += ce[m][5] * eta * eta;
acc += ce[m][6] * zeta * zeta;
acc += ce[m][7] * xi * xi * xi;
acc += ce[m][8] * eta * eta * eta;
acc += ce[m][9] * zeta * zeta * zeta;
acc += ce[m][10] * xi * xi * xi * xi;
acc += ce[m][11] * eta * eta * eta * eta;
acc += ce[m][12] * zeta * zeta * zeta * zeta;
u000ijk[m] = acc;
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void jacld(int k) {
/*--------------------------------------------------------------------
c compute the lower triangular part of the jacobian matrix
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j;
double r43;
double c1345;
double c34;
double tmp1, tmp2, tmp3;
r43 = ( 4.0 / 3.0 );
c1345 = C1 * C3 * C4 * C5;
c34 = C3 * C4;
#pragma omp for nowait schedule(static)
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
/*--------------------------------------------------------------------
c form the block daigonal
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
d[i][j][0][0] = 1.0
+ dt * 2.0 * ( tx1 * dx1
+ ty1 * dy1
+ tz1 * dz1 );
d[i][j][0][1] = 0.0;
d[i][j][0][2] = 0.0;
d[i][j][0][3] = 0.0;
d[i][j][0][4] = 0.0;
d[i][j][1][0] = dt * 2.0
* ( tx1 * ( - r43 * c34 * tmp2 * u[i][j][k][1] )
+ ty1 * ( - c34 * tmp2 * u[i][j][k][1] )
+ tz1 * ( - c34 * tmp2 * u[i][j][k][1] ) );
d[i][j][1][1] = 1.0
+ dt * 2.0
* ( tx1 * r43 * c34 * tmp1
+ ty1 * c34 * tmp1
+ tz1 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx2
+ ty1 * dy2
+ tz1 * dz2 );
d[i][j][1][2] = 0.0;
d[i][j][1][3] = 0.0;
d[i][j][1][4] = 0.0;
d[i][j][2][0] = dt * 2.0
* ( tx1 * ( - c34 * tmp2 * u[i][j][k][2] )
+ ty1 * ( - r43 * c34 * tmp2 * u[i][j][k][2] )
+ tz1 * ( - c34 * tmp2 * u[i][j][k][2] ) );
d[i][j][2][1] = 0.0;
d[i][j][2][2] = 1.0
+ dt * 2.0
* ( tx1 * c34 * tmp1
+ ty1 * r43 * c34 * tmp1
+ tz1 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx3
+ ty1 * dy3
+ tz1 * dz3 );
d[i][j][2][3] = 0.0;
d[i][j][2][4] = 0.0;
d[i][j][3][0] = dt * 2.0
* ( tx1 * ( - c34 * tmp2 * u[i][j][k][3] )
+ ty1 * ( - c34 * tmp2 * u[i][j][k][3] )
+ tz1 * ( - r43 * c34 * tmp2 * u[i][j][k][3] ) );
d[i][j][3][1] = 0.0;
d[i][j][3][2] = 0.0;
d[i][j][3][3] = 1.0
+ dt * 2.0
* ( tx1 * c34 * tmp1
+ ty1 * c34 * tmp1
+ tz1 * r43 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx4
+ ty1 * dy4
+ tz1 * dz4 );
d[i][j][3][4] = 0.0;
d[i][j][4][0] = dt * 2.0
* ( tx1 * ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] )
+ ty1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] )
+ tz1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] ) );
d[i][j][4][1] = dt * 2.0
* ( tx1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][1]
+ ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1]
+ tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1] );
d[i][j][4][2] = dt * 2.0
* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2]
+ ty1 * ( r43*c34 -c1345 ) * tmp2 * u[i][j][k][2]
+ tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2] );
d[i][j][4][3] = dt * 2.0
* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]
+ ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]
+ tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][3] );
d[i][j][4][4] = 1.0
+ dt * 2.0 * ( tx1 * c1345 * tmp1
+ ty1 * c1345 * tmp1
+ tz1 * c1345 * tmp1 )
+ dt * 2.0 * ( tx1 * dx5
+ ty1 * dy5
+ tz1 * dz5 );
/*--------------------------------------------------------------------
c form the first block sub-diagonal
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i][j][k-1][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
a[i][j][0][0] = - dt * tz1 * dz1;
a[i][j][0][1] = 0.0;
a[i][j][0][2] = 0.0;
a[i][j][0][3] = - dt * tz2;
a[i][j][0][4] = 0.0;
a[i][j][1][0] = - dt * tz2
* ( - ( u[i][j][k-1][1]*u[i][j][k-1][3] ) * tmp2 )
- dt * tz1 * ( - c34 * tmp2 * u[i][j][k-1][1] );
a[i][j][1][1] = - dt * tz2 * ( u[i][j][k-1][3] * tmp1 )
- dt * tz1 * c34 * tmp1
- dt * tz1 * dz2 ;
a[i][j][1][2] = 0.0;
a[i][j][1][3] = - dt * tz2 * ( u[i][j][k-1][1] * tmp1 );
a[i][j][1][4] = 0.0;
a[i][j][2][0] = - dt * tz2
* ( - ( u[i][j][k-1][2]*u[i][j][k-1][3] ) * tmp2 )
- dt * tz1 * ( - c34 * tmp2 * u[i][j][k-1][2] );
a[i][j][2][1] = 0.0;
a[i][j][2][2] = - dt * tz2 * ( u[i][j][k-1][3] * tmp1 )
- dt * tz1 * ( c34 * tmp1 )
- dt * tz1 * dz3;
a[i][j][2][3] = - dt * tz2 * ( u[i][j][k-1][2] * tmp1 );
a[i][j][2][4] = 0.0;
a[i][j][3][0] = - dt * tz2
* ( - ( u[i][j][k-1][3] * tmp1 ) *( u[i][j][k-1][3] * tmp1 )
+ 0.50 * C2
* ( ( u[i][j][k-1][1] * u[i][j][k-1][1]
+ u[i][j][k-1][2] * u[i][j][k-1][2]
+ u[i][j][k-1][3] * u[i][j][k-1][3] ) * tmp2 ) )
- dt * tz1 * ( - r43 * c34 * tmp2 * u[i][j][k-1][3] );
a[i][j][3][1] = - dt * tz2
* ( - C2 * ( u[i][j][k-1][1] * tmp1 ) );
a[i][j][3][2] = - dt * tz2
* ( - C2 * ( u[i][j][k-1][2] * tmp1 ) );
a[i][j][3][3] = - dt * tz2 * ( 2.0 - C2 )
* ( u[i][j][k-1][3] * tmp1 )
- dt * tz1 * ( r43 * c34 * tmp1 )
- dt * tz1 * dz4;
a[i][j][3][4] = - dt * tz2 * C2;
a[i][j][4][0] = - dt * tz2
* ( ( C2 * ( u[i][j][k-1][1] * u[i][j][k-1][1]
+ u[i][j][k-1][2] * u[i][j][k-1][2]
+ u[i][j][k-1][3] * u[i][j][k-1][3] ) * tmp2
- C1 * ( u[i][j][k-1][4] * tmp1 ) )
* ( u[i][j][k-1][3] * tmp1 ) )
- dt * tz1
* ( - ( c34 - c1345 ) * tmp3 * (u[i][j][k-1][1]*u[i][j][k-1][1])
- ( c34 - c1345 ) * tmp3 * (u[i][j][k-1][2]*u[i][j][k-1][2])
- ( r43*c34 - c1345 )* tmp3 * (u[i][j][k-1][3]*u[i][j][k-1][3])
- c1345 * tmp2 * u[i][j][k-1][4] );
a[i][j][4][1] = - dt * tz2
* ( - C2 * ( u[i][j][k-1][1]*u[i][j][k-1][3] ) * tmp2 )
- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k-1][1];
a[i][j][4][2] = - dt * tz2
* ( - C2 * ( u[i][j][k-1][2]*u[i][j][k-1][3] ) * tmp2 )
- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k-1][2];
a[i][j][4][3] = - dt * tz2
* ( C1 * ( u[i][j][k-1][4] * tmp1 )
- 0.50 * C2
* ( ( u[i][j][k-1][1]*u[i][j][k-1][1]
+ u[i][j][k-1][2]*u[i][j][k-1][2]
+ 3.0*u[i][j][k-1][3]*u[i][j][k-1][3] ) * tmp2 ) )
- dt * tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k-1][3];
a[i][j][4][4] = - dt * tz2
* ( C1 * ( u[i][j][k-1][3] * tmp1 ) )
- dt * tz1 * c1345 * tmp1
- dt * tz1 * dz5;
/*--------------------------------------------------------------------
c form the second block sub-diagonal
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i][j-1][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
b[i][j][0][0] = - dt * ty1 * dy1;
b[i][j][0][1] = 0.0;
b[i][j][0][2] = - dt * ty2;
b[i][j][0][3] = 0.0;
b[i][j][0][4] = 0.0;
b[i][j][1][0] = - dt * ty2
* ( - ( u[i][j-1][k][1]*u[i][j-1][k][2] ) * tmp2 )
- dt * ty1 * ( - c34 * tmp2 * u[i][j-1][k][1] );
b[i][j][1][1] = - dt * ty2 * ( u[i][j-1][k][2] * tmp1 )
- dt * ty1 * ( c34 * tmp1 )
- dt * ty1 * dy2;
b[i][j][1][2] = - dt * ty2 * ( u[i][j-1][k][1] * tmp1 );
b[i][j][1][3] = 0.0;
b[i][j][1][4] = 0.0;
b[i][j][2][0] = - dt * ty2
* ( - ( u[i][j-1][k][2] * tmp1 ) *( u[i][j-1][k][2] * tmp1 )
+ 0.50 * C2 * ( ( u[i][j-1][k][1] * u[i][j-1][k][1]
+ u[i][j-1][k][2] * u[i][j-1][k][2]
+ u[i][j-1][k][3] * u[i][j-1][k][3] )
* tmp2 ) )
- dt * ty1 * ( - r43 * c34 * tmp2 * u[i][j-1][k][2] );
b[i][j][2][1] = - dt * ty2
* ( - C2 * ( u[i][j-1][k][1] * tmp1 ) );
b[i][j][2][2] = - dt * ty2 * ( ( 2.0 - C2 )
* ( u[i][j-1][k][2] * tmp1 ) )
- dt * ty1 * ( r43 * c34 * tmp1 )
- dt * ty1 * dy3;
b[i][j][2][3] = - dt * ty2
* ( - C2 * ( u[i][j-1][k][3] * tmp1 ) );
b[i][j][2][4] = - dt * ty2 * C2;
b[i][j][3][0] = - dt * ty2
* ( - ( u[i][j-1][k][2]*u[i][j-1][k][3] ) * tmp2 )
- dt * ty1 * ( - c34 * tmp2 * u[i][j-1][k][3] );
b[i][j][3][1] = 0.0;
b[i][j][3][2] = - dt * ty2 * ( u[i][j-1][k][3] * tmp1 );
b[i][j][3][3] = - dt * ty2 * ( u[i][j-1][k][2] * tmp1 )
- dt * ty1 * ( c34 * tmp1 )
- dt * ty1 * dy4;
b[i][j][3][4] = 0.0;
b[i][j][4][0] = - dt * ty2
* ( ( C2 * ( u[i][j-1][k][1] * u[i][j-1][k][1]
+ u[i][j-1][k][2] * u[i][j-1][k][2]
+ u[i][j-1][k][3] * u[i][j-1][k][3] ) * tmp2
- C1 * ( u[i][j-1][k][4] * tmp1 ) )
* ( u[i][j-1][k][2] * tmp1 ) )
- dt * ty1
* ( - ( c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][1]))
- ( r43*c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][2]))
- ( c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][3]))
- c1345*tmp2*u[i][j-1][k][4] );
b[i][j][4][1] = - dt * ty2
* ( - C2 * ( u[i][j-1][k][1]*u[i][j-1][k][2] ) * tmp2 )
- dt * ty1
* ( c34 - c1345 ) * tmp2 * u[i][j-1][k][1];
b[i][j][4][2] = - dt * ty2
* ( C1 * ( u[i][j-1][k][4] * tmp1 )
- 0.50 * C2
* ( ( u[i][j-1][k][1]*u[i][j-1][k][1]
+ 3.0 * u[i][j-1][k][2]*u[i][j-1][k][2]
+ u[i][j-1][k][3]*u[i][j-1][k][3] ) * tmp2 ) )
- dt * ty1
* ( r43*c34 - c1345 ) * tmp2 * u[i][j-1][k][2];
b[i][j][4][3] = - dt * ty2
* ( - C2 * ( u[i][j-1][k][2]*u[i][j-1][k][3] ) * tmp2 )
- dt * ty1 * ( c34 - c1345 ) * tmp2 * u[i][j-1][k][3];
b[i][j][4][4] = - dt * ty2
* ( C1 * ( u[i][j-1][k][2] * tmp1 ) )
- dt * ty1 * c1345 * tmp1
- dt * ty1 * dy5;
/*--------------------------------------------------------------------
c form the third block sub-diagonal
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i-1][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
c[i][j][0][0] = - dt * tx1 * dx1;
c[i][j][0][1] = - dt * tx2;
c[i][j][0][2] = 0.0;
c[i][j][0][3] = 0.0;
c[i][j][0][4] = 0.0;
c[i][j][1][0] = - dt * tx2
* ( - ( u[i-1][j][k][1] * tmp1 ) *( u[i-1][j][k][1] * tmp1 )
+ C2 * 0.50 * ( u[i-1][j][k][1] * u[i-1][j][k][1]
+ u[i-1][j][k][2] * u[i-1][j][k][2]
+ u[i-1][j][k][3] * u[i-1][j][k][3] ) * tmp2 )
- dt * tx1 * ( - r43 * c34 * tmp2 * u[i-1][j][k][1] );
c[i][j][1][1] = - dt * tx2
* ( ( 2.0 - C2 ) * ( u[i-1][j][k][1] * tmp1 ) )
- dt * tx1 * ( r43 * c34 * tmp1 )
- dt * tx1 * dx2;
c[i][j][1][2] = - dt * tx2
* ( - C2 * ( u[i-1][j][k][2] * tmp1 ) );
c[i][j][1][3] = - dt * tx2
* ( - C2 * ( u[i-1][j][k][3] * tmp1 ) );
c[i][j][1][4] = - dt * tx2 * C2;
c[i][j][2][0] = - dt * tx2
* ( - ( u[i-1][j][k][1] * u[i-1][j][k][2] ) * tmp2 )
- dt * tx1 * ( - c34 * tmp2 * u[i-1][j][k][2] );
c[i][j][2][1] = - dt * tx2 * ( u[i-1][j][k][2] * tmp1 );
c[i][j][2][2] = - dt * tx2 * ( u[i-1][j][k][1] * tmp1 )
- dt * tx1 * ( c34 * tmp1 )
- dt * tx1 * dx3;
c[i][j][2][3] = 0.0;
c[i][j][2][4] = 0.0;
c[i][j][3][0] = - dt * tx2
* ( - ( u[i-1][j][k][1]*u[i-1][j][k][3] ) * tmp2 )
- dt * tx1 * ( - c34 * tmp2 * u[i-1][j][k][3] );
c[i][j][3][1] = - dt * tx2 * ( u[i-1][j][k][3] * tmp1 );
c[i][j][3][2] = 0.0;
c[i][j][3][3] = - dt * tx2 * ( u[i-1][j][k][1] * tmp1 )
- dt * tx1 * ( c34 * tmp1 )
- dt * tx1 * dx4;
c[i][j][3][4] = 0.0;
c[i][j][4][0] = - dt * tx2
* ( ( C2 * ( u[i-1][j][k][1] * u[i-1][j][k][1]
+ u[i-1][j][k][2] * u[i-1][j][k][2]
+ u[i-1][j][k][3] * u[i-1][j][k][3] ) * tmp2
- C1 * ( u[i-1][j][k][4] * tmp1 ) )
* ( u[i-1][j][k][1] * tmp1 ) )
- dt * tx1
* ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][3]) )
- c1345 * tmp2 * u[i-1][j][k][4] );
c[i][j][4][1] = - dt * tx2
* ( C1 * ( u[i-1][j][k][4] * tmp1 )
- 0.50 * C2
* ( ( 3.0*u[i-1][j][k][1]*u[i-1][j][k][1]
+ u[i-1][j][k][2]*u[i-1][j][k][2]
+ u[i-1][j][k][3]*u[i-1][j][k][3] ) * tmp2 ) )
- dt * tx1
* ( r43*c34 - c1345 ) * tmp2 * u[i-1][j][k][1];
c[i][j][4][2] = - dt * tx2
* ( - C2 * ( u[i-1][j][k][2]*u[i-1][j][k][1] ) * tmp2 )
- dt * tx1
* ( c34 - c1345 ) * tmp2 * u[i-1][j][k][2];
c[i][j][4][3] = - dt * tx2
* ( - C2 * ( u[i-1][j][k][3]*u[i-1][j][k][1] ) * tmp2 )
- dt * tx1
* ( c34 - c1345 ) * tmp2 * u[i-1][j][k][3];
c[i][j][4][4] = - dt * tx2
* ( C1 * ( u[i-1][j][k][1] * tmp1 ) )
- dt * tx1 * c1345 * tmp1
- dt * tx1 * dx5;
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void jacu(int k) {
/*--------------------------------------------------------------------
c   compute the upper triangular part of the jacobian matrix
c
c   For every (i,j) point of horizontal plane k this fills four 5x5
c   blocks used by the backward sweep (buts):
c     d[i][j][.][.]  block diagonal, evaluated at u[i][j][k]
c     a[i][j][.][.]  coupling to u[i+1][j][k]   (xi   direction)
c     b[i][j][.][.]  coupling to u[i][j+1][k]   (eta  direction)
c     c[i][j][.][.]  coupling to u[i][j][k+1]   (zeta direction)
c   Note: the section comments below say "sub-diagonal" but, since the
c   neighbours are at i+1/j+1/k+1, these are the SUPER-diagonal blocks
c   of the linearized operator (the comments were copied from jacld).
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c  local variables
--------------------------------------------------------------------*/
int i, j;
double r43;
double c1345;
double c34;
double tmp1, tmp2, tmp3;
r43 = ( 4.0 / 3.0 );
c1345 = C1 * C3 * C4 * C5;
c34 = C3 * C4;
/* Under OpenMP the (i,j) loops run in DESCENDING order so that the
   static schedule hands each thread the same index ranges as the
   backward solver sweep that consumes these blocks; the serial build
   keeps the natural ascending order.  Iterations are independent here
   (each writes only its own (i,j) entry), so both orders are valid. */
#pragma omp for nowait schedule(static)
#if defined(_OPENMP)
for (i = iend; i >= ist; i--) {
for (j = jend; j >= jst; j--) {
#else
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
#endif
/*--------------------------------------------------------------------
c   form the block diagonal (evaluated at the point itself)
--------------------------------------------------------------------*/
/* tmp1..tmp3 are 1/rho, 1/rho^2, 1/rho^3 of the local state */
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
d[i][j][0][0] = 1.0
+ dt * 2.0 * ( tx1 * dx1
+ ty1 * dy1
+ tz1 * dz1 );
d[i][j][0][1] = 0.0;
d[i][j][0][2] = 0.0;
d[i][j][0][3] = 0.0;
d[i][j][0][4] = 0.0;
d[i][j][1][0] = dt * 2.0
* ( tx1 * ( - r43 * c34 * tmp2 * u[i][j][k][1] )
+ ty1 * ( - c34 * tmp2 * u[i][j][k][1] )
+ tz1 * ( - c34 * tmp2 * u[i][j][k][1] ) );
d[i][j][1][1] = 1.0
+ dt * 2.0
* ( tx1 * r43 * c34 * tmp1
+ ty1 * c34 * tmp1
+ tz1 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx2
+ ty1 * dy2
+ tz1 * dz2 );
d[i][j][1][2] = 0.0;
d[i][j][1][3] = 0.0;
d[i][j][1][4] = 0.0;
d[i][j][2][0] = dt * 2.0
* ( tx1 * ( - c34 * tmp2 * u[i][j][k][2] )
+ ty1 * ( - r43 * c34 * tmp2 * u[i][j][k][2] )
+ tz1 * ( - c34 * tmp2 * u[i][j][k][2] ) );
d[i][j][2][1] = 0.0;
d[i][j][2][2] = 1.0
+ dt * 2.0
* ( tx1 * c34 * tmp1
+ ty1 * r43 * c34 * tmp1
+ tz1 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx3
+ ty1 * dy3
+ tz1 * dz3 );
d[i][j][2][3] = 0.0;
d[i][j][2][4] = 0.0;
d[i][j][3][0] = dt * 2.0
* ( tx1 * ( - c34 * tmp2 * u[i][j][k][3] )
+ ty1 * ( - c34 * tmp2 * u[i][j][k][3] )
+ tz1 * ( - r43 * c34 * tmp2 * u[i][j][k][3] ) );
d[i][j][3][1] = 0.0;
d[i][j][3][2] = 0.0;
d[i][j][3][3] = 1.0
+ dt * 2.0
* ( tx1 * c34 * tmp1
+ ty1 * c34 * tmp1
+ tz1 * r43 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx4
+ ty1 * dy4
+ tz1 * dz4 );
d[i][j][3][4] = 0.0;
/* energy-equation row: the r43 factor follows whichever momentum
   component matches the sweep direction (tx1->[1], ty1->[2], tz1->[3]) */
d[i][j][4][0] = dt * 2.0
* ( tx1 * ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] )
+ ty1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] )
+ tz1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] ) );
d[i][j][4][1] = dt * 2.0
* ( tx1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][1]
+ ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1]
+ tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1] );
d[i][j][4][2] = dt * 2.0
* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2]
+ ty1 * ( r43*c34 -c1345 ) * tmp2 * u[i][j][k][2]
+ tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2] );
d[i][j][4][3] = dt * 2.0
* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]
+ ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]
+ tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][3] );
d[i][j][4][4] = 1.0
+ dt * 2.0 * ( tx1 * c1345 * tmp1
+ ty1 * c1345 * tmp1
+ tz1 * c1345 * tmp1 )
+ dt * 2.0 * ( tx1 * dx5
+ ty1 * dy5
+ tz1 * dz5 );
/*--------------------------------------------------------------------
c   form the first block super-diagonal: xi direction, evaluated at
c   the i+1 neighbour (comment originally said "sub-diagonal")
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i+1][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
a[i][j][0][0] = - dt * tx1 * dx1;
a[i][j][0][1] = dt * tx2;
a[i][j][0][2] = 0.0;
a[i][j][0][3] = 0.0;
a[i][j][0][4] = 0.0;
a[i][j][1][0] = dt * tx2
* ( - ( u[i+1][j][k][1] * tmp1 ) *( u[i+1][j][k][1] * tmp1 )
+ C2 * 0.50 * ( u[i+1][j][k][1] * u[i+1][j][k][1]
+ u[i+1][j][k][2] * u[i+1][j][k][2]
+ u[i+1][j][k][3] * u[i+1][j][k][3] ) * tmp2 )
- dt * tx1 * ( - r43 * c34 * tmp2 * u[i+1][j][k][1] );
a[i][j][1][1] = dt * tx2
* ( ( 2.0 - C2 ) * ( u[i+1][j][k][1] * tmp1 ) )
- dt * tx1 * ( r43 * c34 * tmp1 )
- dt * tx1 * dx2;
a[i][j][1][2] = dt * tx2
* ( - C2 * ( u[i+1][j][k][2] * tmp1 ) );
a[i][j][1][3] = dt * tx2
* ( - C2 * ( u[i+1][j][k][3] * tmp1 ) );
a[i][j][1][4] = dt * tx2 * C2 ;
a[i][j][2][0] = dt * tx2
* ( - ( u[i+1][j][k][1] * u[i+1][j][k][2] ) * tmp2 )
- dt * tx1 * ( - c34 * tmp2 * u[i+1][j][k][2] );
a[i][j][2][1] = dt * tx2 * ( u[i+1][j][k][2] * tmp1 );
a[i][j][2][2] = dt * tx2 * ( u[i+1][j][k][1] * tmp1 )
- dt * tx1 * ( c34 * tmp1 )
- dt * tx1 * dx3;
a[i][j][2][3] = 0.0;
a[i][j][2][4] = 0.0;
a[i][j][3][0] = dt * tx2
* ( - ( u[i+1][j][k][1]*u[i+1][j][k][3] ) * tmp2 )
- dt * tx1 * ( - c34 * tmp2 * u[i+1][j][k][3] );
a[i][j][3][1] = dt * tx2 * ( u[i+1][j][k][3] * tmp1 );
a[i][j][3][2] = 0.0;
a[i][j][3][3] = dt * tx2 * ( u[i+1][j][k][1] * tmp1 )
- dt * tx1 * ( c34 * tmp1 )
- dt * tx1 * dx4;
a[i][j][3][4] = 0.0;
a[i][j][4][0] = dt * tx2
* ( ( C2 * ( u[i+1][j][k][1] * u[i+1][j][k][1]
+ u[i+1][j][k][2] * u[i+1][j][k][2]
+ u[i+1][j][k][3] * u[i+1][j][k][3] ) * tmp2
- C1 * ( u[i+1][j][k][4] * tmp1 ) )
* ( u[i+1][j][k][1] * tmp1 ) )
- dt * tx1
* ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][3]) )
- c1345 * tmp2 * u[i+1][j][k][4] );
a[i][j][4][1] = dt * tx2
* ( C1 * ( u[i+1][j][k][4] * tmp1 )
- 0.50 * C2
* ( ( 3.0*u[i+1][j][k][1]*u[i+1][j][k][1]
+ u[i+1][j][k][2]*u[i+1][j][k][2]
+ u[i+1][j][k][3]*u[i+1][j][k][3] ) * tmp2 ) )
- dt * tx1
* ( r43*c34 - c1345 ) * tmp2 * u[i+1][j][k][1];
a[i][j][4][2] = dt * tx2
* ( - C2 * ( u[i+1][j][k][2]*u[i+1][j][k][1] ) * tmp2 )
- dt * tx1
* ( c34 - c1345 ) * tmp2 * u[i+1][j][k][2];
a[i][j][4][3] = dt * tx2
* ( - C2 * ( u[i+1][j][k][3]*u[i+1][j][k][1] ) * tmp2 )
- dt * tx1
* ( c34 - c1345 ) * tmp2 * u[i+1][j][k][3];
a[i][j][4][4] = dt * tx2
* ( C1 * ( u[i+1][j][k][1] * tmp1 ) )
- dt * tx1 * c1345 * tmp1
- dt * tx1 * dx5;
/*--------------------------------------------------------------------
c   form the second block super-diagonal: eta direction, evaluated at
c   the j+1 neighbour (comment originally said "sub-diagonal")
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i][j+1][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
b[i][j][0][0] = - dt * ty1 * dy1;
b[i][j][0][1] = 0.0;
b[i][j][0][2] = dt * ty2;
b[i][j][0][3] = 0.0;
b[i][j][0][4] = 0.0;
b[i][j][1][0] = dt * ty2
* ( - ( u[i][j+1][k][1]*u[i][j+1][k][2] ) * tmp2 )
- dt * ty1 * ( - c34 * tmp2 * u[i][j+1][k][1] );
b[i][j][1][1] = dt * ty2 * ( u[i][j+1][k][2] * tmp1 )
- dt * ty1 * ( c34 * tmp1 )
- dt * ty1 * dy2;
b[i][j][1][2] = dt * ty2 * ( u[i][j+1][k][1] * tmp1 );
b[i][j][1][3] = 0.0;
b[i][j][1][4] = 0.0;
b[i][j][2][0] = dt * ty2
* ( - ( u[i][j+1][k][2] * tmp1 ) *( u[i][j+1][k][2] * tmp1 )
+ 0.50 * C2 * ( ( u[i][j+1][k][1] * u[i][j+1][k][1]
+ u[i][j+1][k][2] * u[i][j+1][k][2]
+ u[i][j+1][k][3] * u[i][j+1][k][3] )
* tmp2 ) )
- dt * ty1 * ( - r43 * c34 * tmp2 * u[i][j+1][k][2] );
b[i][j][2][1] = dt * ty2
* ( - C2 * ( u[i][j+1][k][1] * tmp1 ) );
b[i][j][2][2] = dt * ty2 * ( ( 2.0 - C2 )
* ( u[i][j+1][k][2] * tmp1 ) )
- dt * ty1 * ( r43 * c34 * tmp1 )
- dt * ty1 * dy3;
b[i][j][2][3] = dt * ty2
* ( - C2 * ( u[i][j+1][k][3] * tmp1 ) );
b[i][j][2][4] = dt * ty2 * C2;
b[i][j][3][0] = dt * ty2
* ( - ( u[i][j+1][k][2]*u[i][j+1][k][3] ) * tmp2 )
- dt * ty1 * ( - c34 * tmp2 * u[i][j+1][k][3] );
b[i][j][3][1] = 0.0;
b[i][j][3][2] = dt * ty2 * ( u[i][j+1][k][3] * tmp1 );
b[i][j][3][3] = dt * ty2 * ( u[i][j+1][k][2] * tmp1 )
- dt * ty1 * ( c34 * tmp1 )
- dt * ty1 * dy4;
b[i][j][3][4] = 0.0;
b[i][j][4][0] = dt * ty2
* ( ( C2 * ( u[i][j+1][k][1] * u[i][j+1][k][1]
+ u[i][j+1][k][2] * u[i][j+1][k][2]
+ u[i][j+1][k][3] * u[i][j+1][k][3] ) * tmp2
- C1 * ( u[i][j+1][k][4] * tmp1 ) )
* ( u[i][j+1][k][2] * tmp1 ) )
- dt * ty1
* ( - ( c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][1]) )
- ( r43*c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][2]) )
- ( c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][3]) )
- c1345*tmp2*u[i][j+1][k][4] );
b[i][j][4][1] = dt * ty2
* ( - C2 * ( u[i][j+1][k][1]*u[i][j+1][k][2] ) * tmp2 )
- dt * ty1
* ( c34 - c1345 ) * tmp2 * u[i][j+1][k][1];
b[i][j][4][2] = dt * ty2
* ( C1 * ( u[i][j+1][k][4] * tmp1 )
- 0.50 * C2
* ( ( u[i][j+1][k][1]*u[i][j+1][k][1]
+ 3.0 * u[i][j+1][k][2]*u[i][j+1][k][2]
+ u[i][j+1][k][3]*u[i][j+1][k][3] ) * tmp2 ) )
- dt * ty1
* ( r43*c34 - c1345 ) * tmp2 * u[i][j+1][k][2];
b[i][j][4][3] = dt * ty2
* ( - C2 * ( u[i][j+1][k][2]*u[i][j+1][k][3] ) * tmp2 )
- dt * ty1 * ( c34 - c1345 ) * tmp2 * u[i][j+1][k][3];
b[i][j][4][4] = dt * ty2
* ( C1 * ( u[i][j+1][k][2] * tmp1 ) )
- dt * ty1 * c1345 * tmp1
- dt * ty1 * dy5;
/*--------------------------------------------------------------------
c   form the third block super-diagonal: zeta direction, evaluated at
c   the k+1 neighbour (comment originally said "sub-diagonal")
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i][j][k+1][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
c[i][j][0][0] = - dt * tz1 * dz1;
c[i][j][0][1] = 0.0;
c[i][j][0][2] = 0.0;
c[i][j][0][3] = dt * tz2;
c[i][j][0][4] = 0.0;
c[i][j][1][0] = dt * tz2
* ( - ( u[i][j][k+1][1]*u[i][j][k+1][3] ) * tmp2 )
- dt * tz1 * ( - c34 * tmp2 * u[i][j][k+1][1] );
c[i][j][1][1] = dt * tz2 * ( u[i][j][k+1][3] * tmp1 )
- dt * tz1 * c34 * tmp1
- dt * tz1 * dz2 ;
c[i][j][1][2] = 0.0;
c[i][j][1][3] = dt * tz2 * ( u[i][j][k+1][1] * tmp1 );
c[i][j][1][4] = 0.0;
c[i][j][2][0] = dt * tz2
* ( - ( u[i][j][k+1][2]*u[i][j][k+1][3] ) * tmp2 )
- dt * tz1 * ( - c34 * tmp2 * u[i][j][k+1][2] );
c[i][j][2][1] = 0.0;
c[i][j][2][2] = dt * tz2 * ( u[i][j][k+1][3] * tmp1 )
- dt * tz1 * ( c34 * tmp1 )
- dt * tz1 * dz3;
c[i][j][2][3] = dt * tz2 * ( u[i][j][k+1][2] * tmp1 );
c[i][j][2][4] = 0.0;
c[i][j][3][0] = dt * tz2
* ( - ( u[i][j][k+1][3] * tmp1 ) *( u[i][j][k+1][3] * tmp1 )
+ 0.50 * C2
* ( ( u[i][j][k+1][1] * u[i][j][k+1][1]
+ u[i][j][k+1][2] * u[i][j][k+1][2]
+ u[i][j][k+1][3] * u[i][j][k+1][3] ) * tmp2 ) )
- dt * tz1 * ( - r43 * c34 * tmp2 * u[i][j][k+1][3] );
c[i][j][3][1] = dt * tz2
* ( - C2 * ( u[i][j][k+1][1] * tmp1 ) );
c[i][j][3][2] = dt * tz2
* ( - C2 * ( u[i][j][k+1][2] * tmp1 ) );
c[i][j][3][3] = dt * tz2 * ( 2.0 - C2 )
* ( u[i][j][k+1][3] * tmp1 )
- dt * tz1 * ( r43 * c34 * tmp1 )
- dt * tz1 * dz4;
c[i][j][3][4] = dt * tz2 * C2;
c[i][j][4][0] = dt * tz2
* ( ( C2 * ( u[i][j][k+1][1] * u[i][j][k+1][1]
+ u[i][j][k+1][2] * u[i][j][k+1][2]
+ u[i][j][k+1][3] * u[i][j][k+1][3] ) * tmp2
- C1 * ( u[i][j][k+1][4] * tmp1 ) )
* ( u[i][j][k+1][3] * tmp1 ) )
- dt * tz1
* ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k+1][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k+1][2]) )
- ( r43*c34 - c1345 )* tmp3 * ( pow2(u[i][j][k+1][3]) )
- c1345 * tmp2 * u[i][j][k+1][4] );
c[i][j][4][1] = dt * tz2
* ( - C2 * ( u[i][j][k+1][1]*u[i][j][k+1][3] ) * tmp2 )
- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k+1][1];
c[i][j][4][2] = dt * tz2
* ( - C2 * ( u[i][j][k+1][2]*u[i][j][k+1][3] ) * tmp2 )
- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k+1][2];
c[i][j][4][3] = dt * tz2
* ( C1 * ( u[i][j][k+1][4] * tmp1 )
- 0.50 * C2
* ( ( u[i][j][k+1][1]*u[i][j][k+1][1]
+ u[i][j][k+1][2]*u[i][j][k+1][2]
+ 3.0*u[i][j][k+1][3]*u[i][j][k+1][3] ) * tmp2 ) )
- dt * tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k+1][3];
c[i][j][4][4] = dt * tz2
* ( C1 * ( u[i][j][k+1][3] * tmp1 ) )
- dt * tz1 * c1345 * tmp1
- dt * tz1 * dz5;
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void l2norm (int nx0, int ny0, int nz0,
int ist, int iend,
int jst, int jend,
/*--------------------------------------------------------------------
c   Second two dimensions of v are padded by 1 (even sizes only) to
c   improve cache behaviour; only v carries the padding.
--------------------------------------------------------------------*/
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double sum[5]) {
/*--------------------------------------------------------------------
c   Compute the RMS norm of each of the five components of v over the
c   interior [ist..iend] x [jst..jend] x [1..nz0-2], returned in sum.
c   Called inside an OpenMP parallel region: each thread accumulates
c   a private partial sum, then merges it under a critical section.
--------------------------------------------------------------------*/
int i, j, k, m;
double partial[5] = { 0.0, 0.0, 0.0, 0.0, 0.0 };
/* one thread clears the shared accumulator; implicit barrier follows */
#pragma omp single
for (m = 0; m < 5; m++) {
sum[m] = 0.0;
}
/* thread-private accumulation over this thread's slab of i */
#pragma omp for nowait
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
for (k = 1; k <= nz0-2; k++) {
for (m = 0; m < 5; m++) {
partial[m] += v[i][j][k][m] * v[i][j][k][m];
}
}
}
}
/* merge the private partial sums into the shared result */
#pragma omp critical
{
for (m = 0; m < 5; m++) {
sum[m] += partial[m];
}
}
/* wait until every thread has contributed before normalizing */
#pragma omp barrier
#pragma omp single
for (m = 0; m < 5; m++) {
sum[m] = sqrt ( sum[m] / ( (nx0-2)*(ny0-2)*(nz0-2) ) );
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void pintgr(void) {
/*--------------------------------------------------------------------
c   Surface-integral verification quantity: integrates a pressure-like
c   field phi = C2*(energy - 0.5*|momentum|^2/rho) over three faces of
c   the box (k = ki1/ki2, j = ji1/ji2, i = ii1/ii2) with the composite
c   trapezoid rule and stores the averaged result in the global frc.
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c  local variables
--------------------------------------------------------------------*/
int i, j, k;
int ibeg, ifin, ifin1;
int jbeg, jfin, jfin1;
int iglob, iglob1, iglob2;
int jglob, jglob1, jglob2;
double phi1[ISIZ2+2][ISIZ3+2]; /* phi1(0:isiz2+1,0:isiz3+1) */
double phi2[ISIZ2+2][ISIZ3+2]; /* phi2(0:isiz2+1,0:isiz3+1) */
double frc1, frc2, frc3;
/*--------------------------------------------------------------------
c   set up the sub-domains for integeration in each processor
c
c   NOTE(review): this clamp logic (and the iglob/jglob variables) is
c   a leftover of a multi-processor domain decomposition; with a
c   single domain (local index == global index) the net effect is
c   ibeg=ii1, ifin=ii2, jbeg=ji1, jfin=ji2 - confirm before pruning.
--------------------------------------------------------------------*/
ibeg = nx;
ifin = 0;
iglob1 = -1;
iglob2 = nx-1;
if (iglob1 >= ii1 && iglob2 < ii2+nx) ibeg = 0;
if (iglob1 >= ii1-nx && iglob2 <= ii2) ifin = nx;
if (ii1 >= iglob1 && ii1 <= iglob2) ibeg = ii1;
if (ii2 >= iglob1 && ii2 <= iglob2) ifin = ii2;
jbeg = ny;
jfin = -1;
jglob1 = 0;
jglob2 = ny-1;
if (jglob1 >= ji1 && jglob2 < ji2+ny) jbeg = 0;
if (jglob1 > ji1-ny && jglob2 <= ji2) jfin = ny;
if (ji1 >= jglob1 && ji1 <= jglob2) jbeg = ji1;
if (ji2 >= jglob1 && ji2 <= jglob2) jfin = ji2;
/* ifin1/jfin1 shrink the upper bound by one cell so the trapezoid
   stencils below can reference index+1 safely */
ifin1 = ifin;
jfin1 = jfin;
if (ifin1 == ii2) ifin1 = ifin -1;
if (jfin1 == ji2) jfin1 = jfin -1;
/*--------------------------------------------------------------------
c   initialize the whole (padded) work planes, including cells the
c   sub-domain loops below never touch
--------------------------------------------------------------------*/
for (i = 0; i <= ISIZ2+1; i++) {
for (k = 0; k <= ISIZ3+1; k++) {
phi1[i][k] = 0.0;
phi2[i][k] = 0.0;
}
}
/* face pair k = ki1 (phi1) and k = ki2 (phi2), sampled over (i,j) */
for (i = ibeg; i <= ifin; i++) {
iglob = i; /* vestigial: local == global here */
for (j = jbeg; j <= jfin; j++) {
jglob = j; /* vestigial: local == global here */
k = ki1;
phi1[i][j] = C2*( u[i][j][k][4]
- 0.50 * ( pow2(u[i][j][k][1])
+ pow2(u[i][j][k][2])
+ pow2(u[i][j][k][3]) )
/ u[i][j][k][0] );
k = ki2;
phi2[i][j] = C2*( u[i][j][k][4]
- 0.50 * ( pow2(u[i][j][k][1])
+ pow2(u[i][j][k][2])
+ pow2(u[i][j][k][3]) )
/ u[i][j][k][0] );
}
}
/* trapezoid rule over both k-faces at once (each corner of a cell
   contributes once per cell it belongs to) */
frc1 = 0.0;
for (i = ibeg; i <= ifin1; i++) {
for (j = jbeg; j <= jfin1; j++) {
frc1 = frc1 + ( phi1[i][j]
+ phi1[i+1][j]
+ phi1[i][j+1]
+ phi1[i+1][j+1]
+ phi2[i][j]
+ phi2[i+1][j]
+ phi2[i][j+1]
+ phi2[i+1][j+1] );
}
}
frc1 = dxi * deta * frc1;
/*--------------------------------------------------------------------
c   initialize
--------------------------------------------------------------------*/
for (i = 0; i <= ISIZ2+1; i++) {
for (k = 0; k <= ISIZ3+1; k++) {
phi1[i][k] = 0.0;
phi2[i][k] = 0.0;
}
}
/* face pair j = ji1 (phi1) and j = ji2 (phi2), sampled over (i,k) */
jglob = jbeg;
if (jglob == ji1) {
for (i = ibeg; i <= ifin; i++) {
iglob = i;
for (k = ki1; k <= ki2; k++) {
phi1[i][k] = C2*( u[i][jbeg][k][4]
- 0.50 * ( pow2(u[i][jbeg][k][1])
+ pow2(u[i][jbeg][k][2])
+ pow2(u[i][jbeg][k][3]) )
/ u[i][jbeg][k][0] );
}
}
}
jglob = jfin;
if (jglob == ji2) {
for (i = ibeg; i <= ifin; i++) {
iglob = i;
for (k = ki1; k <= ki2; k++) {
phi2[i][k] = C2*( u[i][jfin][k][4]
- 0.50 * ( pow2(u[i][jfin][k][1])
+ pow2(u[i][jfin][k][2])
+ pow2(u[i][jfin][k][3]) )
/ u[i][jfin][k][0] );
}
}
}
frc2 = 0.0;
for (i = ibeg; i <= ifin1; i++) {
for (k = ki1; k <= ki2-1; k++) {
frc2 = frc2 + ( phi1[i][k]
+ phi1[i+1][k]
+ phi1[i][k+1]
+ phi1[i+1][k+1]
+ phi2[i][k]
+ phi2[i+1][k]
+ phi2[i][k+1]
+ phi2[i+1][k+1] );
}
}
frc2 = dxi * dzeta * frc2;
/*--------------------------------------------------------------------
c   initialize
--------------------------------------------------------------------*/
for (i = 0; i <= ISIZ2+1; i++) {
for (k = 0; k <= ISIZ3+1; k++) {
phi1[i][k] = 0.0;
phi2[i][k] = 0.0;
}
}
/* face pair i = ii1 (phi1) and i = ii2 (phi2), sampled over (j,k);
   note the planes are indexed [j][k] in this section */
iglob = ibeg;
if (iglob == ii1) {
for (j = jbeg; j <= jfin; j++) {
jglob = j;
for (k = ki1; k <= ki2; k++) {
phi1[j][k] = C2*( u[ibeg][j][k][4]
- 0.50 * ( pow2(u[ibeg][j][k][1])
+ pow2(u[ibeg][j][k][2])
+ pow2(u[ibeg][j][k][3]) )
/ u[ibeg][j][k][0] );
}
}
}
iglob = ifin;
if (iglob == ii2) {
for (j = jbeg; j <= jfin; j++) {
jglob = j;
for (k = ki1; k <= ki2; k++) {
phi2[j][k] = C2*( u[ifin][j][k][4]
- 0.50 * ( pow2(u[ifin][j][k][1])
+ pow2(u[ifin][j][k][2])
+ pow2(u[ifin][j][k][3]) )
/ u[ifin][j][k][0] );
}
}
}
frc3 = 0.0;
for (j = jbeg; j <= jfin1; j++) {
for (k = ki1; k <= ki2-1; k++) {
frc3 = frc3 + ( phi1[j][k]
+ phi1[j+1][k]
+ phi1[j][k+1]
+ phi1[j+1][k+1]
+ phi2[j][k]
+ phi2[j+1][k]
+ phi2[j][k+1]
+ phi2[j+1][k+1] );
}
}
frc3 = deta * dzeta * frc3;
/* average: the factor 0.25 folds in the 1/4 corner weight of the
   trapezoid rule that was deferred from the face sums above */
frc = 0.25 * ( frc1 + frc2 + frc3 );
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c   skip the remainder of the current input line.  The original idiom
c   "while(fgetc(fp) != '\n');" spins forever on a truncated file,
c   because fgetc keeps returning EOF (never '\n'); this helper also
c   stops at end-of-file.
--------------------------------------------------------------------*/
static void skip_line(FILE *fp) {
    int ch;
    while ((ch = fgetc(fp)) != '\n' && ch != EOF);
}

/*--------------------------------------------------------------------
c   abort when inputlu.data cannot be parsed.  The original code
c   ignored fscanf failures and silently ran with garbage parameters.
--------------------------------------------------------------------*/
static void input_error(void) {
    printf(" ERROR READING inputlu.data - MALFORMED INPUT FILE\n");
    exit(1);
}

static void read_input(void) {
    FILE *fp;
/*--------------------------------------------------------------------
c    if input file does not exist, it uses defaults
c       ipr = 1 for detailed progress output
c       inorm = how often the norm is printed (once every inorm iterations)
c       itmax = number of pseudo time steps
c       dt = time step
c       omega 1 over-relaxation factor for SSOR
c       tolrsd = steady state residual tolerance levels
c       nx, ny, nz = number of grid points in x, y, z directions
--------------------------------------------------------------------*/
    printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
	   " - LU Benchmark\n\n");
    fp = fopen("inputlu.data", "r");
    if (fp != NULL) {
	printf(" Reading from input file inputlu.data\n");
	/* each value is preceded by two comment/header lines and
	   followed by the rest of its own line */
	skip_line(fp); skip_line(fp);
	if (fscanf(fp, "%d%d", &ipr, &inorm) != 2) input_error();
	skip_line(fp);
	skip_line(fp); skip_line(fp);
	if (fscanf(fp, "%d", &itmax) != 1) input_error();
	skip_line(fp);
	skip_line(fp); skip_line(fp);
	if (fscanf(fp, "%lf", &dt) != 1) input_error();
	skip_line(fp);
	skip_line(fp); skip_line(fp);
	if (fscanf(fp, "%lf", &omega) != 1) input_error();
	skip_line(fp);
	skip_line(fp); skip_line(fp);
	if (fscanf(fp, "%lf%lf%lf%lf%lf",
		   &tolrsd[0], &tolrsd[1], &tolrsd[2],
		   &tolrsd[3], &tolrsd[4]) != 5) input_error();
	skip_line(fp);
	skip_line(fp); skip_line(fp);
	if (fscanf(fp, "%d%d%d", &nx0, &ny0, &nz0) != 3) input_error();
	skip_line(fp);
	fclose(fp);
    } else {
	/* compiled-in defaults for the selected problem class */
	ipr = IPR_DEFAULT;
	inorm = INORM_DEFAULT;
	itmax = ITMAX_DEFAULT;
	dt = DT_DEFAULT;
	omega = OMEGA_DEFAULT;
	tolrsd[0] = TOLRSD1_DEF;
	tolrsd[1] = TOLRSD2_DEF;
	tolrsd[2] = TOLRSD3_DEF;
	tolrsd[3] = TOLRSD4_DEF;
	tolrsd[4] = TOLRSD5_DEF;
	nx0 = ISIZ1;
	ny0 = ISIZ2;
	nz0 = ISIZ3;
    }
/*--------------------------------------------------------------------
c   check problem size
--------------------------------------------------------------------*/
    /* NOTE(review): the guard accepts sizes of 4 while the message
       demands at least 5; preserved as-is - confirm the algorithm's
       true minimum before tightening either side. */
    if ( nx0 < 4 || ny0 < 4 || nz0 < 4 ) {
	printf(" PROBLEM SIZE IS TOO SMALL - \n"
	       " SET EACH OF NX, NY AND NZ AT LEAST EQUAL TO 5\n");
	exit(1);
    }
    if ( nx0 > ISIZ1 || ny0 > ISIZ2 || nz0 > ISIZ3 ) {
	printf(" PROBLEM SIZE IS TOO LARGE - \n"
	       " NX, NY AND NZ SHOULD BE EQUAL TO \n"
	       " ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY\n");
	exit(1);
    }
    printf(" Size: %3dx%3dx%3d\n", nx0, ny0, nz0);
    printf(" Iterations: %3d\n", itmax);
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void rhs(void) {
/*--------------------------------------------------------------------
c compute the right hand sides
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k, m;
int L1, L2;
int ist1, iend1;
int jst1, jend1;
double q;
double u21, u31, u41;
double tmp;
double u21i, u31i, u41i, u51i;
double u21j, u31j, u41j, u51j;
double u21k, u31k, u41k, u51k;
double u21im1, u31im1, u41im1, u51im1;
double u21jm1, u31jm1, u41jm1, u51jm1;
double u21km1, u31km1, u41km1, u51km1;
#pragma omp for
for (i = 0; i <= nx-1; i++) {
for (j = 0; j <= ny-1; j++) {
for (k = 0; k <= nz-1; k++) {
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = - frct[i][j][k][m];
}
}
}
}
/*--------------------------------------------------------------------
c xi-direction flux differences
--------------------------------------------------------------------*/
L1 = 0;
L2 = nx-1;
#pragma omp for
for (i = L1; i <= L2; i++) {
for (j = jst; j <= jend; j++) {
for (k = 1; k <= nz - 2; k++) {
flux[i][j][k][0] = u[i][j][k][1];
u21 = u[i][j][k][1] / u[i][j][k][0];
q = 0.50 * ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
/ u[i][j][k][0];
flux[i][j][k][1] = u[i][j][k][1] * u21 + C2 *
( u[i][j][k][4] - q );
flux[i][j][k][2] = u[i][j][k][2] * u21;
flux[i][j][k][3] = u[i][j][k][3] * u21;
flux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u21;
}
}
}
#pragma omp for
for (j = jst; j <= jend; j++) {
for (k = 1; k <= nz - 2; k++) {
for (i = ist; i <= iend; i++) {
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = rsd[i][j][k][m]
- tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );
}
}
L2 = nx-1;
for (i = ist; i <= L2; i++) {
tmp = 1.0 / u[i][j][k][0];
u21i = tmp * u[i][j][k][1];
u31i = tmp * u[i][j][k][2];
u41i = tmp * u[i][j][k][3];
u51i = tmp * u[i][j][k][4];
tmp = 1.0 / u[i-1][j][k][0];
u21im1 = tmp * u[i-1][j][k][1];
u31im1 = tmp * u[i-1][j][k][2];
u41im1 = tmp * u[i-1][j][k][3];
u51im1 = tmp * u[i-1][j][k][4];
flux[i][j][k][1] = (4.0/3.0) * tx3 * (u21i-u21im1);
flux[i][j][k][2] = tx3 * ( u31i - u31im1 );
flux[i][j][k][3] = tx3 * ( u41i - u41im1 );
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* tx3 * ( ( pow2(u21i) + pow2(u31i) + pow2(u41i) )
- ( pow2(u21im1) + pow2(u31im1) + pow2(u41im1) ) )
+ (1.0/6.0)
* tx3 * ( pow2(u21i) - pow2(u21im1) )
+ C1 * C5 * tx3 * ( u51i - u51im1 );
}
for (i = ist; i <= iend; i++) {
rsd[i][j][k][0] = rsd[i][j][k][0]
+ dx1 * tx1 * ( u[i-1][j][k][0]
- 2.0 * u[i][j][k][0]
+ u[i+1][j][k][0] );
rsd[i][j][k][1] = rsd[i][j][k][1]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] )
+ dx2 * tx1 * ( u[i-1][j][k][1]
- 2.0 * u[i][j][k][1]
+ u[i+1][j][k][1] );
rsd[i][j][k][2] = rsd[i][j][k][2]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] )
+ dx3 * tx1 * ( u[i-1][j][k][2]
- 2.0 * u[i][j][k][2]
+ u[i+1][j][k][2] );
rsd[i][j][k][3] = rsd[i][j][k][3]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][3] - flux[i][j][k][3] )
+ dx4 * tx1 * ( u[i-1][j][k][3]
- 2.0 * u[i][j][k][3]
+ u[i+1][j][k][3] );
rsd[i][j][k][4] = rsd[i][j][k][4]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] )
+ dx5 * tx1 * ( u[i-1][j][k][4]
- 2.0 * u[i][j][k][4]
+ u[i+1][j][k][4] );
}
/*--------------------------------------------------------------------
c Fourth-order dissipation
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
rsd[1][j][k][m] = rsd[1][j][k][m]
- dssp * ( + 5.0 * u[1][j][k][m]
- 4.0 * u[2][j][k][m]
+ u[3][j][k][m] );
rsd[2][j][k][m] = rsd[2][j][k][m]
- dssp * ( - 4.0 * u[1][j][k][m]
+ 6.0 * u[2][j][k][m]
- 4.0 * u[3][j][k][m]
+ u[4][j][k][m] );
}
ist1 = 3;
iend1 = nx - 4;
for (i = ist1; i <= iend1; i++) {
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = rsd[i][j][k][m]
- dssp * ( u[i-2][j][k][m]
- 4.0 * u[i-1][j][k][m]
+ 6.0 * u[i][j][k][m]
- 4.0 * u[i+1][j][k][m]
+ u[i+2][j][k][m] );
}
}
for (m = 0; m < 5; m++) {
rsd[nx-3][j][k][m] = rsd[nx-3][j][k][m]
- dssp * ( u[nx-5][j][k][m]
- 4.0 * u[nx-4][j][k][m]
+ 6.0 * u[nx-3][j][k][m]
- 4.0 * u[nx-2][j][k][m] );
rsd[nx-2][j][k][m] = rsd[nx-2][j][k][m]
- dssp * ( u[nx-4][j][k][m]
- 4.0 * u[nx-3][j][k][m]
+ 5.0 * u[nx-2][j][k][m] );
}
}
}
/*--------------------------------------------------------------------
c eta-direction flux differences
--------------------------------------------------------------------*/
L1 = 0;
L2 = ny-1;
#pragma omp for
for (i = ist; i <= iend; i++) {
for (j = L1; j <= L2; j++) {
for (k = 1; k <= nz - 2; k++) {
flux[i][j][k][0] = u[i][j][k][2];
u31 = u[i][j][k][2] / u[i][j][k][0];
q = 0.50 * ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
/ u[i][j][k][0];
flux[i][j][k][1] = u[i][j][k][1] * u31;
flux[i][j][k][2] = u[i][j][k][2] * u31 + C2 * (u[i][j][k][4]-q);
flux[i][j][k][3] = u[i][j][k][3] * u31;
flux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u31;
}
}
}
#pragma omp for
for (i = ist; i <= iend; i++) {
for (k = 1; k <= nz - 2; k++) {
for (j = jst; j <= jend; j++) {
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = rsd[i][j][k][m]
- ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );
}
}
L2 = ny-1;
for (j = jst; j <= L2; j++) {
tmp = 1.0 / u[i][j][k][0];
u21j = tmp * u[i][j][k][1];
u31j = tmp * u[i][j][k][2];
u41j = tmp * u[i][j][k][3];
u51j = tmp * u[i][j][k][4];
tmp = 1.0 / u[i][j-1][k][0];
u21jm1 = tmp * u[i][j-1][k][1];
u31jm1 = tmp * u[i][j-1][k][2];
u41jm1 = tmp * u[i][j-1][k][3];
u51jm1 = tmp * u[i][j-1][k][4];
flux[i][j][k][1] = ty3 * ( u21j - u21jm1 );
flux[i][j][k][2] = (4.0/3.0) * ty3 * (u31j-u31jm1);
flux[i][j][k][3] = ty3 * ( u41j - u41jm1 );
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* ty3 * ( ( pow2(u21j) + pow2(u31j) + pow2(u41j) )
- ( pow2(u21jm1) + pow2(u31jm1) + pow2(u41jm1) ) )
+ (1.0/6.0)
* ty3 * ( pow2(u31j) - pow2(u31jm1) )
+ C1 * C5 * ty3 * ( u51j - u51jm1 );
}
for (j = jst; j <= jend; j++) {
rsd[i][j][k][0] = rsd[i][j][k][0]
+ dy1 * ty1 * ( u[i][j-1][k][0]
- 2.0 * u[i][j][k][0]
+ u[i][j+1][k][0] );
rsd[i][j][k][1] = rsd[i][j][k][1]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] )
+ dy2 * ty1 * ( u[i][j-1][k][1]
- 2.0 * u[i][j][k][1]
+ u[i][j+1][k][1] );
rsd[i][j][k][2] = rsd[i][j][k][2]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] )
+ dy3 * ty1 * ( u[i][j-1][k][2]
- 2.0 * u[i][j][k][2]
+ u[i][j+1][k][2] );
rsd[i][j][k][3] = rsd[i][j][k][3]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] )
+ dy4 * ty1 * ( u[i][j-1][k][3]
- 2.0 * u[i][j][k][3]
+ u[i][j+1][k][3] );
rsd[i][j][k][4] = rsd[i][j][k][4]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] )
+ dy5 * ty1 * ( u[i][j-1][k][4]
- 2.0 * u[i][j][k][4]
+ u[i][j+1][k][4] );
}
/*--------------------------------------------------------------------
c fourth-order dissipation
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
rsd[i][1][k][m] = rsd[i][1][k][m]
- dssp * ( + 5.0 * u[i][1][k][m]
- 4.0 * u[i][2][k][m]
+ u[i][3][k][m] );
rsd[i][2][k][m] = rsd[i][2][k][m]
- dssp * ( - 4.0 * u[i][1][k][m]
+ 6.0 * u[i][2][k][m]
- 4.0 * u[i][3][k][m]
+ u[i][4][k][m] );
}
jst1 = 3;
jend1 = ny - 4;
for (j = jst1; j <= jend1; j++) {
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = rsd[i][j][k][m]
- dssp * ( u[i][j-2][k][m]
- 4.0 * u[i][j-1][k][m]
+ 6.0 * u[i][j][k][m]
- 4.0 * u[i][j+1][k][m]
+ u[i][j+2][k][m] );
}
}
for (m = 0; m < 5; m++) {
rsd[i][ny-3][k][m] = rsd[i][ny-3][k][m]
- dssp * ( u[i][ny-5][k][m]
- 4.0 * u[i][ny-4][k][m]
+ 6.0 * u[i][ny-3][k][m]
- 4.0 * u[i][ny-2][k][m] );
rsd[i][ny-2][k][m] = rsd[i][ny-2][k][m]
- dssp * ( u[i][ny-4][k][m]
- 4.0 * u[i][ny-3][k][m]
+ 5.0 * u[i][ny-2][k][m] );
}
}
}
/*--------------------------------------------------------------------
c zeta-direction flux differences
--------------------------------------------------------------------*/
#pragma omp for
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
for (k = 0; k <= nz-1; k++) {
flux[i][j][k][0] = u[i][j][k][3];
u41 = u[i][j][k][3] / u[i][j][k][0];
q = 0.50 * ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
/ u[i][j][k][0];
flux[i][j][k][1] = u[i][j][k][1] * u41;
flux[i][j][k][2] = u[i][j][k][2] * u41;
flux[i][j][k][3] = u[i][j][k][3] * u41 + C2 * (u[i][j][k][4]-q);
flux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u41;
}
for (k = 1; k <= nz - 2; k++) {
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = rsd[i][j][k][m]
- tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] );
}
}
for (k = 1; k <= nz-1; k++) {
tmp = 1.0 / u[i][j][k][0];
u21k = tmp * u[i][j][k][1];
u31k = tmp * u[i][j][k][2];
u41k = tmp * u[i][j][k][3];
u51k = tmp * u[i][j][k][4];
tmp = 1.0 / u[i][j][k-1][0];
u21km1 = tmp * u[i][j][k-1][1];
u31km1 = tmp * u[i][j][k-1][2];
u41km1 = tmp * u[i][j][k-1][3];
u51km1 = tmp * u[i][j][k-1][4];
flux[i][j][k][1] = tz3 * ( u21k - u21km1 );
flux[i][j][k][2] = tz3 * ( u31k - u31km1 );
flux[i][j][k][3] = (4.0/3.0) * tz3 * (u41k-u41km1);
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* tz3 * ( ( pow2(u21k) + pow2(u31k) + pow2(u41k) )
- ( pow2(u21km1) + pow2(u31km1) + pow2(u41km1) ) )
+ (1.0/6.0)
* tz3 * ( pow2(u41k) - pow2(u41km1) )
+ C1 * C5 * tz3 * ( u51k - u51km1 );
}
for (k = 1; k <= nz - 2; k++) {
rsd[i][j][k][0] = rsd[i][j][k][0]
+ dz1 * tz1 * ( u[i][j][k-1][0]
- 2.0 * u[i][j][k][0]
+ u[i][j][k+1][0] );
rsd[i][j][k][1] = rsd[i][j][k][1]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][1] - flux[i][j][k][1] )
+ dz2 * tz1 * ( u[i][j][k-1][1]
- 2.0 * u[i][j][k][1]
+ u[i][j][k+1][1] );
rsd[i][j][k][2] = rsd[i][j][k][2]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][2] - flux[i][j][k][2] )
+ dz3 * tz1 * ( u[i][j][k-1][2]
- 2.0 * u[i][j][k][2]
+ u[i][j][k+1][2] );
rsd[i][j][k][3] = rsd[i][j][k][3]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][3] - flux[i][j][k][3] )
+ dz4 * tz1 * ( u[i][j][k-1][3]
- 2.0 * u[i][j][k][3]
+ u[i][j][k+1][3] );
rsd[i][j][k][4] = rsd[i][j][k][4]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][4] - flux[i][j][k][4] )
+ dz5 * tz1 * ( u[i][j][k-1][4]
- 2.0 * u[i][j][k][4]
+ u[i][j][k+1][4] );
}
/*--------------------------------------------------------------------
c fourth-order dissipation
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
rsd[i][j][1][m] = rsd[i][j][1][m]
- dssp * ( + 5.0 * u[i][j][1][m]
- 4.0 * u[i][j][2][m]
+ u[i][j][3][m] );
rsd[i][j][2][m] = rsd[i][j][2][m]
- dssp * ( - 4.0 * u[i][j][1][m]
+ 6.0 * u[i][j][2][m]
- 4.0 * u[i][j][3][m]
+ u[i][j][4][m] );
}
for (k = 3; k <= nz - 4; k++) {
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = rsd[i][j][k][m]
- dssp * ( u[i][j][k-2][m]
- 4.0 * u[i][j][k-1][m]
+ 6.0 * u[i][j][k][m]
- 4.0 * u[i][j][k+1][m]
+ u[i][j][k+2][m] );
}
}
for (m = 0; m < 5; m++) {
rsd[i][j][nz-3][m] = rsd[i][j][nz-3][m]
- dssp * ( u[i][j][nz-5][m]
- 4.0 * u[i][j][nz-4][m]
+ 6.0 * u[i][j][nz-3][m]
- 4.0 * u[i][j][nz-2][m] );
rsd[i][j][nz-2][m] = rsd[i][j][nz-2][m]
- dssp * ( u[i][j][nz-4][m]
- 4.0 * u[i][j][nz-3][m]
+ 5.0 * u[i][j][nz-2][m] );
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void setbv(void) {
/*--------------------------------------------------------------------
c set the boundary values of the dependent variables u by evaluating
c the exact solution on all six faces of the grid.
c NOTE(review): each "omp for" below is an orphaned worksharing loop,
c so this routine is presumably invoked from inside an enclosing
c "omp parallel" region -- confirm at the call site.
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
  int i, j, k;

  /* bottom (zeta = 0) and top (zeta = 1) faces */
#pragma omp for
  for (i = 0; i < nx; i++) {
    for (j = 0; j < ny; j++) {
      exact(i, j, 0, &u[i][j][0][0]);
      exact(i, j, nz - 1, &u[i][j][nz - 1][0]);
    }
  }

  /* south face (eta = 0) */
#pragma omp for
  for (i = 0; i < nx; i++) {
    for (k = 0; k < nz; k++) {
      exact(i, 0, k, &u[i][0][k][0]);
    }
  }

  /* north face (eta = 1): global index ny0-1, local storage row ny-1 */
#pragma omp for
  for (i = 0; i < nx; i++) {
    for (k = 0; k < nz; k++) {
      exact(i, ny0 - 1, k, &u[i][ny - 1][k][0]);
    }
  }

  /* west face (xi = 0) */
#pragma omp for
  for (j = 0; j < ny; j++) {
    for (k = 0; k < nz; k++) {
      exact(0, j, k, &u[0][j][k][0]);
    }
  }

  /* east face (xi = 1): global index nx0-1, local storage row nx-1 */
#pragma omp for
  for (j = 0; j < ny; j++) {
    for (k = 0; k < nz; k++) {
      exact(nx0 - 1, j, k, &u[nx - 1][j][k][0]);
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void setcoeff(void) {
/*--------------------------------------------------------------------
c set up all constant coefficients used by the solver: grid spacings,
c flux-difference factors, diffusion coefficients, the fourth-order
c dissipation constant, and the coefficient table ce[][] of the exact
c solution.
--------------------------------------------------------------------*/
  int m, n;

  /* table of the exact-solution coefficients: ce_tab[p][q] is the
     q-th coefficient of the p-th pde (values identical to the
     reference implementation) */
  static const double ce_tab[5][13] = {
    /* first pde */
    { 2.0, 0.0, 0.0, 4.0, 5.0, 3.0, 5.0e-01,
      2.0e-02, 1.0e-02, 3.0e-02, 5.0e-01, 4.0e-01, 3.0e-01 },
    /* second pde */
    { 1.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0,
      1.0e-02, 3.0e-02, 2.0e-02, 4.0e-01, 3.0e-01, 5.0e-01 },
    /* third pde */
    { 2.0, 2.0, 0.0, 0.0, 0.0, 2.0, 3.0,
      4.0e-02, 3.0e-02, 5.0e-02, 3.0e-01, 5.0e-01, 4.0e-01 },
    /* fourth pde */
    { 2.0, 2.0, 0.0, 0.0, 0.0, 2.0, 3.0,
      3.0e-02, 5.0e-02, 4.0e-02, 2.0e-01, 1.0e-01, 3.0e-01 },
    /* fifth pde */
    { 5.0, 4.0, 3.0, 2.0, 1.0e-01, 4.0e-01, 3.0e-01,
      5.0e-02, 4.0e-02, 3.0e-02, 1.0e-01, 3.0e-01, 2.0e-01 }
  };

  /* grid spacings in the three computational directions */
  dxi = 1.0 / ( nx0 - 1 );
  deta = 1.0 / ( ny0 - 1 );
  dzeta = 1.0 / ( nz0 - 1 );

  /* second-difference, central-difference and first-difference factors */
  tx1 = 1.0 / ( dxi * dxi );
  tx2 = 1.0 / ( 2.0 * dxi );
  tx3 = 1.0 / dxi;
  ty1 = 1.0 / ( deta * deta );
  ty2 = 1.0 / ( 2.0 * deta );
  ty3 = 1.0 / deta;
  tz1 = 1.0 / ( dzeta * dzeta );
  tz2 = 1.0 / ( 2.0 * dzeta );
  tz3 = 1.0 / dzeta;

  /* index bounds used by the surface-integral routine */
  ii1 = 1;
  ii2 = nx0 - 2;
  ji1 = 1;
  ji2 = ny0 - 3;
  ki1 = 2;
  ki2 = nz0 - 2;

/*--------------------------------------------------------------------
c diffusion coefficients (uniform per direction)
--------------------------------------------------------------------*/
  dx1 = dx2 = dx3 = dx4 = dx5 = 0.75;
  dy1 = dy2 = dy3 = dy4 = dy5 = 0.75;
  dz1 = dz2 = dz3 = dz4 = dz5 = 1.00;

/*--------------------------------------------------------------------
c fourth difference dissipation
--------------------------------------------------------------------*/
  dssp = ( max (dx1, max(dy1, dz1) ) ) / 4.0;

/*--------------------------------------------------------------------
c coefficients of the exact solution: copy the table into the global
c ce array
--------------------------------------------------------------------*/
  for (m = 0; m < 5; m++) {
    for (n = 0; n < 13; n++) {
      ce[m][n] = ce_tab[m][n];
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void setiv(void) {
/*--------------------------------------------------------------------
c
c set the initial values of independent variables based on tri-linear
c interpolation of boundary values in the computational space.
c Boundary points themselves (i or j equal to 0 or the last global
c index) are skipped -- they are set by setbv.
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
  int i, j, k, m;
  double xi, eta, zeta;
  double pxi, peta, pzeta;
  double ue_1jk[5], ue_nx0jk[5], ue_i1k[5],
         ue_iny0k[5], ue_ij1[5], ue_ijnz[5];

#pragma omp for
  for (j = 0; j < ny; j++) {
    /* skip the north/south boundary rows */
    if (j == 0 || j == ny0 - 1) continue;
    eta = ( (double) j ) / (ny0 - 1);
    for (k = 1; k < nz - 1; k++) {
      zeta = ( (double) k ) / (nz - 1);
      for (i = 0; i < nx; i++) {
        /* skip the east/west boundary columns */
        if (i == 0 || i == nx0 - 1) continue;
        xi = ( (double) i ) / (nx0 - 1);

        /* exact solution on the six faces bounding this interior point */
        exact(0, j, k, ue_1jk);
        exact(nx0 - 1, j, k, ue_nx0jk);
        exact(i, 0, k, ue_i1k);
        exact(i, ny0 - 1, k, ue_iny0k);
        exact(i, j, 0, ue_ij1);
        exact(i, j, nz - 1, ue_ijnz);

        /* tri-linear blend of the three pairs of face values */
        for (m = 0; m < 5; m++) {
          pxi   = ( 1.0 - xi )   * ue_1jk[m]  + xi   * ue_nx0jk[m];
          peta  = ( 1.0 - eta )  * ue_i1k[m]  + eta  * ue_iny0k[m];
          pzeta = ( 1.0 - zeta ) * ue_ij1[m]  + zeta * ue_ijnz[m];
          u[i][j][k][m] = pxi + peta + pzeta
                        - pxi * peta - peta * pzeta - pzeta * pxi
                        + pxi * peta * pzeta;
        }
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void ssor(void) {
/*--------------------------------------------------------------------
c to perform pseudo-time stepping SSOR iterations
c for five nonlinear pde s.
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
c
c tmp    : SSOR update scale 1/(omega*(2-omega)); computed once before
c          the parallel regions and read-only inside them
c delunm : RMS norms of the newton corrections (diagnostic only)
c tv     : scratch workspace for buts; shared across threads, but the
c          worksharing loops inside buts presumably give each thread a
c          disjoint (i,j) range -- confirm in buts()
--------------------------------------------------------------------*/
int i, j, k, m;
int istep;
double tmp;
double delunm[5], tv[ISIZ1][ISIZ2][5];
/*--------------------------------------------------------------------
c begin pseudo-time stepping iterations
--------------------------------------------------------------------*/
tmp = 1.0 / ( omega * ( 2.0 - omega ) ) ;
/*--------------------------------------------------------------------
c initialize a,b,c,d to zero (guarantees that page tables have been
c formed, if applicable on given architecture, before timestepping).
--------------------------------------------------------------------*/
#pragma omp parallel private(i,j,k,m)
{
#pragma omp for
for (i = 0; i < ISIZ1; i++) {
for (j = 0; j < ISIZ2; j++) {
for (k = 0; k < 5; k++) {
for (m = 0; m < 5; m++) {
a[i][j][k][m] = 0.0;
b[i][j][k][m] = 0.0;
c[i][j][k][m] = 0.0;
d[i][j][k][m] = 0.0;
}
}
}
}
/*--------------------------------------------------------------------
c compute the steady-state residuals
c (rhs() contains orphaned "omp for" loops, so it must be called from
c inside this parallel region)
--------------------------------------------------------------------*/
rhs();
/*--------------------------------------------------------------------
c compute the L2 norms of newton iteration residuals
--------------------------------------------------------------------*/
l2norm( nx0, ny0, nz0,
ist, iend, jst, jend,
rsd, rsdnm );
}
timer_clear(1);
timer_start(1);
/*--------------------------------------------------------------------
c the timestep loop: one parallel region spans all time steps; the
c per-step synchronization is done with the implicit barriers of the
c "omp for" loops plus the two explicit barriers below
--------------------------------------------------------------------*/
#pragma omp parallel private(istep,i,j,k,m)
{
for (istep = 1; istep <= itmax; istep++) {
/* progress report: first step, every 20th step, and the last step */
if (istep%20 == 0 || istep == itmax || istep == 1) {
#pragma omp master
printf(" Time step %4d\n", istep);
}
/*--------------------------------------------------------------------
c perform SSOR iteration: scale the residual by dt to form the
c right-hand side of the implicit system
--------------------------------------------------------------------*/
#pragma omp for
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
for (k = 1; k <= nz - 2; k++) {
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = dt * rsd[i][j][k][m];
}
}
}
}
/* forward sweep over k planes in ascending order; the lower solve is
   presumably pipelined inside blts/jacld -- confirm there */
for (k = 1; k <= nz - 2; k++) {
/*--------------------------------------------------------------------
c form the lower triangular part of the jacobian matrix
--------------------------------------------------------------------*/
jacld(k);
/*--------------------------------------------------------------------
c perform the lower triangular solution
--------------------------------------------------------------------*/
blts(nx, ny, nz, k,
omega,
rsd,
a, b, c, d,
ist, iend, jst, jend,
nx0, ny0 );
}
#pragma omp barrier
/* backward sweep over k planes in descending order */
for (k = nz - 2; k >= 1; k--) {
/*--------------------------------------------------------------------
c form the strictly upper triangular part of the jacobian matrix
--------------------------------------------------------------------*/
jacu(k);
/*--------------------------------------------------------------------
c perform the upper triangular solution
--------------------------------------------------------------------*/
buts(nx, ny, nz, k,
omega,
rsd, tv,
d, a, b, c,
ist, iend, jst, jend,
nx0, ny0 );
}
#pragma omp barrier
/*--------------------------------------------------------------------
c update the variables: u += (1/(omega*(2-omega))) * delta
--------------------------------------------------------------------*/
#pragma omp for
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
for (k = 1; k <= nz-2; k++) {
for (m = 0; m < 5; m++) {
u[i][j][k][m] = u[i][j][k][m]
+ tmp * rsd[i][j][k][m];
}
}
}
}
/*--------------------------------------------------------------------
c compute the RMS (L2) norms of newton iteration corrections
c (l2norm computes RMS norms, not max-norms)
--------------------------------------------------------------------*/
if ( istep % inorm == 0 ) {
l2norm( nx0, ny0, nz0,
ist, iend, jst, jend,
rsd, delunm );
}
/*--------------------------------------------------------------------
c compute the steady-state residuals
--------------------------------------------------------------------*/
rhs();
/*--------------------------------------------------------------------
c compute the RMS (L2) norms of newton iteration residuals
c (l2norm computes RMS norms, not max-norms)
--------------------------------------------------------------------*/
if ( ( istep % inorm == 0 ) ||
( istep == itmax ) ) {
l2norm( nx0, ny0, nz0,
ist, iend, jst, jend,
rsd, rsdnm );
}
/*--------------------------------------------------------------------
c check the newton-iteration residuals against the tolerance levels
--------------------------------------------------------------------*/
if ( ( rsdnm[0] < tolrsd[0] ) &&
( rsdnm[1] < tolrsd[1] ) &&
( rsdnm[2] < tolrsd[2] ) &&
( rsdnm[3] < tolrsd[3] ) &&
( rsdnm[4] < tolrsd[4] ) ) {
/* NOTE(review): convergence terminates the whole program with exit
   status 1 from inside the parallel region; a nonzero status for a
   successful early exit looks suspicious -- confirm intent */
exit(1);
}
}
} /* end parallel */
timer_stop(1);
maxtime= timer_read(1);
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void verify(double xcr[5], double xce[5], double xci,
char *cclass, boolean *verified) {
/*--------------------------------------------------------------------
c verification routine
c
c xcr[5]   : computed RMS norms of the residual
c xce[5]   : computed RMS norms of the solution error
c xci      : computed surface integral
c cclass   : out -- problem class ('S','W','A','B','C'), or 'U' when
c            the grid size / itmax match no known class
c verified : out -- TRUE iff every quantity matches its reference
c            value to within epsilon (relative difference)
--------------------------------------------------------------------*/
double xcrref[5],xceref[5],xciref,
xcrdif[5],xcedif[5],xcidif,
epsilon, dtref;
int m;
/*--------------------------------------------------------------------
c tolerance level (relative difference) used for every comparison
--------------------------------------------------------------------*/
epsilon = 1.0e-08;
*cclass = 'U';
*verified = TRUE;
/* defaults of 1.0 keep the relative differences below well defined
   even when no class matches */
for (m = 0; m < 5; m++) {
xcrref[m] = 1.0;
xceref[m] = 1.0;
}
xciref = 1.0;
/* the problem class is identified by the global grid size and the
   iteration count; dtref is set only inside a matching branch */
if ( nx0 == 12 && ny0 == 12 && nz0 == 12 && itmax == 50) {
*cclass = 'S';
dtref = 5.0e-1;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (12X12X12) grid,
c after 50 time steps, with DT = 5.0d-01
--------------------------------------------------------------------*/
xcrref[0] = 1.6196343210976702e-02;
xcrref[1] = 2.1976745164821318e-03;
xcrref[2] = 1.5179927653399185e-03;
xcrref[3] = 1.5029584435994323e-03;
xcrref[4] = 3.4264073155896461e-02;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (12X12X12) grid,
c after 50 time steps, with DT = 5.0d-01
--------------------------------------------------------------------*/
xceref[0] = 6.4223319957960924e-04;
xceref[1] = 8.4144342047347926e-05;
xceref[2] = 5.8588269616485186e-05;
xceref[3] = 5.8474222595157350e-05;
xceref[4] = 1.3103347914111294e-03;
/*--------------------------------------------------------------------
c Reference value of surface integral, for the (12X12X12) grid,
c after 50 time steps, with DT = 5.0d-01
--------------------------------------------------------------------*/
xciref = 7.8418928865937083;
} else if ( nx0 == 33 && ny0 == 33 && nz0 == 33 && itmax == 300) {
*cclass = 'W'; /* SPEC95fp size */
dtref = 1.5e-3;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (33x33x33) grid,
c after 300 time steps, with DT = 1.5d-3
--------------------------------------------------------------------*/
xcrref[0] = 0.1236511638192e+02;
xcrref[1] = 0.1317228477799e+01;
xcrref[2] = 0.2550120713095e+01;
xcrref[3] = 0.2326187750252e+01;
xcrref[4] = 0.2826799444189e+02;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (33X33X33) grid,
--------------------------------------------------------------------*/
xceref[0] = 0.4867877144216;
xceref[1] = 0.5064652880982e-01;
xceref[2] = 0.9281818101960e-01;
xceref[3] = 0.8570126542733e-01;
xceref[4] = 0.1084277417792e+01;
/*--------------------------------------------------------------------
c Reference value of surface integral, for the (33X33X33) grid,
c after 300 time steps, with DT = 1.5d-3
--------------------------------------------------------------------*/
xciref = 0.1161399311023e+02;
} else if ( nx0 == 64 && ny0 == 64 && nz0 == 64 && itmax == 250) {
*cclass = 'A';
dtref = 2.0e+0;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (64X64X64) grid,
c after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
xcrref[0] = 7.7902107606689367e+02;
xcrref[1] = 6.3402765259692870e+01;
xcrref[2] = 1.9499249727292479e+02;
xcrref[3] = 1.7845301160418537e+02;
xcrref[4] = 1.8384760349464247e+03;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (64X64X64) grid,
c after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
xceref[0] = 2.9964085685471943e+01;
xceref[1] = 2.8194576365003349;
xceref[2] = 7.3473412698774742;
xceref[3] = 6.7139225687777051;
xceref[4] = 7.0715315688392578e+01;
/*--------------------------------------------------------------------
c Reference value of surface integral, for the (64X64X64) grid,
c after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
xciref = 2.6030925604886277e+01;
} else if ( nx0 == 102 && ny0 == 102 && nz0 == 102 && itmax == 250) {
*cclass = 'B';
dtref = 2.0e+0;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (102X102X102) grid,
c after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
xcrref[0] = 3.5532672969982736e+03;
xcrref[1] = 2.6214750795310692e+02;
xcrref[2] = 8.8333721850952190e+02;
xcrref[3] = 7.7812774739425265e+02;
xcrref[4] = 7.3087969592545314e+03;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (102X102X102)
c grid, after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
xceref[0] = 1.1401176380212709e+02;
xceref[1] = 8.1098963655421574;
xceref[2] = 2.8480597317698308e+01;
xceref[3] = 2.5905394567832939e+01;
xceref[4] = 2.6054907504857413e+02;
/*--------------------------------------------------------------------
c Reference value of surface integral, for the (102X102X102) grid,
c after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
xciref = 4.7887162703308227e+01;
} else if ( nx0 == 162 && ny0 == 162 && nz0 == 162 && itmax == 250) {
*cclass = 'C';
dtref = 2.0e+0;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (162X162X162) grid,
c after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
xcrref[0] = 1.03766980323537846e+04;
xcrref[1] = 8.92212458801008552e+02;
xcrref[2] = 2.56238814582660871e+03;
xcrref[3] = 2.19194343857831427e+03;
xcrref[4] = 1.78078057261061185e+04;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (162X162X162)
c grid, after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
xceref[0] = 2.15986399716949279e+02;
xceref[1] = 1.55789559239863600e+01;
xceref[2] = 5.41318863077207766e+01;
xceref[3] = 4.82262643154045421e+01;
xceref[4] = 4.55902910043250358e+02;
/*--------------------------------------------------------------------
c Reference value of surface integral, for the (162X162X162) grid,
c after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
xciref = 6.66404553572181300e+01;
} else {
*verified = FALSE;
}
/*--------------------------------------------------------------------
c verification test for residuals if gridsize is either 12X12X12 or
c 64X64X64 or 102X102X102 or 162X162X162
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Compute the difference of solution values and the known reference values.
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]);
xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]);
}
xcidif = fabs((xci - xciref)/xciref);
/*--------------------------------------------------------------------
c Output the comparison of computed results to known cases.
--------------------------------------------------------------------*/
if (*cclass != 'U') {
printf("\n Verification being performed for cclass %1c\n", *cclass);
printf(" Accuracy setting for epsilon = %20.13e\n", epsilon);
/* dtref is only read here, and only a matching class branch above can
   reach this point, so dtref is always initialized when used */
if (fabs(dt-dtref) > epsilon) {
*verified = FALSE;
*cclass = 'U';
printf(" DT does not match the reference value of %15.8e\n", dtref);
}
} else {
printf(" Unknown cclass\n");
}
if (*cclass != 'U') {
printf(" Comparison of RMS-norms of residual\n");
} else {
printf(" RMS-norms of residual\n");
}
for (m = 0; m < 5; m++) {
if (*cclass == 'U') {
printf(" %2d %20.13e\n", m, xcr[m]);
} else if (xcrdif[m] > epsilon) {
*verified = FALSE;
printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n",
m,xcr[m],xcrref[m],xcrdif[m]);
} else {
printf(" %2d %20.13e%20.13e%20.13e\n",
m,xcr[m],xcrref[m],xcrdif[m]);
}
}
if (*cclass != 'U') {
printf(" Comparison of RMS-norms of solution error\n");
} else {
printf(" RMS-norms of solution error\n");
}
for (m = 0; m < 5; m++) {
if (*cclass == 'U') {
printf(" %2d %20.13e\n", m, xce[m]);
} else if (xcedif[m] > epsilon) {
*verified = FALSE;
printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n",
m,xce[m],xceref[m],xcedif[m]);
} else {
printf(" %2d %20.13e%20.13e%20.13e\n",
m,xce[m],xceref[m],xcedif[m]);
}
}
if (*cclass != 'U') {
printf(" Comparison of surface integral\n");
} else {
printf(" Surface integral\n");
}
if (*cclass == 'U') {
printf(" %20.13e\n", xci);
} else if (xcidif > epsilon) {
*verified = FALSE;
printf(" FAILURE: %20.13e%20.13e%20.13e\n",
xci, xciref, xcidif);
} else {
printf(" %20.13e%20.13e%20.13e\n",
xci, xciref, xcidif);
}
if (*cclass == 'U') {
printf(" No reference values provided\n");
printf(" No verification performed\n");
} else if (*verified) {
printf(" Verification Successful\n");
} else {
printf(" Verification failed\n");
}
}
|
reduce_demo.c | //------------------------------------------------------------------------------
// GraphBLAS/Demo/Program/reduce_demo: reduce a matrix to a scalar
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
#include "GraphBLAS.h"
// #define N 65536
#define N 16384
int main (void)
{
#if defined ( _OPENMP )
double t0 = omp_get_wtime ( ) ;
#endif
// start GraphBLAS
GrB_init (GrB_NONBLOCKING) ;
int nthreads ;
GxB_Global_Option_get (GxB_NTHREADS, &nthreads) ;
printf ("demo: reduce a matrix to a scalar, nthreads: %d\n", nthreads) ;
int nthreads_max ;
GxB_Global_Option_get (GxB_NTHREADS, &nthreads_max) ;
printf ("# of threads: %d\n", nthreads_max) ;
#if defined ( _OPENMP )
t0 = omp_get_wtime ( ) - t0 ;
printf ("GPU warmup time: %g\n", t0) ;
t0 = omp_get_wtime ( ) ;
#endif
GrB_Index nrows = N ;
GrB_Index ncols = N ;
GrB_Matrix A ;
GrB_Matrix_new (&A, GrB_INT64, nrows, ncols) ;
GrB_Index *I = malloc (nrows * ncols * sizeof (GrB_Index)) ;
GrB_Index *J = malloc (nrows * ncols * sizeof (GrB_Index)) ;
int64_t *X = malloc (nrows * ncols * sizeof (int64_t)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads_max) schedule(static)
for (k = 0 ; k < N*N ; k++)
{
// k = i * N + j ;
int64_t i = k / N ;
int64_t j = k % N ;
// int x = (int) (rand ( ) & 0xFF) ;
int x = (int) (k & 0xFF) ;
I [k] = i ;
J [k] = j ;
X [k] = x ;
}
GrB_Index nvals = N*N ;
GrB_Matrix_build_INT64 (A, I, J, X, nvals, GrB_PLUS_INT64) ;
free (I) ;
free (J) ;
free (X) ;
#if defined ( _OPENMP )
t0 = omp_get_wtime ( ) - t0 ;
printf ("time to create matrix: %g\n", t0) ;
#endif
GrB_Index result ;
double t1 ;
printf ("\nreduce to a scalar:\n") ;
for (int nthreads = 1 ; nthreads <= nthreads_max ; nthreads++)
{
GxB_Global_Option_set (GxB_NTHREADS, nthreads) ;
#if defined ( _OPENMP )
double t = omp_get_wtime ( ) ;
#endif
GrB_Matrix_reduce_UINT64 (&result, NULL, GxB_PLUS_INT64_MONOID,
A, NULL) ;
#if defined ( _OPENMP )
t = omp_get_wtime ( ) - t ;
if (nthreads == 1) t1 = t ;
printf ("nthreads %3d time: %12.6f speedup %8.2f\n",
nthreads, t, t1/t) ;
#endif
}
printf ("result %"PRId64"\n", result) ;
// free everyting
GrB_Matrix_free (&A) ;
GrB_finalize ( ) ;
}
|
GB_unaryop__ainv_int8_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int8_fp64
// op(A') function: GB_tran__ainv_int8_fp64
// C type: int8_t
// A type: double
// cast: int8_t cij ; GB_CAST_SIGNED(cij,aij,8)
// unaryop: cij = -aij
#define GB_ATYPE \
double
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
int8_t z ; GB_CAST_SIGNED(z,x,8) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT8 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (int8_t) (-Ax [p]) for all p in [0, anz): negate each double
// entry and cast to int8_t via GB_CAST_SIGNED (defined above; presumably
// guards the double->int8 conversion against NaN/out-of-range values --
// see GB.h for the macro definition).
GrB_Info GB_unop__ainv_int8_fp64
(
int8_t *restrict Cx,        // output array of size anz
const double *restrict Ax,  // input array of size anz
int64_t anz,                // number of entries
int nthreads                // number of OpenMP threads to use
)
{
#if GB_DISABLE
// this kernel was disabled at compile time (see GB_DISABLE above);
// the caller falls back to the generic non-hard-coded kernel
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// Cx [p] = op (cast (Ax [p]))
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecasting each entry and applying
// the AINV (negation) operator.  The actual work is done by the shared
// template GB_unaryop_transpose.c, specialized by the macros defined above.
GrB_Info GB_tran__ainv_int8_fp64
(
GrB_Matrix C,                       // output matrix
const GrB_Matrix A,                 // input matrix, not modified
int64_t *restrict *Rowcounts,       // NOTE(review): presumably per-slice row counts from phase 1 -- confirm in GB_unaryop_transpose.c
GBI_single_iterator Iter,           // iterator over the vectors of A
const int64_t *restrict A_slice,    // partition of A into naslice slices
int naslice                         // number of slices of A
)
{
#if GB_DISABLE
// kernel disabled at compile time; caller uses the generic path
return (GrB_NO_VALUE) ;
#else
// run phase 2 of the two-phase transpose template
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_bitmap_assign_A_whole_template.c | //------------------------------------------------------------------------------
// GB_bitmap_assign_A_whole_template: traverse A for bitmap assignment into C
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// This template traverses over all the entries of the matrix A and operates on
// the corresponding entry in C(i,j), using the GB_AIJ_WORK macro. A can be
// hypersparse or sparse, not bitmap or full. It is not a scalar.
{

    //--------------------------------------------------------------------------
    // matrix assignment: slice the entries of A for each task
    //--------------------------------------------------------------------------

    const int64_t avlen = A->vlen ;
    int nthreads = GB_nthreads (GB_NNZ (A) + A->nvec, chunk, nthreads_max) ;
    // oversubscribe 8 tasks per thread for dynamic load balance
    int ntasks = (nthreads == 1) ? 1 : (8 * nthreads) ;
    int64_t *pstart_slice = NULL, *kfirst_slice = NULL, *klast_slice = NULL ;
    if (!GB_ek_slice (&pstart_slice, &kfirst_slice, &klast_slice, A, &ntasks))
    {
        // out of memory
        GB_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // traverse of the entries of the matrix A
    //--------------------------------------------------------------------------

    int tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:cnvals)
    for (tid = 0 ; tid < ntasks ; tid++)
    {
        // if kfirst > klast then task tid does no work at all
        int64_t kfirst = kfirst_slice [tid] ;
        int64_t klast = klast_slice [tid] ;
        // per-task count of new entries in C; NOTE(review): presumably
        // incremented inside GB_AIJ_WORK by the including file -- confirm
        int64_t task_cnvals = 0 ;

        //----------------------------------------------------------------------
        // traverse over A (:,kfirst:klast)
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {
            //------------------------------------------------------------------
            // find the part of A(:,k) for this task
            //------------------------------------------------------------------

            // j = Ah [k] when A is hypersparse; otherwise j == k (GBH macro)
            int64_t j = GBH (Ah, k) ;
            int64_t pA_start, pA_end ;
            GB_get_pA (&pA_start, &pA_end, tid, k, kfirst,
                klast, pstart_slice, Ap, avlen) ;

            //------------------------------------------------------------------
            // traverse over A(:,j), the kth vector of A
            //------------------------------------------------------------------

            int64_t pC0 = j * cvlen ;   // first entry in C(:,j)
            for (int64_t pA = pA_start ; pA < pA_end ; pA++)
            {
                int64_t i = Ai [pA] ;
                int64_t pC = i + pC0 ;
                // operate on C(i,j) at pC, and A(i,j) at pA.  The mask
                // can be accessed at pC if M is bitmap or full.  A has any
                // sparsity format so only A(i,j) can be accessed at pA.
                GB_AIJ_WORK (pC, pA) ;
            }
        }
        // fold the per-task count into the OpenMP reduction variable
        cnvals += task_cnvals ;
    }

    //--------------------------------------------------------------------------
    // free workspace
    //--------------------------------------------------------------------------

    GB_ek_slice_free (&pstart_slice, &kfirst_slice, &klast_slice) ;
}
|
GB_unop__identity_uint8_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint8_uint32)
// op(A') function: GB (_unop_tran__identity_uint8_uint32)
// C type: uint8_t
// A type: uint32_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
// type of A's entries as stored
#define GB_ATYPE \
    uint32_t
// type of C's entries as stored
#define GB_CTYPE \
    uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]
// address of the pth entry of C
#define GB_CX(p) Cx [p]
// unary operator (identity: pass the value through)
#define GB_OP(z, x) \
    z = x ;
// casting (uint32_t -> uint8_t, truncating)
#define GB_CAST(z, aij) \
    uint8_t z = (uint8_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint8_t z = (uint8_t) aij ; \
    Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_uint8_uint32)
(
    uint8_t *Cx,                // Cx and Ax may be aliased
    const uint32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [p] = (uint8_t) Ax [p], for every live entry p
    int64_t p ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b.
        // Only positions flagged in Ab hold live entries.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                Cx [p] = (uint8_t) Ax [p] ;
            }
        }
    }
    else
    {
        // no bitmap: all anz entries are live
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = (uint8_t) Ax [p] ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the entire kernel body lives in the shared template
// GB_unop_transpose.c, which is specialized by the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_uint8_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-workspace row counters for the transpose
    const int64_t *restrict A_slice,    // partition of A's entries over the workspaces
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out by GxB_NO_* controls; caller falls back to the
    // generic (non-specialized) kernel
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
exchange_boundary.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
// perform a (intra-level) ghost zone exchange
// NOTE exchange_boundary() only exchanges the boundary.
// It will not enforce any boundary conditions
// BC's are either the responsibility of a separate function or should be fused into the stencil
// Perform an intra-level ghost-zone exchange for grid `id` on `level`.
// Five phases: (1) prepost Irecv's, (2) pack send buffers, (3) Isend,
// (4) local box-to-box copies (overlapped with MPI), (5) Waitall + unpack.
// Each phase is timed separately into level->cycles.ghostZone_*.
void exchange_boundary(level_type * level, int id, int justFaces){
  uint64_t _timeCommunicationStart = CycleTime();
  uint64_t _timeStart,_timeEnd;
  int buffer=0;
  int sendBox,recvBox,n;  // NOTE(review): sendBox/recvBox are declared but never used here
  if(justFaces)justFaces=1;else justFaces=0; // must be 0 or 1 in order to index into exchange_ghosts[]
  #ifdef USE_MPI
  int nMessages = level->exchange_ghosts[justFaces].num_recvs + level->exchange_ghosts[justFaces].num_sends;
  // loop through packed list of MPI receives and prepost Irecv's...
  // NOTE(review): issuing Irecv from multiple threads assumes the MPI library
  // was initialized with MPI_THREAD_MULTIPLE -- guarded by the ifdef below.
  _timeStart = CycleTime();
  #ifdef USE_MPI_THREAD_MULTIPLE
  #pragma omp parallel for schedule(dynamic,1)
  #endif
  for(n=0;n<level->exchange_ghosts[justFaces].num_recvs;n++){
    MPI_Irecv(level->exchange_ghosts[justFaces].recv_buffers[n],
              level->exchange_ghosts[justFaces].recv_sizes[n],
              MPI_DOUBLE,
              level->exchange_ghosts[justFaces].recv_ranks[n],
              0, // by construction, only one message should be received from each neighboring process
              MPI_COMM_WORLD,
              &level->exchange_ghosts[justFaces].requests[n]
    );
  }
  _timeEnd = CycleTime();
  level->cycles.ghostZone_recv += (_timeEnd-_timeStart);
  // pack MPI send buffers... (blocks[0] = interior -> send-buffer copies)
  _timeStart = CycleTime();
  #pragma omp parallel for if(level->exchange_ghosts[justFaces].num_blocks[0]>1) schedule(static,1)
  for(buffer=0;buffer<level->exchange_ghosts[justFaces].num_blocks[0];buffer++){CopyBlock(level,id,&level->exchange_ghosts[justFaces].blocks[0][buffer]);}
  _timeEnd = CycleTime();
  level->cycles.ghostZone_pack += (_timeEnd-_timeStart);
  // loop through MPI send buffers and post Isend's...
  _timeStart = CycleTime();
  #ifdef USE_MPI_THREAD_MULTIPLE
  #pragma omp parallel for schedule(dynamic,1)
  #endif
  for(n=0;n<level->exchange_ghosts[justFaces].num_sends;n++){
    MPI_Isend(level->exchange_ghosts[justFaces].send_buffers[n],
              level->exchange_ghosts[justFaces].send_sizes[n],
              MPI_DOUBLE,
              level->exchange_ghosts[justFaces].send_ranks[n],
              0, // by construction, only one message should be sent to each neighboring process
              MPI_COMM_WORLD,
              &level->exchange_ghosts[justFaces].requests[n+level->exchange_ghosts[justFaces].num_recvs]
              // requests[0..num_recvs-1] were used by recvs.  So sends start at num_recvs
    );
  }
  _timeEnd = CycleTime();
  level->cycles.ghostZone_send += (_timeEnd-_timeStart);
  #endif
  // exchange locally... try and hide within Isend latency...
  // (blocks[1] = same-process box-to-box ghost copies; no MPI involved)
  _timeStart = CycleTime();
  #pragma omp parallel for if(level->exchange_ghosts[justFaces].num_blocks[1]>1) schedule(static,1)
  for(buffer=0;buffer<level->exchange_ghosts[justFaces].num_blocks[1];buffer++){CopyBlock(level,id,&level->exchange_ghosts[justFaces].blocks[1][buffer]);}
  _timeEnd = CycleTime();
  level->cycles.ghostZone_local += (_timeEnd-_timeStart);
  // wait for MPI to finish...
  #ifdef USE_MPI
  _timeStart = CycleTime();
  if(nMessages)MPI_Waitall(nMessages,level->exchange_ghosts[justFaces].requests,level->exchange_ghosts[justFaces].status);
  _timeEnd = CycleTime();
  level->cycles.ghostZone_wait += (_timeEnd-_timeStart);
  // unpack MPI receive buffers (blocks[2] = recv-buffer -> ghost-region copies)
  _timeStart = CycleTime();
  #pragma omp parallel for if(level->exchange_ghosts[justFaces].num_blocks[2]>1) schedule(static,1)
  for(buffer=0;buffer<level->exchange_ghosts[justFaces].num_blocks[2];buffer++){CopyBlock(level,id,&level->exchange_ghosts[justFaces].blocks[2][buffer]);}
  _timeEnd = CycleTime();
  level->cycles.ghostZone_unpack += (_timeEnd-_timeStart);
  #endif
  level->cycles.ghostZone_total += (uint64_t)(CycleTime()-_timeCommunicationStart);
}
|
main.c | /* #define _XOPEN_SOURCE 500 */
#ifndef _FILE_OFFSET_BITS
#define _FILE_OFFSET_BITS 64
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <inttypes.h>
#include <limits.h>
#ifndef SIZE_MAX
#define SIZE_MAX (~(size_t)0)
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
/* for open/close and other file io*/
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
/*for sendfile (in-kernel file to file copy, supposedly very fast) */
#ifdef USE_SENDFILE
#include <sys/sendfile.h>
#endif
#if defined(USE_MMAP)
#include <sys/mman.h>
#elif defined(USE_SENDFILE)
#include <sys/sendfile.h>
#else
#endif
#ifdef USE_WRITEV
#ifndef USE_MMAP
#error USE_MMAP must be enabled with USE_WRITEV
#endif
#include <sys/uio.h>
#endif
#include <gsl/gsl_rng.h>
#include "macros.h"
#include "utils.h"
#include "progressbar.h"
#include "gadget_utils.h"
/* Copied straight from https://fossies.org/dox/gsl-2.2.1/shuffle_8c_source.html*/
/* Adapted to generate array indices. The returned random indices are in increasing order */
/* Choose k out of n indices [0, n), storing them in dest[] in increasing
 * order (selection-sampling: index i is taken with probability
 * (k - #chosen)/(n - i)).  Use shuffle() afterwards if a random order is
 * wanted.  Returns EXIT_SUCCESS, or EXIT_FAILURE when k > n.
 * Adapted from https://fossies.org/dox/gsl-2.2.1/shuffle_8c_source.html */
int gsl_ran_arr_index (const gsl_rng * r, size_t * dest, const size_t k, const size_t n)
{
    if (k > n) {
        fprintf(stderr,"k=%zu is greater than n=%zu. Cannot sample more than n items\n",
                k, n);
        return EXIT_FAILURE;
    }

    if (k == n) {
        /* everything is selected -- no randomness needed */
        for (size_t i = 0; i < n; i++) {
            dest[i] = i;
        }
        return EXIT_SUCCESS;
    }

    size_t chosen = 0;
    for (size_t i = 0; i < n && chosen < k; i++) {
        /* accept index i with probability (k - chosen) / (n - i) */
        if ((n - i) * gsl_rng_uniform (r) < k - chosen) {
            dest[chosen] = i;
            chosen++;
        }
    }
    return EXIT_SUCCESS;
}
// Copy the entries listed in random_indices[0..dest_npart-1] of one snapshot
// field (each entry `itemsize` bytes, field starting at in_offset in in_fd)
// to the current position of out_fd.  Four I/O strategies, compile-selected:
// USE_WRITEV (mmap + vectored writes), USE_MMAP (mmap + write), USE_SENDFILE
// (in-kernel copy), or the default pread + write.
// Returns EXIT_SUCCESS, or EXIT_FAILURE on a short read/write.
int write_random_subsample_of_field(int in_fd, int out_fd, off_t in_offset, off_t out_offset, const int dest_npart, const size_t itemsize, size_t *random_indices
#ifdef USE_MMAP
                                    ,char *in_memblock
#endif
                                    )
{
#ifdef USE_MMAP
  in_memblock += in_offset;//These are here so that I don't lose the original pointer from mmap
#endif
#ifndef USE_MMAP
  // staging buffer for one item in the pread+write path.
  // NOTE(review): declared as size_t buf[itemsize], i.e. itemsize*sizeof(size_t)
  // bytes -- 8x larger than needed; only the first itemsize bytes are used.
  size_t buf[itemsize];
#endif
  int interrupted=0;
#ifndef _OPENMP
  // progress bar only in serial builds (threads would interleave output)
  init_my_progressbar(dest_npart,&interrupted);
#endif
#ifdef USE_WRITEV
  //vector writes. Loop has to be re-written in units of IOV_MAX
  for(int i=0;i<dest_npart;i+=IOV_MAX) {
#ifndef _OPENMP
    my_progressbar(i,&interrupted);//progress-bar will be jumpy
#endif
    const int nleft = ((dest_npart - i) > IOV_MAX) ? IOV_MAX:(dest_npart - i);
    struct iovec iov[IOV_MAX];
    for(int j=0;j<nleft;j++){
      const size_t ind = random_indices[j];
      const size_t offset_in_field = ind * itemsize;
      // in_memblock is guaranteed valid here: USE_WRITEV requires USE_MMAP
      // (enforced by the #error at the top of the file)
      iov[j].iov_base = in_memblock + offset_in_field;
      iov[j].iov_len  = itemsize;
    }
    // advance past the batch just consumed (pointer passed by value, so the
    // caller's array is untouched)
    random_indices += nleft;
    ssize_t bytes_written = writev(out_fd, iov, nleft);
    XRETURN(bytes_written == nleft*itemsize, EXIT_FAILURE, "Expected to write bytes = %zu but wrote %zd instead\n",nleft*itemsize, bytes_written);
  }
#else //Not using WRITEV -> 3 options here i) MMAP + write ii) sendfile iii) pread + write
  for(int i=0;i<dest_npart;i++) {
#ifndef _OPENMP
    my_progressbar(i,&interrupted);
#endif
    const size_t ind = random_indices[i];
    size_t offset_in_field = ind * itemsize;
#if defined(USE_MMAP)
    ssize_t bytes_written = write(out_fd, in_memblock + offset_in_field, itemsize);
#elif defined(USE_SENDFILE)
    // sendfile updates input_offset itself; the out_fd file position advances
    off_t input_offset = in_offset + offset_in_field;
    ssize_t bytes_written = sendfile(out_fd, in_fd, &input_offset, itemsize);
#else
    ssize_t bytes_read = pread(in_fd, &buf, itemsize, in_offset + offset_in_field);
    XRETURN(bytes_read == itemsize, EXIT_FAILURE, "Expected to read bytes = %zu but read %zd instead\n",itemsize, bytes_read);
    ssize_t bytes_written = write(out_fd, &buf, itemsize);
#endif//default case (pread + write)
    XRETURN(bytes_written == itemsize, EXIT_FAILURE, "Expected to write bytes = %zu but wrote %zd instead\n",itemsize, bytes_written);
    // NOTE(review): out_offset is advanced but never read -- writes rely on
    // out_fd's implicit file position; presumably a leftover of a pwrite design
    out_offset += itemsize;
  }
#endif //end of WRITEV
#ifndef _OPENMP
  finish_myprogressbar(&interrupted);
#endif
  return EXIT_SUCCESS;
}
/* Subsample one Gadget snapshot file: pick dest_npart random dark-matter
 * particles from `inputfile` and write them (header, positions, velocities,
 * IDs -- fortran-style records with 4-byte markers) to `outputfile`.
 * `rng` drives the index selection; `fraction`/`nparttotal` fix the output
 * header's particle mass and totals.  Returns EXIT_SUCCESS or an error code. */
int subsample_single_gadgetfile(const int dest_npart, const char *inputfile, const char *outputfile, const size_t id_bytes, const gsl_rng *rng, const double fraction, const int64_t nparttotal)
{
  if(dest_npart <= 0) {
    fprintf(stderr,"Error: Desired number of particles =%d in the subsampled file must be > 0\n",dest_npart);
    return EXIT_FAILURE;
  }
  struct timespec tstart, t0, t1;
  current_utc_time(&tstart);
  int in_fd=-1;
  int out_fd=-1;
  int status = EXIT_FAILURE;
  in_fd = open(inputfile, O_RDONLY);
  if(in_fd < 0) {
    fprintf(stderr,"Error (in function %s, line # %d) while opening input file = `%s'\n",__FUNCTION__,__LINE__,inputfile);
    perror(NULL);
    return EXIT_FAILURE;
  }
#ifdef USE_MMAP
  struct stat sb;
  if(fstat(in_fd, &sb) < 0) {
    perror(NULL);
    return EXIT_FAILURE;
  }
  char *in_memblock = mmap(NULL, sb.st_size, PROT_READ, MAP_SHARED, in_fd, 0);
  if(in_memblock == MAP_FAILED) {
    /* bug fix: mmap failure was previously unchecked */
    perror(NULL);
    close(in_fd);
    return EXIT_FAILURE;
  }
#endif
  //Check that the output file does not exist.
  {
    FILE *fp = fopen(outputfile,"r");
    if(fp != NULL) {
      fclose(fp);
      fprintf(stderr,"Warning: Output file = `%s' should not exist. "
              "Aborting so as to avoid accidentally over-writing regular fles\n",outputfile);
      return EXIT_FAILURE;
    }
  }
  out_fd = open(outputfile, O_CREAT | O_WRONLY | O_TRUNC, S_IRUSR | S_IWUSR);//set the mode since the file is being created
  if(out_fd < 0) {
    fprintf(stderr,"Error (in function %s, line # %d) while opening output file = `%s'\n",__FUNCTION__,__LINE__, outputfile);
    perror(NULL);
    return EXIT_FAILURE;
  }
  /* Reserve disk-space for dest_npart particles, written as a fortran binary */
  const size_t pos_vel_itemsize = sizeof(float)*3;
  const off_t header_disk_size = 4 + sizeof(struct io_header) + 4; //header
  const off_t pos_disk_size = 4 + pos_vel_itemsize*dest_npart + 4; //all 3 positions for each particle
  const off_t vel_disk_size = 4 + pos_vel_itemsize*dest_npart + 4; //all 3 velocities for each particle
  const off_t id_disk_size  = 4 + id_bytes*dest_npart + 4;//all particle IDs
  /* These are all of the fields to be written */
  const off_t outputfile_size = header_disk_size + pos_disk_size + vel_disk_size + id_disk_size;
  status = posix_fallocate(out_fd, 0, outputfile_size);
  /* bug fix: posix_fallocate() returns 0 on success and a *positive* error
   * number on failure (it does not return -1 or set errno), so the previous
   * `status < 0` test could never detect a failure. */
  if(status != 0) {
    fprintf(stderr,"Error: Could not reserve disk space for %d particles (output file: `%s', expected file-size on disk = %zu bytes)\n",
            dest_npart, outputfile, (size_t) outputfile_size);
    perror(NULL);
    return status;
  }
  struct io_header hdr;
  int dummy1=0,dummy2=0;
  /* byte offsets of the payload of each input record (skipping the leading
   * 4-byte record marker) */
  const off_t pos_start_offset = header_disk_size + 4;
  const off_t vel_start_offset = header_disk_size + pos_disk_size + 4;
  const off_t id_start_offset  = header_disk_size + pos_disk_size + vel_disk_size + 4;
  /* read the header; the padding check below catches short/garbage reads */
  read(in_fd, &dummy1, 4);
  read(in_fd, &hdr, sizeof(struct io_header));
  read(in_fd, &dummy2, 4);
  if(dummy1 != 256 || dummy2 != 256) {
    fprintf(stderr,"Error: Padding bytes for header = %d (front) and %d (end) should be exactly 256\n",dummy1, dummy2);
    return EXIT_FAILURE;
  }
#ifdef USE_MMAP
  if(fraction == 1.0) {
    XRETURN(dest_npart == hdr.npart[1],EXIT_FAILURE,
            "for fraction = 1.0, input npart = %d must equal subsampled npart = %d\n",hdr.npart[1], dest_npart);
    /* XRETURN(sb.st_size == outputfile_size, EXIT_FAILURE, */
    /*         "for fraction = 1.0, input (=%zu bytes) and output file sizes (=%zu bytes) must be the same",sb.st_size, outputfile_size); */
  }
#endif
  for(int type=0;type<6;type++) {
    if(type==1) {
      continue;
    }
    /* There should only be dark-matter particles */
    if(hdr.npart[type] > 0) {
      fprintf(stderr,"Error: Input file `%s' contains %d particles of type=%d. This code only works for dark-matter only simulations\n",
              inputfile, hdr.npart[type], type);
      return EXIT_FAILURE;
    }
  }
  /* The number of subsampled particles wanted can at most be the numbers present in the file*/
  if(dest_npart > hdr.npart[1]) {
    fprintf(stderr,"Error: Number of subsampled particles = %d exceeds the number of particles in the file = %d\n",
            dest_npart, hdr.npart[1]);
    return EXIT_FAILURE;
  }
  /* output header: same as input but with the subsampled counts and the
   * per-particle mass scaled up to conserve total mass */
  struct io_header out_hdr = hdr;
  out_hdr.npart[1] = dest_npart;
  out_hdr.npartTotal[1] = nparttotal;
  out_hdr.npartTotalHighWord[1] = (nparttotal >> 32);
  out_hdr.mass[1] /= fraction;
  write(out_fd, &dummy1, sizeof(dummy1));
  write(out_fd, &out_hdr, sizeof(struct io_header));
  write(out_fd, &dummy2, sizeof(dummy2));
  //create an array of random indices (increasing order; reused for all fields
  //so positions, velocities and IDs refer to the same particles)
  size_t *random_indices = calloc(dest_npart, sizeof(*random_indices));
  status = gsl_ran_arr_index(rng, random_indices, (size_t) dest_npart, (size_t) hdr.npart[1]);
  /* bug fix: the sampling status was previously ignored (overwritten below) */
  if(status != EXIT_SUCCESS) {
    free(random_indices);
    return status;
  }
  //write positions
  current_utc_time(&t0);
  int posvel_disk_size=pos_vel_itemsize*dest_npart;
  write(out_fd, &posvel_disk_size, sizeof(posvel_disk_size));
  status = write_random_subsample_of_field(in_fd, out_fd, pos_start_offset, -1,dest_npart, pos_vel_itemsize, random_indices
#ifdef USE_MMAP
                                           ,in_memblock
#endif
                                           );
  if(status != EXIT_SUCCESS) {
    free(random_indices);
    return status;
  }
  write(out_fd, &posvel_disk_size, sizeof(posvel_disk_size));
  current_utc_time(&t1);
  double pos_time = REALTIME_ELAPSED_NS(t0,t1)*1e-9;
  //write velocities
  write(out_fd, &posvel_disk_size, sizeof(posvel_disk_size));
  status = write_random_subsample_of_field(in_fd, out_fd, vel_start_offset, -1,dest_npart, pos_vel_itemsize, random_indices
#ifdef USE_MMAP
                                           ,in_memblock
#endif
                                           );
  if(status != EXIT_SUCCESS) {
    free(random_indices);
    return status;
  }
  write(out_fd, &posvel_disk_size, sizeof(posvel_disk_size));
  current_utc_time(&t0);
  double vel_time = REALTIME_ELAPSED_NS(t1, t0)*1e-9;
  //write ids
  int id_size = id_bytes * dest_npart;
  write(out_fd, &id_size, sizeof(id_size));
  /* bug fix: the ID block must be read from id_start_offset.  The original
   * code passed vel_start_offset here, so the output "ID" record actually
   * contained velocity bytes (id_start_offset was computed but never used). */
  status = write_random_subsample_of_field(in_fd, out_fd, id_start_offset, -1,dest_npart, id_bytes, random_indices
#ifdef USE_MMAP
                                           ,in_memblock
#endif
                                           );
  if(status != EXIT_SUCCESS) {
    free(random_indices);
    return status;
  }
  write(out_fd, &id_size, sizeof(id_size));
  current_utc_time(&t1);
  double id_time = REALTIME_ELAPSED_NS(t0,t1)*1e-9;
  free(random_indices);
  //close the input file -> we are only reading, unlikely to be error
  close(in_fd);
#ifdef USE_MMAP
  munmap(in_memblock, sb.st_size);
#endif
  //check for error code here since disk quota might be hit
  status = close(out_fd);
  if(status != EXIT_SUCCESS){
    fprintf(stderr,"Error while closing output file = `%s'\n",outputfile);
    perror(NULL);
    return status;
  }
  current_utc_time(&t1);
  /* fprintf(stderr,"Done with file. Total time taken = %8.4lf seconds (pos_time = %6.3e vel_time = %6.3e id_time = %6.3e seconds)\n",REALTIME_ELAPSED_NS(tstart,t1)*1e-9, */
  /*         pos_time,vel_time,id_time); */
  return EXIT_SUCCESS;
}
// Driver: subsample every file of a multi-file Gadget snapshot to a given
// fraction of its dark-matter particles.  Usage: <fraction> <input> <output>.
// Two passes: first validates all files and draws per-file RNG seeds, then
// (optionally with OpenMP, one file per thread) subsamples each file.
int main(int argc,char **argv)
{
  const char argnames[][100]={"fraction","input file","output file"};
  int nargs=sizeof(argnames)/(sizeof(char)*100);
  char input_filename[MAXLEN];
  char output_filename[MAXLEN];
  struct timespec tstart,t0,t1;   // NOTE(review): t0 is declared but never used
  int64_t nparticles_written=0,nparticles_withmass=0;   // NOTE(review): both unused
  const gsl_rng_type * rng_type = gsl_rng_ranlxd1;
  // master RNG: draws one seed per file so the result is reproducible
  gsl_rng * all_procs_rng = gsl_rng_alloc(rng_type);
  unsigned long seed = 42;   // NOTE(review): never passed to gsl_rng_set -- the gsl default seed is used
  int64_t TotNumPart;
  current_utc_time(&tstart);
  if (argc != 4 ) {
    // usage message: echo what was supplied and list what is missing
    fprintf(stderr,"ERROR: %s usage - <fraction> <gadget snapshot name> <output filename>\n",argv[0]);
    fprintf(stderr,"Each file will be subsampled to get (roughly) that fraction for each particle-type\n");
    fprintf(stderr,"\nFound: %d parameters\n ",argc-1);
    int i;
    for(i=1;i<argc;i++) {
      if(i <= nargs)
        fprintf(stderr,"\t\t %s = `%s' \n",argnames[i-1],argv[i]);
      else
        fprintf(stderr,"\t\t <> = `%s' \n",argv[i]);
    }
    if(i <= nargs) {
      fprintf(stderr,"\nMissing required parameters: \n");
      for(i=argc;i<=nargs;i++)
        fprintf(stderr,"\t\t %20s = `?'\n",argnames[i-1]);
    }
    fprintf(stderr,"\n\n");
    exit(EXIT_FAILURE);
  }
  double fraction = atof(argv[1]);
  XRETURN(fraction > 0 && fraction <= 1.0, EXIT_FAILURE, "Subsample fraction = %lf needs to be in (0,1]", fraction);
  strncpy(input_filename,argv[2],MAXLEN);
  strncpy(output_filename,argv[3],MAXLEN);
  XRETURN(strncmp(input_filename,output_filename,MAXLEN) != 0, EXIT_FAILURE, "Input filename = `%s' and output filename = `%s' are the same",input_filename, output_filename);
  const int nfiles = get_gadget_nfiles(input_filename);
  struct io_header header = get_gadget_header(input_filename);
  TotNumPart = get_Numpart(&header);   // NOTE(review): computed but never used afterwards
  XRETURN(header.npartTotal[0] == 0 && header.npartTotalHighWord[0] == 0, EXIT_FAILURE, "Subsampling will not work with gas particles");
  fprintf(stderr,"Running `%s' on %d files with the following parameters \n",argv[0],nfiles);
  fprintf(stderr,"\n\t\t ---------------------------------------------\n");
  for(int i=1;i<=nargs;i++) {
    fprintf(stderr,"\t\t %-25s = %s \n",argnames[i-1],argv[i]);
  }
#ifdef _OPENMP
  // report the OpenMP thread count (thread 0 prints for the whole team)
#pragma omp parallel
  {
    int nthreads = omp_get_num_threads();
    int tid = omp_get_thread_num();
    if(tid == 0)
      fprintf(stderr,"\t\t %-25s = %d \n","nthreads", nthreads);
  }
#endif
  fprintf(stderr,"\t\t ---------------------------------------------\n\n");
  // seedtable[] entries are scaled from SIZE_MAX, so a 64-bit size_t is required
  if(SIZE_MAX != 0xFFFFFFFFFFFFFFFF) {
    fprintf(stderr,"Error: SIZE_MAX=%zu should be (2^64-1)\n", SIZE_MAX);
    return EXIT_FAILURE;
  }
  size_t id_bytes;
  int interrupted=0;
  size_t seedtable[nfiles];   // one deterministic RNG seed per input file
  int64_t nparttotal=0;
  // First pass: validate every file and accumulate the total subsampled count
  // (needed for the npartTotal fields written into each output header).
  fprintf(stderr,"Checking all input files ...\n");
  init_my_progressbar(nfiles, &interrupted);
  for(int ifile=0;ifile<nfiles;ifile++) {
    seedtable[ifile] = SIZE_MAX * gsl_rng_uniform(all_procs_rng);
    char inputfile[MAXLEN];
    my_snprintf(inputfile,MAXLEN,"%s.%d",input_filename, ifile);
    if(ifile == 0) {
      // ID width (4 or 8 bytes) is assumed identical across all files
      id_bytes = get_gadget_id_bytes(inputfile);
      fprintf(stderr,"Gadget ID bytes = %zu\n",id_bytes);
      interrupted=1;
    }
    struct io_header hdr = get_gadget_header(inputfile);
    const int dest_npart = fraction * hdr.npart[1];
    XRETURN((int) (dest_npart*3*sizeof(float)) < INT_MAX, EXIT_FAILURE,
            "Padding bytes will overflow, please reduce the value of fraction (currently, fraction = %lf)\n",fraction);
    my_progressbar(ifile,&interrupted);
    nparttotal += dest_npart;
  }
  finish_myprogressbar(&interrupted);
  fprintf(stderr,"Checking all input files .....done\n\n");
  // Second pass: subsample every file, optionally in parallel (one file per
  // loop iteration, dynamically scheduled).
  int numdone=0, errorflag=0, savestatus=0;
  init_my_progressbar(nfiles, &interrupted);
#ifdef _OPENMP
#pragma omp parallel shared(numdone, savestatus)
  {
    int nthreads = omp_get_num_threads();   // NOTE(review): unused in this region
    int tid = omp_get_thread_num();
#pragma omp for schedule(dynamic)
#endif
    for(int ifile=0;ifile<nfiles;ifile++) {
      // NOTE(review): errorflag/savestatus are read and written without
      // synchronization -- a benign race used only to skip remaining work
      // after the first failure.
      if(errorflag == 0) {
        //Setup the rng
        gsl_rng * rng = gsl_rng_alloc(rng_type);
        gsl_rng_set(rng, seedtable[ifile]);
#ifdef _OPENMP
#pragma omp atomic
        numdone++;
        if(tid == 0)
#endif
          my_progressbar(numdone,&interrupted);
        char inputfile[MAXLEN],outputfile[MAXLEN];
        my_snprintf(inputfile,MAXLEN,"%s.%d",input_filename,ifile);
        my_snprintf(outputfile, MAXLEN,"%s.%d",output_filename,ifile);
        struct io_header hdr = get_gadget_header(inputfile);
        const int dest_npart = fraction * hdr.npart[1];
        int status = subsample_single_gadgetfile(dest_npart, inputfile, outputfile, id_bytes, rng, fraction, nparttotal);
        if(status != EXIT_SUCCESS) {
          savestatus = status;
          errorflag = 1;
        }
        gsl_rng_free(rng);
        /* #ifdef _OPENMP */
        /* #pragma omp critical(info) */
        /*     { */
        /* #endif */
        /*       fprintf(stderr,"Wrote %d subsampled particles out of %d particles (%d/%d files).\n", */
        /*               dest_npart, hdr.npart[1], ifile+1,nfiles); */
        /*       interrupted=1; */
        /* #ifdef _OPENMP */
        /*     }//critical region */
        /* #endif */
      }//errorflag == 0 condition
    }//for loop over nfiles
#ifdef _OPENMP
  }//omp parallel region
#endif
  gsl_rng_free(all_procs_rng);
  finish_myprogressbar(&interrupted);
  if(errorflag != 0) {
    return savestatus;
  }
  current_utc_time(&t1);
  fprintf(stderr,"subsample_Gadget> Done. Wrote %"PRId64" particles to file `%s'. Time taken = %6.2lf mins\n",
          nparttotal,output_filename,REALTIME_ELAPSED_NS(tstart, t1)*1e-9/60.0);
  return EXIT_SUCCESS;
}
|
serial_tree_learner.h | #ifndef LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#define LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#include <LightGBM/tree_learner.h>
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/array_args.h>
#include <LightGBM/dataset.h>
#include <LightGBM/tree.h>
#include "feature_histogram.hpp"
#include "split_info.hpp"
#include "data_partition.hpp"
#include "leaf_splits.hpp"
#include <cstdio>
#include <vector>
#include <random>
#include <cmath>
#include <memory>
#ifdef USE_GPU
// Use 4KBytes aligned allocator for ordered gradients and ordered hessians when GPU is enabled.
// This is necessary to pin the two arrays in memory and make transferring faster.
#include <boost/align/aligned_allocator.hpp>
#endif
using namespace json11;
namespace LightGBM {
/*!
* \brief Used for learning a tree by single machine
*/
class SerialTreeLearner: public TreeLearner {
 public:
  explicit SerialTreeLearner(const Config* config);

  ~SerialTreeLearner();

  /*! \brief Bind the learner to a training dataset and allocate workspaces */
  void Init(const Dataset* train_data, bool is_constant_hessian) override;

  /*! \brief Swap in a new dataset of identical schema without reallocating config */
  void ResetTrainingData(const Dataset* train_data) override;

  /*! \brief Apply a new config (may resize the histogram pool) */
  void ResetConfig(const Config* config) override;

  /*!
  * \brief Learn one tree from the given gradients/hessians
  * \param forced_split_json optional dict of splits applied before search
  */
  Tree* Train(const score_t* gradients, const score_t *hessians, bool is_constant_hessian,
              Json& forced_split_json) override;

  /*! \brief Re-fit leaf outputs of an existing tree structure to new gradients */
  Tree* FitByExistingTree(const Tree* old_tree, const score_t* gradients, const score_t* hessians) const override;

  /*! \brief Same as above, but reuses precomputed per-row leaf predictions */
  Tree* FitByExistingTree(const Tree* old_tree, const std::vector<int>& leaf_pred,
                          const score_t* gradients, const score_t* hessians) override;

  /*! \brief Restrict training to the bagged subset of row indices */
  void SetBaggingData(const data_size_t* used_indices, data_size_t num_data) override {
    data_partition_->SetUsedDataIndices(used_indices, num_data);
  }

  /*! \brief Add tree->LeafOutput to out_score for every row, leaf-parallel */
  void AddPredictionToScore(const Tree* tree, double* out_score) const override {
    // a single-leaf tree contributes a constant handled elsewhere
    if (tree->num_leaves() <= 1) { return; }
    CHECK(tree->num_leaves() <= data_partition_->num_leaves());
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < tree->num_leaves(); ++i) {
      double output = static_cast<double>(tree->LeafOutput(i));
      data_size_t cnt_leaf_data = 0;
      auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
      for (data_size_t j = 0; j < cnt_leaf_data; ++j) {
        out_score[tmp_idx[j]] += output;
      }
    }
  }

  /*! \brief Let the objective renew the leaf outputs (e.g. for rank/quantile objectives) */
  void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, const double* prediction,
                       data_size_t total_num_data, const data_size_t* bag_indices, data_size_t bag_cnt) const override;

  void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, double prediction,
                       data_size_t total_num_data, const data_size_t* bag_indices, data_size_t bag_cnt) const override;

 protected:
  /*!
  * \brief Some initial works before training
  */
  virtual void BeforeTrain();

  /*!
  * \brief Some initial works before FindBestSplit
  */
  virtual bool BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf);

  /*! \brief Find the best split for the two current candidate leaves */
  virtual void FindBestSplits();

  /*! \brief Build histograms for the used features (optionally by subtraction) */
  virtual void ConstructHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);

  /*! \brief Scan histograms for the best threshold of each used feature */
  virtual void FindBestSplitsFromHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);

  /*!
  * \brief Partition tree and data according best split.
  * \param tree Current tree, will be splitted on this function.
  * \param best_leaf The index of leaf that will be splitted.
  * \param left_leaf The index of left leaf after splitted.
  * \param right_leaf The index of right leaf after splitted.
  */
  virtual void Split(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf);

  /* Force splits with forced_split_json dict and then return num splits forced.*/
  virtual int32_t ForceSplits(Tree* tree, Json& forced_split_json, int* left_leaf,
                              int* right_leaf, int* cur_depth,
                              bool *aborted_last_force_split);

  /*!
  * \brief Get the number of data in a leaf
  * \param leaf_idx The index of leaf
  * \return The number of data in the leaf_idx leaf
  */
  inline virtual data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const;

  /*! \brief number of data */
  data_size_t num_data_;
  /*! \brief number of features */
  int num_features_;
  /*! \brief training data */
  const Dataset* train_data_;
  /*! \brief gradients of current iteration */
  const score_t* gradients_;
  /*! \brief hessians of current iteration */
  const score_t* hessians_;
  /*! \brief training data partition on leaves */
  std::unique_ptr<DataPartition> data_partition_;
  /*! \brief used for generate used features */
  Random random_;
  /*! \brief used for sub feature training, is_feature_used_[i] = false means don't used feature i */
  std::vector<int8_t> is_feature_used_;
  /*! \brief pointer to histograms array of parent of current leaves */
  FeatureHistogram* parent_leaf_histogram_array_;
  /*! \brief pointer to histograms array of smaller leaf */
  FeatureHistogram* smaller_leaf_histogram_array_;
  /*! \brief pointer to histograms array of larger leaf */
  FeatureHistogram* larger_leaf_histogram_array_;
  /*! \brief store best split points for all leaves */
  std::vector<SplitInfo> best_split_per_leaf_;
  /*! \brief stores best thresholds for all feature for smaller leaf */
  std::unique_ptr<LeafSplits> smaller_leaf_splits_;
  /*! \brief stores best thresholds for all feature for larger leaf */
  std::unique_ptr<LeafSplits> larger_leaf_splits_;
  /*! \brief indices of features that can still produce valid splits */
  std::vector<int> valid_feature_indices_;
#ifdef USE_GPU
  /*! \brief gradients of current iteration, ordered for cache optimized, aligned to 4K page */
  std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized, aligned to 4K page */
  std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_hessians_;
#else
  /*! \brief gradients of current iteration, ordered for cache optimized */
  std::vector<score_t> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized */
  std::vector<score_t> ordered_hessians_;
#endif
  /*! \brief Store ordered bin */
  std::vector<std::unique_ptr<OrderedBin>> ordered_bins_;
  /*! \brief True if has ordered bin */
  bool has_ordered_bin_ = false;
  /*! \brief is_data_in_leaf_[i] != 0 means i-th data is marked */
  std::vector<char> is_data_in_leaf_;
  /*! \brief used to cache historical histogram to speed up*/
  HistogramPool histogram_pool_;
  /*! \brief config of tree learner*/
  const Config* config_;
  /*! \brief number of OpenMP threads used during training */
  int num_threads_;
  /*! \brief feature indices that have an ordered bin */
  std::vector<int> ordered_bin_indices_;
  /*! \brief true when all hessians equal a constant (e.g. L2 loss) */
  bool is_constant_hessian_;
};
inline data_size_t SerialTreeLearner::GetGlobalDataCountInLeaf(int leaf_idx) const {
  // A negative index denotes "no leaf": report zero rows for it.
  if (leaf_idx < 0) {
    return 0;
  }
  return data_partition_->leaf_count(leaf_idx);
}
} // namespace LightGBM
#endif // LightGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
|
trisolv.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "trisolv.h"
/* Array initialization. */
static
void init_array(int n,
		DATA_TYPE POLYBENCH_2D(A,N,N,n,n),
		DATA_TYPE POLYBENCH_1D(x,N,n),
		DATA_TYPE POLYBENCH_1D(c,N,n))
{
  /* Fill x and c with i/n and the matrix A with (i*j)/n. */
  int row, col;
  for (row = 0; row < n; row++) {
    x[row] = ((DATA_TYPE) row) / n;
    c[row] = x[row];
    for (col = 0; col < n; col++)
      A[row][col] = ((DATA_TYPE) row*col) / n;
  }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int n,
		 DATA_TYPE POLYBENCH_1D(x,N,n))
{
  /* Dump x to stderr; a newline is emitted after every 20th element
     (including index 0, matching the polybench convention). */
  int i = 0;
  while (i < n) {
    fprintf (stderr, DATA_PRINTF_MODIFIER, x[i]);
    if (i % 20 == 0)
      fprintf (stderr, "\n");
    ++i;
  }
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_trisolv(int n,
		    DATA_TYPE POLYBENCH_2D(A,N,N,n,n),
		    DATA_TYPE POLYBENCH_1D(x,N,n),
		    DATA_TYPE POLYBENCH_1D(c,N,n))
{
  int i, j;
#pragma scop
  /* Forward substitution for the lower-triangular system A*x = c.
   * The outer loop carries a true dependence (x[i] needs every x[j], j < i),
   * so only the inner dot product is parallelized, as a reduction.
   *
   * Bug fix: the original placed an "omp for" worksharing construct inside
   * an "omp master" region.  A worksharing region must be encountered by all
   * threads of the team or by none (OpenMP spec, worksharing constructs);
   * only the master thread reached it here, which is non-conforming and can
   * deadlock at the loop's implicit barrier.  The form below is conforming.
   * Floating-point results may differ in the last bits from the sequential
   * version because the reduction reorders the summation. */
  for (i = 0; i < _PB_N; i++)
    {
      DATA_TYPE acc = c[i];
#pragma omp parallel for private(j) reduction(-:acc) schedule(static)
      for (j = 0; j <= i - 1; j++)
	acc -= A[i][j] * x[j];
      /* diagonal is assumed nonzero (property of the benchmark input) */
      x[i] = acc / A[i][i];
    }
#pragma endscop
}
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int n = N;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, N, N, n, n);
  POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, N, n);
  POLYBENCH_1D_ARRAY_DECL(c, DATA_TYPE, N, n);

  /* Initialize array(s). */
  init_array (n, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x), POLYBENCH_ARRAY(c));

  /* Start timer. */
  polybench_start_instruments;

  /* Run kernel (solves the lower-triangular system A*x = c in place in x). */
  kernel_trisolv (n, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x), POLYBENCH_ARRAY(c));

  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(x)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(x);
  POLYBENCH_FREE_ARRAY(c);

  return 0;
}
|
sample_task_multiple_producer.c | /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
* See LICENSE.txt in top-level directory.
*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
int main(int argc, char * argv[]) {
    /* Scale every element of a vector by 0.9, one OpenMP task per element,
       time the parallel region, and verify the result.
       argv[1] (optional): element count, default 100.
       Output: "<nthreads> <seconds>". Returns 0 on success, 1 on failure. */
    int i,num=(argc>1)?atoi(argv[1]):100;
    int nthreads;
    struct timeval t_start, t_end;
    double time;
    double *a = (double *)malloc(sizeof(double)*num);
    if (a == NULL) {
        /* BUGFIX: the original dereferenced a NULL buffer on allocation
           failure (and relied on implicit declarations of atoi/malloc). */
        fprintf(stderr, "allocation of %d doubles failed\n", num);
        return 1;
    }
    #pragma omp parallel
    {
        /* Every thread writes the same value; benign by construction. */
        nthreads=omp_get_num_threads();
    }
    for(i=0;i<num;i++){
        a[i]=i;
    }
    gettimeofday(&t_start,NULL);
    #pragma omp parallel
    {
        #pragma omp for
        for(i=0;i<num;i++){
            /* i is implicitly firstprivate inside the task. */
            #pragma omp task
            {
                a[i]*=0.9;
            }
        }
        /* Implicit barrier here guarantees all tasks finished. */
    }
    gettimeofday(&t_end,NULL);
    time=(t_end.tv_sec * 1000000 + t_end.tv_usec) -
         (t_start.tv_sec * 1000000 + t_start.tv_usec);
    printf("%d %f\n",nthreads,time/1000000.0);
    for(i=0;i<num;i++){
        /* Exact comparison is safe: both sides compute (double)i * 0.9. */
        if(a[i]!=i*0.9){
            printf("a[%d]=%f != %f\n",i,a[i],i*0.9);
            free(a);
            return 1;
        }
    }
    free(a);      /* BUGFIX: was leaked */
    return 0;     /* BUGFIX: was missing on the success path */
}
|
problem.fv.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#ifndef M_PI
#define M_PI 3.14159265358979323846 // in case math.h doesn't define it
#endif
double evaluateBeta(double x, double y, double z, double h, int add_Bxx, int add_Byy, int add_Bzz){
double b = 0.25;
double a = 2.0*M_PI; // one period on [0,1]^3
double B = 1.0 + b*sin(a*x)*sin(a*y)*sin(a*z);
//double Bx = a*b*cos(a*x)*sin(a*y)*sin(a*z);
//double By = a*b*sin(a*x)*cos(a*y)*sin(a*z);
//double Bz = a*b*sin(a*x)*sin(a*y)*cos(a*z);
double Bxx = -a*a*b*sin(a*x)*sin(a*y)*sin(a*z);
double Byy = -a*a*b*sin(a*x)*sin(a*y)*sin(a*z);
double Bzz = -a*a*b*sin(a*x)*sin(a*y)*sin(a*z);
// 4th order correction to approximate the conversion of cell-centered values to cell-averaged...
if(add_Bxx)B+=(h*h/24.0)*Bxx;
if(add_Byy)B+=(h*h/24.0)*Byy;
if(add_Bzz)B+=(h*h/24.0)*Bzz;
return(B);
}
//------------------------------------------------------------------------------------------------------------------------------
// Right-hand side f(x,y,z) of the test problem, evaluated at a point.
// When add_Fxx/add_Fyy/add_Fzz are set, the corresponding (h^2/24)*second-
// derivative term is added per direction: the 4th-order correction that
// converts a cell-centered point value into a cell-averaged one.
double evaluateF(double x, double y, double z, double h, int add_Fxx, int add_Fyy, int add_Fzz){
  // Compiled-out reference branch: an 8-pole problem (one pole per octant of
  // the unit cube) whose hand-derived derivatives the author was unsure of.
  #if 0 // harder problem... not sure I manually differentiated this right...
  // 8 'poles', one per octant
  double cx = 0.75;
  double cy = 0.75;
  double cz = 0.75,sign = 1.0;
  if(x<0.5){cx = 0.25;sign*=-1.0;}
  if(y<0.5){cy = 0.25;sign*=-1.0;}
  if(z<0.5){cz = 0.25;sign*=-1.0;}
  double r0 = 0.1;
  double a = M_PI/2/r0;
  double r = pow( (x-cx)*(x-cx) + (y-cy)*(y-cy) + (z-cz)*(z-cz) , 0.5); // euclidean distance
  double rx = pow( (x-cx)*(x-cx) + (y-cy)*(y-cy) + (z-cz)*(z-cz) , -0.5)*(x-cx); // dr/dx
  double ry = pow( (x-cx)*(x-cx) + (y-cy)*(y-cy) + (z-cz)*(z-cz) , -0.5)*(y-cy);
  double rz = pow( (x-cx)*(x-cx) + (y-cy)*(y-cy) + (z-cz)*(z-cz) , -0.5)*(z-cz);
  double rxx = -pow( (x-cx)*(x-cx) + (y-cy)*(y-cy) + (z-cz)*(z-cz) , -1.5)*(x-cx)*(x-cx) + pow( (x-cx)*(x-cx) + (y-cy)*(y-cy) + (z-cz)*(z-cz) , -0.5); // d2r/dx2
  double ryy = -pow( (x-cx)*(x-cx) + (y-cy)*(y-cy) + (z-cz)*(z-cz) , -1.5)*(y-cy)*(y-cy) + pow( (x-cx)*(x-cx) + (y-cy)*(y-cy) + (z-cz)*(z-cz) , -0.5);
  double rzz = -pow( (x-cx)*(x-cx) + (y-cy)*(y-cy) + (z-cz)*(z-cz) , -1.5)*(z-cz)*(z-cz) + pow( (x-cx)*(x-cx) + (y-cy)*(y-cy) + (z-cz)*(z-cz) , -0.5);
  double p = 6.0;
  double F = sign*( pow(cos(a*r),p ) );
  double Fx = sign*( -a*p*pow(cos(a*r),p-1)*sin(a*r)*rx );
  double Fy = sign*( -a*p*pow(cos(a*r),p-1)*sin(a*r)*ry );
  double Fz = sign*( -a*p*pow(cos(a*r),p-1)*sin(a*r)*rz );
  double Fxx = sign*( -a*a*p*pow(cos(a*r),p )*rx*rx + a*a*p*(p-1)*pow(cos(a*r),p-2)*pow(sin(a*r),2)*rx*rx - a*p*pow(cos(a*r),p-1)*sin(a*r)*rxx );
  double Fyy = sign*( -a*a*p*pow(cos(a*r),p )*ry*ry + a*a*p*(p-1)*pow(cos(a*r),p-2)*pow(sin(a*r),2)*ry*ry - a*p*pow(cos(a*r),p-1)*sin(a*r)*ryy );
  double Fzz = sign*( -a*a*p*pow(cos(a*r),p )*rz*rz + a*a*p*(p-1)*pow(cos(a*r),p-2)*pow(sin(a*r),2)*rz*rz - a*p*pow(cos(a*r),p-1)*sin(a*r)*rzz );
  if(r>=r0){
  F = 0.0;
  Fx = 0.0;
  Fy = 0.0;
  Fz = 0.0;
  Fxx = 0.0;
  Fyy = 0.0;
  Fzz = 0.0;
  }
  #else
  // Active branch: smooth separable forcing
  //   F = sin^7(2*pi*x) * sin^7(2*pi*y) * sin^7(2*pi*z)
  // with analytically-derived pure second derivatives Fxx/Fyy/Fzz below.
  double a = 2.0*M_PI;
  double p = 7.0;
  double F = pow(sin(a*x),p )*pow(sin(a*y),p )*pow(sin(a*z),p );
  //double Fx = a*p*pow(sin(a*x),p-1)*pow(sin(a*y),p )*pow(sin(a*z),p )*cos(a*x);
  //double Fy = a*p*pow(sin(a*x),p )*pow(sin(a*y),p-1)*pow(sin(a*z),p )*cos(a*y);
  //double Fz = a*p*pow(sin(a*x),p )*pow(sin(a*y),p )*pow(sin(a*z),p-1)*cos(a*z);
  double Fxx = -a*a*p*pow(sin(a*x),p )*pow(sin(a*y),p )*pow(sin(a*z),p ) + a*a*p*(p-1)*pow(sin(a*x),p-2)*pow(sin(a*y),p )*pow(sin(a*z),p )*pow(cos(a*x),2);
  double Fyy = -a*a*p*pow(sin(a*x),p )*pow(sin(a*y),p )*pow(sin(a*z),p ) + a*a*p*(p-1)*pow(sin(a*x),p )*pow(sin(a*y),p-2)*pow(sin(a*z),p )*pow(cos(a*y),2);
  double Fzz = -a*a*p*pow(sin(a*x),p )*pow(sin(a*y),p )*pow(sin(a*z),p ) + a*a*p*(p-1)*pow(sin(a*x),p )*pow(sin(a*y),p )*pow(sin(a*z),p-2)*pow(cos(a*z),2);
  #endif
  // 4th order correction to approximate the conversion of cell-centered values to cell-averaged...
  if(add_Fxx)F+=(h*h/24.0)*Fxx;
  if(add_Fyy)F+=(h*h/24.0)*Fyy;
  if(add_Fzz)F+=(h*h/24.0)*Fzz;
  return(F);
}
//------------------------------------------------------------------------------------------------------------------------------
// Populate the level's coefficient arrays (alpha, the three face-centered
// betas) and the right-hand side F for every cell (plus the high faces) of
// every box owned by this process.
//   hLevel : grid spacing at this level (stored into level->h).
//   a, b   : NOTE(review): not referenced in this body -- presumably the
//            Helmholtz coefficients kept for interface symmetry; confirm
//            against the callers.
void initialize_problem(level_type * level, double hLevel, double a, double b){
  level->h = hLevel;
  int box;
  for(box=0;box<level->num_my_boxes;box++){
    int i,j,k;
    const int jStride = level->my_boxes[box].jStride;
    const int kStride = level->my_boxes[box].kStride;
    const int ghosts = level->my_boxes[box].ghosts;
    // NOTE(review): the same scalar .dim is used for all three extents --
    // boxes appear to be cubic; confirm against the box declaration.
    const int dim_i = level->my_boxes[box].dim;
    const int dim_j = level->my_boxes[box].dim;
    const int dim_k = level->my_boxes[box].dim;
    #ifdef _OPENMP
    // Iterations are independent (each writes a distinct ijk), so the
    // triple loop can be collapsed and parallelized safely.
    #pragma omp parallel for private(k,j,i) collapse(3)
    #endif
    for(k=0;k<=dim_k;k++){ // include high face
    for(j=0;j<=dim_j;j++){ // include high face
    for(i=0;i<=dim_i;i++){ // include high face
      //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      // Flattened index into ghost-padded box storage.
      int ijk = (i+ghosts) + (j+ghosts)*jStride + (k+ghosts)*kStride;
      double x = hLevel*( (double)(i+level->my_boxes[box].low.i) + 0.5 ); // +0.5 to get to the center of cell
      double y = hLevel*( (double)(j+level->my_boxes[box].low.j) + 0.5 );
      double z = hLevel*( (double)(k+level->my_boxes[box].low.k) + 0.5 );
      double A,Bi,Bj,Bk;
      //double A,B,Bx,By,Bz,Bi,Bj,Bk;
      //double U,Ux,Uy,Uz,Uxx,Uyy,Uzz;
      //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      // Constant-coefficient defaults; overridden below for variable beta.
      A = 1.0;
      Bi = 1.0;
      Bj = 1.0;
      Bk = 1.0;
      #ifdef STENCIL_VARIABLE_COEFFICIENT // variable coefficient problem...
      // Each beta is evaluated at the corresponding low face center; the
      // two tangential second-derivative corrections are enabled per face.
      Bi=evaluateBeta(x-hLevel*0.5,y ,z ,hLevel,0,1,1); // face-centered value of Beta for beta_i
      Bj=evaluateBeta(x ,y-hLevel*0.5,z ,hLevel,1,0,1); // face-centered value of Beta for beta_j
      Bk=evaluateBeta(x ,y ,z-hLevel*0.5,hLevel,1,1,0); // face-centered value of Beta for beta_k
      #endif
      //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      // Cell-averaged right-hand side (all three corrections on).
      double F=evaluateF(x,y,z,hLevel,1,1,1);
      //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      #ifdef VECTOR_ALPHA
      level->my_boxes[box].vectors[VECTOR_ALPHA ][ijk] = A;
      #endif
      level->my_boxes[box].vectors[VECTOR_BETA_I][ijk] = Bi;
      level->my_boxes[box].vectors[VECTOR_BETA_J][ijk] = Bj;
      level->my_boxes[box].vectors[VECTOR_BETA_K][ijk] = Bk;
      level->my_boxes[box].vectors[VECTOR_F ][ijk] = F;
      //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    }}}
  }
}
//------------------------------------------------------------------------------------------------------------------------------
|
game.c | /*
* Copyright (C) 2009 Raphael Kubo da Costa <kubito@gmail.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <assert.h>
#include <pcre.h>
#include <string.h>
#include "config.h"
#include "game.h"
#include "mem.h"
/**
* Returns the string matched by the first subgroup pattern in a regular expression.
*
* This function is useful if the regular expression is used primarily to find
* one single pattern inside the given string.
* Please note that it's up to the programmer to specify a regular expression with
* at least one subgroup.
*
* @param pattern The regular expression to use.
* @param subject The string to be searched.
*
* @return The matched string.
*/
static char *__re_get_first_match(const char *pattern, const char *subject) {
  int erroffset;
  const char *error;
  char *match;
  int ovector[30];
  int rc;
  pcre *re;
  size_t len;

  re = pcre_compile(pattern, PCRE_NEWLINE_LF, &error, &erroffset, NULL);
  if (re == NULL)
    return NULL;

  rc = pcre_exec(re, NULL, subject, strlen(subject), 0, 0, ovector, 30);
  if (rc <= 0) {
    /* BUGFIX: the compiled pattern was leaked on the no-match path. */
    pcre_free(re);
    return NULL;
  }

  /* ovector[2]/ovector[3] delimit the first capturing subgroup; callers
     guarantee the pattern contains at least one subgroup. */
  len = (size_t)(ovector[3] - ovector[2]);
  match = MEM_ALLOC_N(char, len + 1);
  strncpy(match, subject + ovector[2], len);
  match[len] = '\0'; /* strncpy does not guarantee termination */

  pcre_free(re);
  return match;
}
/**
* Parses the custom board file format.
*
* @param game Pointer to a Game structure.
* @param board The file to read.
*
* @retval 0 The file was parsed correctly.
* @retval 1 The file could not be parsed.
*/
static int __parse_custom_format(Game *game, FILE *board) {
  /* BUGFIX: "^([#.]{%zu})$" needs up to 21 bytes for a 10-digit column
     count; the original 20-byte buffer could overflow.  32 is ample. */
  char boardline_re[32];
  char *endptr;
  char header_line[16];
  size_t i, j;
  char *line;
  char *s;

  /* First line - "Rows:NNN" */
  if (fgets(header_line, 16, board) == NULL) /* BUGFIX: fgets was unchecked */
    return 1;
  s = __re_get_first_match("^Rows: *(\\d{1,10})$", header_line);
  if (!s)
    return 1;
  game->rows = (size_t) strtol(s, &endptr, 10);
  if (*endptr != '\0') {
    free(s);
    return 1;
  }
  free(s);

  /* Second line - "Cols:NNN" */
  if (fgets(header_line, 16, board) == NULL)
    return 1;
  s = __re_get_first_match("^Cols: *(\\d{1,10})$", header_line);
  if (!s)
    return 1;
  game->cols = (size_t) strtol(s, &endptr, 10);
  if (*endptr != '\0') {
    free(s);
    return 1;
  }
  free(s);

  /* (Re)allocate memory for the board */
  if (game->board)
    free(game->board);
  game->board = MEM_ALLOC_N(char, game->cols * game->rows);

  /* Read game->rows lines describing the board ('#' alive, '.' dead). */
  snprintf(boardline_re, sizeof boardline_re, "^([#.]{%zu})$", game->cols);
  line = MEM_ALLOC_N(char, game->cols + 2);
  for (i = 0; i < game->rows; i++) {
    if (fgets(line, game->cols + 2, board) == NULL) { /* truncated file */
      free(line);
      return 1;
    }
    s = __re_get_first_match(boardline_re, line);
    if (!s) {
      free(line);
      return 1;
    }
    for (j = 0; j < game->cols; j++) {
      if (s[j] == '#')
        game_set_alive(game, i, j);
      else
        game_set_dead(game, i, j);
    }
    free(s);
  }
  free(line);
  return 0;
}
/**
* Analyzes a particular part of the game board and update its state to the next generation.
*
* @param t Pointer to a GameInfo structure.
*/
/* Advance the board region described by tinfo one generation, writing the
 * result into tinfo->new_board (the current board is only read). */
void process_slice(GameInfo *tinfo) {
  char neighbours;
  size_t row, col;

  /* Columns are independent: each iteration writes disjoint cells of
     new_board, so only row/neighbours need to be thread-private. */
#pragma omp parallel for private(row, neighbours) shared(tinfo) default(none)
  for (col = 0; col < tinfo->game->cols; col++) {
    for (row = 0; row < tinfo->game->rows; row++) {
      /* Count live neighbours.  game_is_alive() reports out-of-range
         cells as dead, so only the size_t underflows (row-1 / col-1
         at zero) need explicit guards. */
      neighbours = 0;
      if (game_is_alive(tinfo->game, row, col + 1)) neighbours++;
      if (game_is_alive(tinfo->game, row + 1, col)) neighbours++;
      if (game_is_alive(tinfo->game, row + 1, col + 1)) neighbours++;
      if (row > 0) {
        if (game_is_alive(tinfo->game, row - 1, col)) neighbours++;
        if (game_is_alive(tinfo->game, row - 1, col + 1)) neighbours++;
      }
      if (col > 0) {
        if (game_is_alive(tinfo->game, row, col - 1)) neighbours++;
        if (game_is_alive(tinfo->game, row + 1, col - 1)) neighbours++;
      }
      if ((row > 0) && (col > 0))
        if (game_is_alive(tinfo->game, row - 1, col - 1)) neighbours++;

      /* Conway's rules: 3 -> born/stays alive, 2 -> unchanged,
         anything else -> dead. */
      size_t cell = row * tinfo->game->cols + col;
      if (neighbours == 3)
        tinfo->new_board[cell] = 1;
      else if (neighbours == 2)
        tinfo->new_board[cell] = tinfo->game->board[cell];
      else
        tinfo->new_board[cell] = 0;
    }
  }
}
/* Release a Game and its board.  Accepts NULL. */
void game_free(Game *game) {
  if (!game)
    return;
  free(game->board); /* free(NULL) is a defined no-op */
  free(game);
}
/* Return 1 if the cell at (row, col) is alive, 0 otherwise.
 * Out-of-range coordinates are reported as dead, so neighbour scans need
 * no bounds handling of their own. */
int game_is_alive(Game *game, size_t row, size_t col) {
  assert(game);
  assert(game->board);
  if ((row < game->rows) && (col < game->cols))
    return game->board[row * game->cols + col] == 1;
  return 0;
}
/* Logical complement of game_is_alive(). */
int game_is_dead(Game *game, size_t row, size_t col) {
  return game_is_alive(game, row, col) ? 0 : 1;
}
Game *game_new(void) {
Game *game = MEM_ALLOC(Game);
game->board = NULL;
game->cols = 0;
game->rows = 0;
return game;
}
/* Parse the board file attached to `config` into `game`, restoring the
 * stream position afterwards so the caller's handle is unaffected.
 * Returns 0 on success, 1 on parse failure. */
int game_parse_board(Game *game, GameConfig *config) {
  FILE *stream;
  long saved_pos;
  int status;

  assert(game);
  assert(config);
  assert(config->input_file);

  stream = config->input_file;
  saved_pos = ftell(stream);           /* remember the caller's position */
  fseek(stream, 0, SEEK_SET);
  status = __parse_custom_format(game, stream);
  fseek(stream, saved_pos, SEEK_SET);  /* rewind for the caller */
  return status;
}
/* Write the board to stdout, one row per line: '#' = alive, '.' = dead. */
void game_print_board(Game *game) {
  size_t r, c;
  assert(game);
  assert(game->board);
  for (r = 0; r < game->rows; r++) {
    for (c = 0; c < game->cols; c++)
      printf("%c", game_is_alive(game, r, c) ? '#' : '.');
    printf("\n");
  }
}
/* Mark the cell at (row, col) as alive; coordinates must be in range. */
void game_set_alive(Game *game, size_t row, size_t col) {
  assert(game);
  assert(game->board);
  assert(row < game->rows);
  assert(col < game->cols);
  game->board[row * game->cols + col] = 1;
}
/* Mark the cell at (row, col) as dead; coordinates must be in range. */
void game_set_dead(Game *game, size_t row, size_t col) {
  assert(game);
  assert(game->board);
  assert(row < game->rows);
  assert(col < game->cols);
  game->board[row * game->cols + col] = 0;
}
/* Advance the game one generation.  Returns 0 on success. */
int game_tick(Game *game) {
  char *new_board;
  GameInfo info;

  new_board = MEM_ALLOC_N(char, game->rows * game->cols);

  /* The GameInfo only needs to live for the duration of process_slice()
     (the per-thread slicing it was designed for is now handled by OpenMP
     inside process_slice), so a stack object replaces the original
     heap allocation and the leftover tnum indexing. */
  info.game = game;
  info.new_board = new_board;
  process_slice(&info);

  /* Make game use the new board and drop the old one */
  free(game->board);
  game->board = new_board;
  return 0;
}
|
direct_task.c | //===-- direct_task.c - Task example with code outlining shown ----*- C -*-===//
//
// Part of the LOMP Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <omp.h>
#define NTASKS 16
/* function prototypes of the runtime */
typedef int32_t (*thunk_ptr_t)(int32_t, void *);
void * __kmpc_omp_task_alloc(void *, int32_t, void *, size_t, size_t,
thunk_ptr_t);
int32_t __kmpc_omp_task(void *, int32_t, void *);
/* Reference version using the OpenMP `task` directive: the calling thread
 * creates NTASKS tasks, one per second, so idle threads in the team can
 * pick them up concurrently.  The transformed variants below reproduce
 * what the compiler outlines from this construct.
 * @param d  multiplier captured (firstprivate) into each task. */
void produce_original(double d) {
  for (int i = 0; i < NTASKS; i++) {
    // create a new task to for another thread to steal
    printf("%d: creating task\n", omp_get_thread_num());
#pragma omp task firstprivate(i) firstprivate(d)
    {
      double answer = i * d;
      printf("%d: Hello from task %d and the answer is %lf\n",
             omp_get_thread_num(), i, answer);
    }
    // slow down a little in producing the tasks, so that a worker
    // can steal the task from the queue
    sleep(1);
  }
}
/* Hand-outlined task body mirroring the thunk the compiler generates for the
 * `omp task` in produce_original().  `task` is the descriptor returned by
 * __kmpc_omp_task_alloc(); its first pointer-sized field points at the
 * firstprivate payload (int i at offset 0, double d at offset 8).
 * NOTE(review): the direct pointer casts assume the payload is suitably
 * aligned for double; __omp_produce_thunk_0_memcpy below is the strictly
 * portable form. */
int32_t __omp_produce_thunk_0(int32_t gtid, void * task) {
  (void)gtid; /* global thread id, unused here */
  char * data = ((char **)task)[0];
  int i = *((int *)data);
  double d = *((double *)(data + 8));
  double answer = i * d;
  printf("%d: Hello from task %d and the answer is %lf\n", omp_get_thread_num(),
         i, answer);
  return 0;
}
/* Manually performs the transformation the compiler applies to the `omp task`
 * construct in produce_original(): allocate a task descriptor (40-byte header
 * plus 16-byte payload -- TODO confirm against the runtime's ABI headers),
 * copy the firstprivate values into the payload, and enqueue the task. */
void produce_transformed(double d) {
  for (int i = 0; i < NTASKS; i++) {
    // create a new task to for another thread to steal
    printf("%d: creating task\n", omp_get_thread_num());
    void * task = __kmpc_omp_task_alloc(NULL, 0, NULL, 40 + 16, 16,
                                        __omp_produce_thunk_0);
    /* First field of the descriptor points at the firstprivate payload. */
    char * data = ((char **)task)[0];
    *((int *)data) = i;
    *((double *)(data + 8)) = d;
    __kmpc_omp_task(NULL, 0, task);
    // slow down a little in producing the tasks, so that a worker
    // can steal the task from the queue
    sleep(1);
  }
}
/* Portable variant of __omp_produce_thunk_0: reads the payload via memcpy,
 * avoiding the alignment/strict-aliasing assumptions of the direct casts. */
int32_t __omp_produce_thunk_0_memcpy(int32_t gtid, void * task) {
  (void)gtid; /* global thread id, unused here */
  char * data = ((char **)task)[0];
  int i;
  double d;
  memcpy(&i, data + 0, sizeof(int));
  memcpy(&d, data + 8, sizeof(double));
  double answer = i * d;
  printf("%d: Hello from task %d and the answer is %lf\n", omp_get_thread_num(),
         i, answer);
  return 0;
}
/* Same manual outlining as produce_transformed(), but fills the payload with
 * memcpy (the portable counterpart of the direct-cast stores). */
void produce_transformed_memcpy(double d) {
  for (int i = 0; i < NTASKS; i++) {
    // create a new task to for another thread to steal
    printf("%d: creating task\n", omp_get_thread_num());
    void * task = __kmpc_omp_task_alloc(NULL, 0, NULL, 40 + 16, 16,
                                        __omp_produce_thunk_0_memcpy);
    /* First field of the descriptor points at the firstprivate payload. */
    char * data = ((char **)task)[0];
    memcpy(data + 0, &i, sizeof(int));
    memcpy(data + 8, &d, sizeof(double));
    __kmpc_omp_task(NULL, 0, task);
    // slow down a little in producing the tasks, so that a worker
    // can steal the task from the queue
    sleep(1);
  }
}
/* Intentionally empty: non-producer threads call this and then execute the
 * queued tasks while waiting in the implicit barrier at the end of the
 * enclosing parallel region. */
void consume() {
  // This function is only a placeholder to mark a worker thread
  // as being a consumer. The actual task execution is done in
  // the barrier of the parallel region.
}
/* Thread 0 of the team produces tasks via the memcpy-outlined variant;
 * every other thread parks in consume() and executes the tasks in the
 * implicit barrier at the end of the parallel region. */
int main(void) {
  double multiplier = 42.0;
#pragma omp parallel
  {
    if (omp_get_thread_num() != 0) {
      consume();
    }
    else {
      produce_transformed_memcpy(multiplier);
    }
  }
  return 0;
}
|
gm_bfs_template.h | #ifndef GM_BFS_TEMPLATE_H
#define GM_BFS_TEMPLATE_H
#include <omp.h>
#include <string.h>
#include <map>
#include <set>
#include "gm_graph.h"
#include "gm_atomic_wrapper.h"
#include "gm_bitmap.h"
// todo: consideration for non small-world graph
template<typename level_t, bool use_multithread, bool has_navigator, bool use_reverse_edge, bool save_child>
class gm_bfs_template
{
public:
gm_bfs_template(gm_graph& _G) :
G(_G) {
visited_bitmap = NULL; // bitmap
visited_level = NULL;
already_expanded = false;
//global_curr_level = NULL;
//global_next_level = NULL;
//global_queue = NULL;
thread_local_next_level = NULL;
down_edge_array = NULL;
down_edge_set = NULL;
if (save_child) {
down_edge_set = new std::set<edge_t>();
}
}
virtual ~gm_bfs_template() {
delete [] visited_bitmap;
delete [] visited_level;
//delete [] global_queue;
delete [] thread_local_next_level;
delete [] down_edge_set;
delete [] down_edge_array;
}
void prepare(node_t root_node, int max_num_thread) {
int num_thread;
if (use_multithread) {
num_thread = max_num_thread;
} else {
num_thread = 1;
}
is_finished = false;
curr_level = 0;
root = root_node;
state = ST_SMALL;
assert(root != gm_graph::NIL_NODE);
//global_queue = new node_t[G.num_nodes()];
//global_curr_level = global_queue;
//global_next_level = NULL;
// create local queues
thread_local_next_level = new std::vector<node_t>[num_thread];
for (int i = 0; i < num_thread; i++)
thread_local_next_level[i].reserve(G.num_nodes()/num_thread/2);
}
void do_bfs_forward() {
//---------------------------------
// prepare root node
//---------------------------------
curr_level = 0;
curr_count = 0;
next_count = 0;
small_visited[root] = curr_level;
curr_count++;
global_vector.push_back(root);
global_curr_level_begin = 0;
global_next_level_begin = curr_count;
level_count.push_back(curr_count);
level_queue_begin.push_back(global_curr_level_begin);
#define PROFILE_LEVEL_TIME 0
#if PROFILE_LEVEL_TIME
struct timeval T1, T2;
#endif
bool is_done = false;
while (!is_done) {
#if PROFILE_LEVEL_TIME
gettimeofday(&T1,NULL);
printf("state = %d, curr_count=%d, ", state, curr_count);
#endif
switch (state) {
case ST_SMALL: {
for (node_t i = 0; i < curr_count; i++) {
node_t t = global_vector[global_curr_level_begin + i];
iterate_neighbor_small(t);
visit_fw(t); // visit after iteration. in that way, one can check down-neighbors quite easily
}
break;
}
case ST_QUE: {
if (use_multithread) // do it in parallel
{
#pragma omp parallel
{
int tid = omp_get_thread_num();
#pragma omp for nowait schedule(dynamic,128)
for (node_t i = 0; i < curr_count; i++) {
//node_t t = global_curr_level[i];
node_t t = global_vector[global_curr_level_begin + i];
iterate_neighbor_que(t, tid);
visit_fw(t);
}
finish_thread_que(tid);
}
}
else { // do it in sequential
int tid = 0;
for (node_t i = 0; i < curr_count; i++) {
//node_t t = global_curr_level[i];
node_t t = global_vector[global_curr_level_begin + i];
iterate_neighbor_que(t, tid);
visit_fw(t);
}
finish_thread_que(tid);
}
break;
}
case ST_Q2R: {
if (use_multithread) { // do it in parallel
#pragma omp parallel
{
node_t local_cnt = 0;
#pragma omp for nowait schedule(dynamic,128)
for (node_t i = 0; i < curr_count; i++) {
//node_t t = global_curr_level[i];
node_t t = global_vector[global_curr_level_begin + i];
iterate_neighbor_rd(t, local_cnt);
visit_fw(t);
}
finish_thread_rd(local_cnt);
}
} else { // do it sequentially
node_t local_cnt = 0;
for (node_t i = 0; i < curr_count; i++) {
//node_t t = global_curr_level[i];
node_t t = global_vector[global_curr_level_begin + i];
iterate_neighbor_rd(t, local_cnt);
visit_fw(t);
}
finish_thread_rd(local_cnt);
}
break;
}
case ST_RD: {
if (use_multithread) { // do it in parallel
#pragma omp parallel
{
node_t local_cnt = 0;
#pragma omp for nowait schedule(dynamic,128)
for (node_t t = 0; t < G.num_nodes(); t++) {
if (visited_level[t] == curr_level) {
iterate_neighbor_rd(t, local_cnt);
visit_fw(t);
}
}
finish_thread_rd(local_cnt);
}
} else { // do it in sequential
node_t local_cnt = 0;
for (node_t t = 0; t < G.num_nodes(); t++) {
if (visited_level[t] == curr_level) {
iterate_neighbor_rd(t, local_cnt);
visit_fw(t);
}
}
finish_thread_rd(local_cnt);
}
break;
}
case ST_R2Q: {
if (use_multithread) { // do it in parallel
#pragma omp parallel
{
int tid = omp_get_thread_num();
#pragma omp for nowait schedule(dynamic,128)
for (node_t t = 0; t < G.num_nodes(); t++) {
if (visited_level[t] == curr_level) {
iterate_neighbor_que(t, tid);
visit_fw(t);
}
}
finish_thread_que(tid);
}
} else {
int tid = 0;
for (node_t t = 0; t < G.num_nodes(); t++) {
if (visited_level[t] == curr_level) {
iterate_neighbor_que(t, tid);
visit_fw(t);
}
}
finish_thread_que(tid);
}
break;
}
// reverse read
case ST_RRD: {
#pragma omp parallel if (use_multithread)
{
node_t local_cnt = 0;
#pragma omp for nowait schedule(dynamic,128)
for (node_t t = 0; t < G.num_nodes(); t++) {
if (visited_level[t] == curr_level) {
visit_fw(t);
}
else if (visited_level[t] == __INVALID_LEVEL) {
if (check_parent_rrd(t)) {
local_cnt ++;
_gm_set_bit(visited_bitmap, t);
visited_level[t] = (curr_level + 1);
}
}
}
finish_thread_rd(local_cnt);
}
break;
}
// reverse read2Q
case ST_RR2Q: {
#pragma omp parallel if (use_multithread)
{
int tid = omp_get_thread_num();
#pragma omp for nowait schedule(dynamic,128)
for (node_t t = 0; t < G.num_nodes(); t++) {
if (visited_level[t] == curr_level) {
visit_fw(t);
}
else if (visited_level[t] == __INVALID_LEVEL) {
if (check_parent_rrd(t)) {
_gm_set_bit(visited_bitmap, t);
visited_level[t] = (curr_level + 1);
thread_local_next_level[tid].push_back(t);
}
}
}
finish_thread_que(tid);
}
break;
}
} // end of switch
do_end_of_level_fw();
is_done = get_next_state();
#if PROFILE_LEVEL_TIME
gettimeofday(&T2, NULL);
printf("time = %6.5f ms\n", (T2.tv_sec - T1.tv_sec)*1000 + (T2.tv_usec - T1.tv_usec)*0.001);
#endif
} // end of while
}
void do_bfs_reverse() {
// This function should be called only after do_bfs_foward has finished.
// assumption: small-world graph
level_t& level = curr_level;
while (true) {
node_t count = level_count[level];
//node_t* queue_ptr = level_start_ptr[level];
node_t* queue_ptr;
node_t begin_idx = level_queue_begin[level];
if (begin_idx == -1) {
queue_ptr = NULL;
} else {
queue_ptr = & (global_vector[begin_idx]);
}
if (queue_ptr == NULL) {
#pragma omp parallel if (use_multithread)
{
#pragma omp for nowait schedule(dynamic,128)
for (node_t i = 0; i < G.num_nodes(); i++) {
if (visited_level[i] != curr_level) continue;
visit_rv(i);
}
}
} else {
#pragma omp parallel if (use_multithread)
{
#pragma omp for nowait
for (node_t i = 0; i < count; i++) {
node_t u = queue_ptr[i];
visit_rv(u);
}
}
}
do_end_of_level_rv();
if (level == 0) break;
level--;
}
}
bool is_down_edge(edge_t idx) {
if (state == ST_SMALL)
return (down_edge_set->find(idx) != down_edge_set->end());
else
return down_edge_array[idx];
}
protected:
virtual void visit_fw(node_t t)=0;
virtual void visit_rv(node_t t)=0;
virtual bool check_navigator(node_t t, edge_t nx)=0;
virtual void do_end_of_level_fw() {
}
virtual void do_end_of_level_rv() {
}
node_t get_root() {
return root;
}
level_t get_level(node_t t) {
// GCC expansion
if (__builtin_expect((state == ST_SMALL), 0)) {
if (small_visited.find(t) == small_visited.end())
return __INVALID_LEVEL;
else
return small_visited[t];
} else {
return visited_level[t];
}
}
level_t get_curr_level() {
return curr_level;
}
private:
bool get_next_state() {
const char* state_name[5] = {"SMALL","QUEUE","Q2R","RD","R2Q"};
if (next_count == 0) return true; // BFS is finished
int next_state = state;
const float RRD_THRESHOLD = 0.05;
switch (state) {
case ST_SMALL:
if (next_count >= THRESHOLD1) {
prepare_que();
next_state = ST_QUE;
}
break;
case ST_QUE:
if (already_expanded) break;
if (next_count >= G.num_nodes()*RRD_THRESHOLD)
{
already_expanded = true;
prepare_read();
if (!save_child && G.has_reverse_edge())
{
next_state = ST_RRD;
}
else
next_state = ST_RD;
break;
}
else if ((next_count >= THRESHOLD2) && (next_count >= curr_count*5)) {
already_expanded = true;
prepare_read();
next_state = ST_Q2R;
}
break;
case ST_Q2R:
if (!save_child && G.has_reverse_edge() && (next_count >= G.num_nodes()*RRD_THRESHOLD))
{
next_state = ST_RRD;
}
else
next_state = ST_RD;
break;
case ST_RD:
if (next_count >= G.num_nodes()*RRD_THRESHOLD) {
next_state = ST_RRD;
}
else if (next_count <= (2 * curr_count)) {
next_state = ST_R2Q;
}
break;
case ST_R2Q:
next_state = ST_QUE;
break;
case ST_RRD:
if (next_count <= (2 * curr_count)) {
next_state = ST_RR2Q;
}
break;
case ST_RR2Q:
next_state = ST_QUE;
break;
}
finish_level(state);
state = next_state;
return false;
}
void finish_level(int state) {
if ((state == ST_RD) || (state == ST_Q2R)) {
// output queue is not valid
} else { // move output queue
//node_t* temp = &(global_next_level[next_count]);
//global_curr_level = global_next_level;
//global_next_level = temp;
global_curr_level_begin = global_next_level_begin;
global_next_level_begin = global_next_level_begin + next_count;
}
curr_count = next_count;
next_count = 0;
curr_level++;
// save 'new current' level status
level_count.push_back(curr_count);
if ((state == ST_RD) || (state == ST_Q2R) || (state == ST_RRD)) {
//level_start_ptr.push_back(NULL);
level_queue_begin.push_back(-1);
} else {
//level_start_ptr.push_back(global_curr_level);
level_queue_begin.push_back(global_curr_level_begin);
}
}
void get_range(edge_t& begin, edge_t& end, node_t t) {
if (use_reverse_edge) {
begin = G.r_begin[t];
end = G.r_begin[t + 1];
} else {
begin = G.begin[t];
end = G.begin[t + 1];
}
}
void get_range_rev(edge_t& begin, edge_t& end, node_t t) {
if (use_reverse_edge) {
begin = G.begin[t];
end = G.begin[t + 1];
} else {
begin = G.r_begin[t];
end = G.r_begin[t + 1];
}
}
node_t get_node(edge_t& n) {
if (use_reverse_edge) {
return G.r_node_idx[n];
} else {
return G.node_idx[n];
}
}
node_t get_node_rev(edge_t& n) {
if (use_reverse_edge) {
return G.node_idx[n];
} else {
return G.r_node_idx[n];
}
}
void iterate_neighbor_small(node_t t) {
edge_t begin;
edge_t end;
get_range(begin, end, t);
for (edge_t nx = begin; nx < end; nx++) {
node_t u = get_node(nx);
// check visited
if (small_visited.find(u) == small_visited.end()) {
if (has_navigator) {
if (check_navigator(u, nx) == false) continue;
}
if (save_child) {
save_down_edge_small(nx);
}
small_visited[u] = curr_level + 1;
//global_next_level[next_count++] = u;
global_vector.push_back(u);
next_count++;
}
else if (save_child) {
if (has_navigator) {
if (check_navigator(u, nx) == false) continue;
}
if (small_visited[u] == (curr_level+1)){
save_down_edge_small(nx);
}
}
}
}
// should be used only when save_child is enabled
void save_down_edge_small(edge_t idx) {
down_edge_set->insert(idx);
}
void save_down_edge_large(edge_t idx) {
down_edge_array[idx] = 1;
}
void prepare_que() {
global_vector.reserve(G.num_nodes());
// create bitmap and edges
visited_bitmap = new unsigned char[(G.num_nodes() + 7) / 8];
visited_level = new level_t[G.num_nodes()];
if (save_child) {
down_edge_array = new unsigned char[G.num_edges()];
}
if (use_multithread) {
#pragma omp parallel
{
#pragma omp for nowait //schedule(dynamic,65536)
for (node_t i = 0; i < (G.num_nodes() + 7) / 8; i++)
visited_bitmap[i] = 0;
#pragma omp for nowait //schedule(dynamic,65536)
for (node_t i = 0; i < G.num_nodes(); i++)
visited_level[i] = __INVALID_LEVEL;
if (save_child) {
#pragma omp for nowait //schedule(dynamic,65536)
for (edge_t i = 0; i < G.num_edges(); i++)
down_edge_array[i] = 0;
}
}
} else {
for (node_t i = 0; i < (G.num_nodes() + 7) / 8; i++)
visited_bitmap[i] = 0;
for (node_t i = 0; i < G.num_nodes(); i++)
visited_level[i] = __INVALID_LEVEL;
if (save_child) {
for (edge_t i = 0; i < G.num_edges(); i++)
down_edge_array[i] = 0;
}
}
typename std::map<node_t, level_t>::iterator II;
for (II = small_visited.begin(); II != small_visited.end(); II++) {
node_t u = II->first;
level_t lev = II->second;
_gm_set_bit(visited_bitmap, u);
visited_level[u] = lev;
}
if (save_child) {
typename std::set<edge_t>::iterator J;
for (J = down_edge_set->begin(); J != down_edge_set->end(); J++) {
down_edge_array[*J] = 1;
}
}
}
void iterate_neighbor_que(node_t t, int tid) {
edge_t begin;
edge_t end;
get_range(begin, end, t);
for (edge_t nx = begin; nx < end; nx++) {
node_t u = get_node(nx);
// check visited bitmap
// test & test& set
if (_gm_get_bit(visited_bitmap, u) == 0) {
if (has_navigator) {
if (check_navigator(u, nx) == false) continue;
}
bool re_check_result;
if (use_multithread) {
re_check_result = _gm_set_bit_atomic(visited_bitmap, u);
} else {
re_check_result = true;
_gm_set_bit(visited_bitmap, u);
}
if (save_child) {
save_down_edge_large(nx);
}
if (re_check_result) {
// add to local q
thread_local_next_level[tid].push_back(u);
visited_level[u] = (curr_level + 1);
}
}
else if (save_child) {
if (has_navigator) {
if (check_navigator(u, nx) == false) continue;
}
if (visited_level[u] == (curr_level +1)) {
save_down_edge_large(nx);
}
}
}
}
void finish_thread_que(int tid) {
node_t local_cnt = thread_local_next_level[tid].size();
//copy curr_cnt to next_cnt
if (local_cnt > 0) {
node_t old_idx = _gm_atomic_fetch_and_add_node(&next_count, local_cnt);
// copy to global vector
memcpy(&(global_vector[global_next_level_begin + old_idx]),
&(thread_local_next_level[tid][0]),
local_cnt * sizeof(node_t));
}
thread_local_next_level[tid].clear();
}
void prepare_read() {
// nothing to do
}
void iterate_neighbor_rd(node_t t, node_t& local_cnt) {
edge_t begin;
edge_t end;
get_range(begin, end, t);
for (edge_t nx = begin; nx < end; nx++) {
node_t u = get_node(nx);
// check visited bitmap
// test & test& set
if (_gm_get_bit(visited_bitmap, u) == 0) {
if (has_navigator) {
if (check_navigator(u, nx) == false) continue;
}
bool re_check_result;
if (use_multithread) {
re_check_result = _gm_set_bit_atomic(visited_bitmap, u);
} else {
re_check_result = true;
_gm_set_bit(visited_bitmap, u);
}
if (save_child) {
save_down_edge_large(nx);
}
if (re_check_result) {
// add to local q
visited_level[u] = curr_level + 1;
local_cnt++;
}
}
else if (save_child) {
if (has_navigator) {
if (check_navigator(u, nx) == false) continue;
}
if (visited_level[u] == (curr_level +1)) {
save_down_edge_large(nx);
}
}
}
}
void finish_thread_rd(node_t local_cnt) {
_gm_atomic_fetch_and_add_node(&next_count, local_cnt);
}
// return true if t is next level
bool check_parent_rrd(node_t t)
{
bool ret = false;
edge_t begin;
edge_t end;
get_range_rev(begin, end, t);
if (has_navigator) {
// there is no 'EDGE' used in navigator
if (check_navigator(t, 0) == false) return false;
}
for (edge_t nx = begin; nx < end; nx++) {
node_t u = get_node_rev(nx);
//if (_gm_get_bit(visited_bitmap, u) == 0) continue;
if (visited_level[u] == (curr_level ))
return true;
}
return false;
}
//-----------------------------------------------------
//-----------------------------------------------------
// --- BFS engine state machine constants ---
static const int ST_SMALL = 0;   // small frontier: single-threaded, map-based
static const int ST_QUE = 1;     // queue-based expansion
static const int ST_Q2R = 2;     // transition: queue -> read-based
static const int ST_RD = 3;      // read-based (level array) expansion
static const int ST_R2Q = 4;     // transition: read-based -> queue
static const int ST_RRD = 5;     // reverse read-based expansion
static const int ST_RR2Q = 6;    // transition: reverse read -> queue
static const int THRESHOLD1 = 128; // single threaded
static const int THRESHOLD2 = 1024; // move to RD-based
// not -1.
//(why? because curr_level-1 might be -1, when curr_level = 0)
static const level_t __INVALID_LEVEL = -2;
int state;                        // current ST_* mode
unsigned char* visited_bitmap; // bitmap
level_t* visited_level; // assumption: small_world graph
bool is_finished;                 // set when a level produces no nodes
level_t curr_level;               // level currently being expanded
node_t root;                      // BFS source node
gm_graph& G;                      // graph being traversed
node_t curr_count;                // nodes in the current frontier
node_t next_count;                // nodes discovered for the next frontier
std::map<node_t, level_t> small_visited;   // node -> level, used in ST_SMALL
std::set<edge_t>* down_edge_set;           // tree edges (small mode)
unsigned char* down_edge_array;            // tree edges (large mode)
//node_t* global_next_level;
//node_t* global_curr_level;
//node_t* global_queue;
std::vector<node_t> global_vector;         // backing store for both frontiers
node_t global_curr_level_begin;            // offset of current level in global_vector
node_t global_next_level_begin;            // offset of next level in global_vector
//std::vector<node_t*> level_start_ptr;
std::vector<node_t> level_queue_begin;     // per-level start offset
std::vector<node_t> level_count;           // per-level node count
std::vector<node_t>* thread_local_next_level; // one private queue per thread
bool already_expanded;            // guards against re-running the traversal
};
#endif
|
GB_unaryop__minv_uint8_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint8_int8
// op(A') function: GB_tran__minv_uint8_int8
// C type: uint8_t
// A type: int8_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 8)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 8) ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT8 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply MINV (modular inverse, unsigned semantics)
// elementwise, casting int8_t -> uint8_t. GB_CAST_OP expands to the
// cast + operator defined by the file-top macros. Auto-generated code.
GrB_Info GB_unop__minv_uint8_int8
(
    uint8_t *restrict Cx,
    const int8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary operator.
// The loop body lives in the shared template GB_unaryop_transpose.c,
// specialized via the macros defined at the top of this file.
GrB_Info GB_tran__minv_uint8_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
lfmm3d_fin.c | #include "stdlib.h"
#include "stdio.h"
#include "math.h"
#include "lfmm3d_c.h"
#include "complex.h"
#include "cprini.h"
#include <omp.h>
#include <string.h>
#include <sys/time.h>
// Wall-clock time in seconds (microsecond resolution), via gettimeofday.
double get_wtime_sec()
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_sec + (double) tv.tv_usec / 1000000.0;
}
void Laplace_3d_matvec_std(
const double *coord0, const int ld0, const int n0,
const double *coord1, const int ld1, const int n1,
const double *x_in, double *x_out
)
{
const double *x0 = coord0 + ld0 * 0;
const double *y0 = coord0 + ld0 * 1;
const double *z0 = coord0 + ld0 * 2;
const double *x1 = coord1 + ld1 * 0;
const double *y1 = coord1 + ld1 * 1;
const double *z1 = coord1 + ld1 * 2;
for (int i = 0; i < n0; i++)
{
const double x0_i = x0[i];
const double y0_i = y0[i];
const double z0_i = z0[i];
double sum = 0.0;
#pragma omp simd
for (int j = 0; j < n1; j++)
{
double dx = x0_i - x1[j];
double dy = y0_i - y1[j];
double dz = z0_i - z1[j];
double r2 = dx * dx + dy * dy + dz * dz;
sum += (r2 == 0.0) ? 0.0 : (x_in[j] / sqrt(r2));
}
x_out[i] += sum;
}
}
// Test driver for the Laplace FMM (lfmm3d_t_c_p_): reads or generates
// npoint source/target coordinates, runs the FMM repeatedly, then
// checks a subset of potentials against the direct O(N^2) kernel.
//
// Fixes vs. previous version: usage error now exits nonzero, npoint is
// validated, fopen/fscanf/fread results are checked (falling back to
// random coordinates on failure), allocations are checked, and poten1
// is freed (it previously leaked).
int main(int argc, char **argv)
{
    cprin_init("stdout", "fort.13");
    cprin_skipline(2);
    if (argc < 3)
    {
        printf("Usage : %s num_point eps input_file (csv or bin, optional)\n", argv[0]);
        return 1;
    }
    int npoint = atoi(argv[1]);
    double eps = atof(argv[2]);
    if (npoint <= 0)
    {
        printf("Invalid number of points: %d\n", npoint);
        return 1;
    }
    printf("Number of points = %d, eps = %e\n", npoint, eps);
    double *coord  = (double*) malloc(sizeof(double) * npoint * 3);
    double *charge = (double*) malloc(sizeof(double) * npoint);
    double *poten  = (double*) malloc(sizeof(double) * npoint);
    if (coord == NULL || charge == NULL || poten == NULL)
    {
        printf("Failed to allocate point arrays\n");
        free(coord);  free(charge);  free(poten);
        return 1;
    }
    int need_gen = 1;
    if (argc >= 4)
    {
        // Optional coordinate file: ".csv" (text, "x,y,z" per line) or
        // ".bin" (raw little-endian doubles, npoint*3 values).
        if (strstr(argv[3], ".csv") != NULL)
        {
            printf("Reading coordinates from CSV file...");
            FILE *inf = fopen(argv[3], "r");
            if (inf == NULL)
            {
                printf(" cannot open %s, falling back to random points\n", argv[3]);
            }
            else
            {
                int nread = 0;
                for (int i = 0; i < npoint; i++)
                {
                    if (fscanf(inf, "%lf,%lf,%lf\n", &coord[3 * i], &coord[3 * i + 1], &coord[3 * i + 2]) != 3) break;
                    nread++;
                }
                fclose(inf);
                if (nread == npoint) { printf(" done.\n"); need_gen = 0; }
                else printf(" short read (%d of %d), falling back to random points\n", nread, npoint);
            }
        }
        if (strstr(argv[3], ".bin") != NULL)
        {
            printf("Reading coordinates from binary file...");
            FILE *inf = fopen(argv[3], "rb");
            if (inf == NULL)
            {
                printf(" cannot open %s, falling back to random points\n", argv[3]);
            }
            else
            {
                size_t nread = fread(coord, sizeof(double), (size_t) npoint * 3, inf);
                fclose(inf);
                if (nread == (size_t) npoint * 3) { printf(" done.\n"); need_gen = 0; }
                else printf(" short read, falling back to random points\n");
            }
        }
    }
    if (need_gen == 1)
    {
        printf("Binary/CSV coordinate file not provided. Generating random coordinates in unit box...");
        for (int i = 0; i < 3 * npoint; i++) coord[i] = rand01();
        printf(" done.\n");
    }
    // random charges in [-0.5, 0.5)
    for (int i = 0; i < npoint; i++) charge[i] = rand01() - 0.5;
    printf("[INFO] lfmm3d_t_c_p: file coordinate input, source = target\n");
    int ierr;
    // warm-up call, then 5 timed repetitions
    lfmm3d_t_c_p_(&eps, &npoint, coord, charge, &npoint, coord, poten, &ierr);
    for (int k = 0; k < 5; k++)
    {
        lfmm3d_t_c_p_(&eps, &npoint, coord, charge, &npoint, coord, poten, &ierr);
        printf("\n ===== Test %d done =====\n\n", k);
    }
    // Accuracy check: direct kernel on the first ntrgchk targets.
    // coord is AoS (xyz per point); the direct kernel wants SoA, so
    // transpose into coord1.
    double *coord1 = (double*) malloc(sizeof(double) * npoint * 3);
    double *poten1 = (double*) malloc(sizeof(double) * npoint);
    if (coord1 == NULL || poten1 == NULL)
    {
        printf("Failed to allocate check arrays\n");
        free(coord1);  free(poten1);
        free(coord);   free(charge);  free(poten);
        return 1;
    }
    for (int i = 0; i < 3; i++)
        for (int j = 0; j < npoint; j++)
            coord1[i * npoint + j] = coord[j * 3 + i];
    memset(poten1, 0, sizeof(double) * npoint);
    int nthread = omp_get_max_threads();
    int ntrgchk = (npoint < 10000) ? npoint : 10000;
    #pragma omp parallel
    {
        int tid = omp_get_thread_num();
        int sidx = tid * ntrgchk / nthread;
        int eidx = (tid + 1) * ntrgchk / nthread;
        int ntrgt = eidx - sidx;
        Laplace_3d_matvec_std(
            coord1 + sidx, npoint, ntrgt,
            coord1, npoint, npoint,
            charge, poten1 + sidx
        );
    }
    double std_l2 = 0, err_l2 = 0;
    for (int i = 0; i < ntrgchk; i++)
    {
        double diff = poten1[i] - poten[i];
        std_l2 += poten1[i] * poten1[i];
        err_l2 += diff * diff;
    }
    std_l2 = sqrt(std_l2);
    err_l2 = sqrt(err_l2);
    printf("Relative L2 error = %e\n", err_l2 / std_l2);
    free(coord1);
    free(poten1);   // was leaked before
    free(coord);
    free(charge);
    free(poten);
    return 0;
}
|
GB_unaryop__ainv_int16_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int16_fp32
// op(A') function: GB_tran__ainv_int16_fp32
// C type: int16_t
// A type: float
// cast: int16_t cij ; GB_CAST_SIGNED(cij,aij,16)
// unaryop: cij = -aij
#define GB_ATYPE \
float
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
int16_t z ; GB_CAST_SIGNED(z,aij,16) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT16 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): additive inverse (negation), with a saturating
// float -> int16_t cast (GB_CAST_SIGNED). GB_CAST_OP expands to the
// cast + operator defined by the file-top macros. Auto-generated code.
GrB_Info GB_unop__ainv_int16_fp32
(
    int16_t *Cx, // Cx and Ax may be aliased
    float *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary operator.
// Body is the shared template GB_unaryop_transpose.c, specialized via
// the macros defined at the top of this file.
GrB_Info GB_tran__ainv_int16_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
norm2.c | /*
The MIT License (MIT)
Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
// Squared 2-norm reduction: normA[0] = sum_i a[i]^2 over the first N
// entries of cpu_a. OpenMP reduction when built with __NEKRS__OMP__.
// NOTE(review): Nblocks is unused here (kept for signature parity with
// the device kernel — confirm). The loop index is int while N is dlong;
// presumably N always fits in int — verify against callers.
extern "C" void FUNC(norm2)(const dlong & Nblocks, const dlong & N,
                            const dfloat * __restrict__ cpu_a,
                            dfloat * __restrict__ normA){
  dfloat wa2 = 0;
#ifdef __NEKRS__OMP__
  #pragma omp parallel for reduction(+:wa2)
#endif
  for(int i=0;i<N;++i){
    const dfloat ai = cpu_a[i];
    wa2 += ai*ai;
  }
  normA[0] = wa2;
}
|
GB_binop__bshift_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bshift_int8)
// A.*B function (eWiseMult): GB (_AemultB_01__bshift_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__bshift_int8)
// A.*B function (eWiseMult): GB (_AemultB_03__bshift_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bshift_int8)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bshift_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__bshift_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bshift_int8)
// C=scalar+B GB (_bind1st__bshift_int8)
// C=scalar+B' GB (_bind1st_tran__bshift_int8)
// C=A+scalar GB (_bind2nd__bshift_int8)
// C=A'+scalar GB (_bind2nd_tran__bshift_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = GB_bitshift_int8 (aij, bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_bitshift_int8 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSHIFT || GxB_NO_INT8 || GxB_NO_BSHIFT_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; body is the shared
// template, specialized by the file-top macros. Auto-generated code.
GrB_Info GB (_Cdense_ewise3_noaccum__bshift_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, using the
// slicing of B computed by the caller. Auto-generated code.
GrB_Info GB (_Cdense_accumB__bshift_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix. Auto-generated code.
GrB_Info GB (_Cdense_accumb__bshift_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the inner block above always returns (generator artifact)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, with cij = GB_bitshift_int8(aij,bij)
// where both entries are present. Workspaces are declared here and
// freed by GB_FREE_WORK after the template runs. Auto-generated code.
GrB_Info GB (_AaddB__bshift_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (general case); body is the
// shared meta template. Auto-generated code.
GrB_Info GB (_AemultB_01__bshift_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// BSHIFT is not commutative and has no flipped variant, so when flipxy
// is set the template is instantiated with GB_FLIPPED=1 to compute
// fmult(y,x) instead of fmult(x,y). Auto-generated code.
GrB_Info GB (_AemultB_02__bshift_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, with M sparse/hyper and both A and B
// bitmap/full. Auto-generated code.
GrB_Info GB (_AemultB_03__bshift_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
// Auto-generated code.
GrB_Info GB (_AemultB_bitmap__bshift_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): bind the scalar x as the first operand and apply
// the operator to every present entry of B (Bb is B's bitmap; entries
// with Bb[p]==0 are skipped). Auto-generated code.
GrB_Info GB (_bind1st__bshift_int8)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_bitshift_int8 (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): bind the scalar y as the second operand and apply
// the operator to every present entry of A (Ab is A's bitmap).
// Auto-generated code.
GrB_Info GB (_bind2nd__bshift_int8)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        int8_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_bitshift_int8 (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_int8 (x, aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with scalar x
// bound as the first operand. GB_ATYPE is temporarily redefined because
// the transpose template treats A as the second operand here, then
// restored after the #include. Auto-generated code.
GrB_Info GB (_bind1st_tran__bshift_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_int8 (aij, y) ; \
}
// C = op (A', y): transpose A and apply the operator with scalar y
// bound as the second operand. Auto-generated code.
GrB_Info GB (_bind2nd_tran__bshift_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
diagmm_x_dia_u_row.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
// Row-major diagonal-matrix multiply for a DIA matrix with unit
// diagonal: y := beta*y + alpha*x (element-wise over rows x columns).
// mat's stored values are never read — presumably because the "_u"
// variant means the diagonal is implicitly all-ones, so D*x == x;
// verify against the non-unit variant. Parallelized over rows.
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
    ALPHA_INT num_threads = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT r = 0; r < mat->rows; ++r)
    {
        for (ALPHA_INT c = 0; c < columns; ++c)
        {
            // y[r,c] = beta * y[r,c] + alpha * x[r,c]
            alpha_mul(y[index2(r,c,ldy)],y[index2(r,c,ldy)],beta);
            alpha_madde(y[index2(r,c,ldy)],x[index2(r,c,ldx)],alpha);
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
cones.c | #include "cones.h"
#define CONE_RATE (2)
#define CONE_TOL (1e-8)
#define EXP_CONE_MAX_ITERS (100)
#ifdef LAPACK_LIB_FOUND
/* underscore for blas / lapack, single or double precision */
#ifdef NOBLASUNDERSCORE
#ifndef FLOAT
#define BLAS(x) d ## x
#else
#define BLAS(x) s ## x
#endif
#else
#ifndef FLOAT
#define BLAS(x) d ## x ## _
#else
#define BLAS(x) s ## x ## _
#endif
#endif
#ifdef MATLAB_MEX_FILE
typedef ptrdiff_t blasint;
#elif defined BLAS64
#include <stdint.h>
typedef int64_t blasint;
#else
typedef int blasint;
#endif
void BLAS(syevr)(char* jobz, char* range, char* uplo, blasint* n, scs_float* a, blasint* lda, scs_float* vl,
scs_float* vu, blasint* il, blasint* iu, scs_float* abstol, blasint* m, scs_float* w, scs_float* z, blasint* ldz,
blasint* isuppz, scs_float* work, blasint* lwork, blasint* iwork, blasint* liwork, blasint* info);
void BLAS(syr)(const char *uplo, const blasint *n, const scs_float *alpha, const scs_float *x, const blasint *incx,
scs_float *a, const blasint *lda);
void BLAS(axpy)(const blasint *n, const scs_float *alpha, const scs_float *dx, const blasint *incx, scs_float *dy,
const blasint *incy);
/* private data to help cone projection step */
static struct ConeData_t {
/* workspace for eigenvector decompositions: */
scs_float * Xs, *Z, *e, *work;
blasint *iwork, lwork, liwork;
}c;
#endif
static timer coneTimer;
static scs_float totalConeTime;
/*
* boundaries will contain array of indices of rows of A corresponding to
* cone boundaries, boundaries[0] is starting index for cones of size strictly larger than 1
* returns length of boundaries array, boundaries malloc-ed here so should be freed
*/
/*
 * boundaries will contain array of indices of rows of A corresponding to
 * cone boundaries, boundaries[0] is starting index for cones of size strictly larger than 1
 * returns length of boundaries array, boundaries malloc-ed here so should be freed
 * Layout: [f+l, q[0..qsize), s[i]^2 for each SD block, 3 per exp cone].
 * NOTE(review): the scs_malloc result is not checked for NULL before
 * the writes below — consider guarding on allocation failure.
 */
scs_int getConeBoundaries(Cone * k, scs_int ** boundaries) {
    scs_int i, count = 0;
    scs_int len = 1 + k->qsize + k->ssize + k->ed + k->ep;
    scs_int * b = scs_malloc(sizeof(scs_int) * len);
    /* first entry covers the zero (free) and LP cones together */
    b[count] = k->f + k->l;
    count += 1;
    if (k->qsize > 0) {
        memcpy(&b[count], k->q, k->qsize * sizeof(scs_int));
    }
    count += k->qsize;
    /* each SD block of side s[i] occupies s[i]^2 rows */
    for (i = 0; i < k->ssize; ++i) {
        b[count + i] = k->s[i] * k->s[i];
    }
    count += k->ssize;
    /* each exponential cone (primal or dual) is 3 rows */
    for (i = 0; i < k->ep + k->ed; ++i) {
        b[count + i] = 3;
    }
    count += k->ep + k->ed;
    *boundaries = b;
    return len;
}
/* Total number of rows of A described by the cone struct k:
 * free + linear + sum of SOC sizes + sum of squared SD block sizes
 * + 3 variables per primal and per dual exponential cone. */
scs_int getFullConeDims(Cone * k) {
    scs_int i, total = 0;
    total += k->f;
    total += k->l;
    if (k->qsize && k->q) {
        for (i = 0; i < k->qsize; ++i) {
            total += k->q[i];
        }
    }
    if (k->ssize && k->s) {
        for (i = 0; i < k->ssize; ++i) {
            total += k->s[i] * k->s[i];
        }
    }
    total += 3 * k->ed;
    total += 3 * k->ep;
    return total;
}
/* Sanity-check the cone specification k against the problem data d:
 * every cone count/size must be nonnegative, and the total cone
 * dimension must equal the number of rows m of A.
 * Returns 0 on success, -1 on the first inconsistency found.
 * FIX: the "ep"/"ed" error messages were swapped — the check on k->ed
 * (dual exponential cones) printed "ep cone error" and vice versa. */
scs_int validateCones(Data * d, Cone * k) {
    scs_int i;
    if (k->f && k->f < 0) {
        scs_printf("free cone error\n");
        return -1;
    }
    if (k->l && k->l < 0) {
        scs_printf("lp cone error\n");
        return -1;
    }
    if (k->qsize && k->q) {
        for (i = 0; i < k->qsize; ++i) {
            if (k->q[i] < 0) {
                scs_printf("soc cone error\n");
                return -1;
            }
        }
    }
    if (k->ssize && k->s) {
        for (i = 0; i < k->ssize; ++i) {
            if (k->s[i] < 0) {
                scs_printf("sd cone error\n");
                return -1;
            }
        }
    }
    if (k->ed && k->ed < 0) {
        scs_printf("ed cone error\n");
        return -1;
    }
    if (k->ep && k->ep < 0) {
        scs_printf("ep cone error\n");
        return -1;
    }
    if (getFullConeDims(k) != d->m) {
        scs_printf("cone dimensions %i not equal to num rows in A = m = %i\n", (int) getFullConeDims(k), (int) d->m);
        return -1;
    }
    return 0;
}
/* Build a short human-readable summary of the average cone projection
 * time and reset the accumulator. Caller owns (frees) the returned
 * string. NOTE(review): totalConeTime appears to be accumulated in
 * milliseconds (hence /1e3 to report seconds) — confirm against the
 * timer utilities used at the projection call sites. */
char * getConeSummary(Info * info) {
    char * str = scs_malloc(sizeof(char) * 64);
    sprintf(str, "\tCones: avg projection time: %1.2es\n", totalConeTime / (info->iter + 1) / 1e3);
    totalConeTime = 0.0;
    return str;
}
/* Release the LAPACK eigendecomposition workspace allocated by
 * initCone. FIX: pointers are now reset to NULL after freeing so a
 * repeated call (or a call following a partially failed init/finish
 * cycle) cannot double-free. */
void finishCone() {
#ifdef LAPACK_LIB_FOUND
    if (c.Xs)
        scs_free(c.Xs);
    if (c.Z)
        scs_free(c.Z);
    if (c.e)
        scs_free(c.e);
    if (c.work)
        scs_free(c.work);
    if (c.iwork)
        scs_free(c.iwork);
    c.Xs = NULL;
    c.Z = NULL;
    c.e = NULL;
    c.work = NULL;
    c.iwork = NULL;
#endif
}
/* Build a multi-line description of the cone composition (counts of
 * free/linear/SOC/SD/exponential variables) for the solver banner.
 * Caller owns (frees) the returned string. */
char * getConeHeader(Cone * k) {
    char * tmp = scs_malloc(sizeof(char) * 512);
    scs_int i, socVars, socBlks, sdVars, sdBlks, expPvars, expDvars;
    sprintf(tmp, "Cones:");
    if (k->f) {
        sprintf(tmp + strlen(tmp), "\tprimal zero / dual free vars: %i\n", (int) k->f);
    }
    if (k->l) {
        sprintf(tmp + strlen(tmp), "\tlinear vars: %i\n", (int) k->l);
    }
    socVars = 0;
    socBlks = 0;
    if (k->qsize && k->q) {
        socBlks = k->qsize;
        for (i = 0; i < k->qsize; i++) {
            socVars += k->q[i];
        }
        sprintf(tmp + strlen(tmp), "\tsoc vars: %i, soc blks: %i\n", (int) socVars, (int) socBlks);
    }
    sdVars = 0;
    sdBlks = 0;
    if (k->ssize && k->s) {
        sdBlks = k->ssize;
        /* an SD block of side s[i] contributes s[i]^2 variables */
        for (i = 0; i < k->ssize; i++) {
            sdVars += k->s[i] * k->s[i];
        }
        sprintf(tmp + strlen(tmp), "\tsd vars: %i, sd blks: %i\n", (int) sdVars, (int) sdBlks);
    }
    if (k->ep || k->ed) {
        /* 3 variables per exponential cone (primal and dual) */
        expPvars = k->ep ? 3 * k->ep : 0;
        expDvars = k->ed ? 3 * k->ed : 0;
        sprintf(tmp + strlen(tmp), "\texp vars: %i, dual exp vars: %i\n", (int) expPvars, (int) expDvars);
    }
    return tmp;
}
/* A semidefinite cone specification is "simple" when every block has
 * side length at most 2; such blocks can be projected analytically
 * (no LAPACK needed). Returns 1 (true) or 0 (false). */
scs_int isSimpleSemiDefiniteCone(scs_int * s, scs_int ssize) {
    scs_int i = 0;
    while (i < ssize) {
        if (s[i] >= 3) {
            return 0; /* at least one block needs an eigendecomposition */
        }
        ++i;
    }
    return 1;
}
/* One-dimensional Newton iteration used inside the exponential-cone
 * projection: solves for t in the scalar optimality condition given
 * dual variable rho and shifted point (y_hat, z_hat). Clamps the
 * iterate into the feasible region (t in (0, inf), t > -z_hat) and
 * stops when |f| < CONE_TOL or after EXP_CONE_MAX_ITERS steps.
 * Returns the projected third coordinate t + z_hat. */
scs_float expNewtonOneD(scs_float rho, scs_float y_hat, scs_float z_hat) {
    scs_float t = MAX(-z_hat, 1e-6); /* start strictly inside the domain */
    scs_float f, fp;
    scs_int i;
    for (i = 0; i < EXP_CONE_MAX_ITERS; ++i) {
        f = t * (t + z_hat) / rho / rho - y_hat / rho + log(t / rho) + 1;
        fp = (2 * t + z_hat) / rho / rho + 1 / t;
        t = t - f / fp;
        if (t <= -z_hat) {
            /* stepped past the lower boundary of the domain */
            return 0;
        } else if (t <= 0) {
            /* stepped past zero: clamp */
            return z_hat;
        } else if ( ABS(f) < CONE_TOL) {
            break;
        }
    }
    return t + z_hat;
}
/* Given a fixed dual variable rho, recover the candidate projection x
 * (dimension 3) of the point v onto the exponential cone: x[2] via the
 * 1-D Newton solve, then x[1] and x[0] in closed form. */
void expSolveForXWithRho(scs_float * v, scs_float * x, scs_float rho) {
    x[2] = expNewtonOneD(rho, v[1], v[2]);
    x[1] = (x[2] - v[2]) * x[2] / rho;
    x[0] = v[0] - rho;
}
/* Gradient (with respect to the dual variable rho) of the
 * exponential-cone projection subproblem, evaluated at the candidate
 * x produced by expSolveForXWithRho. Used by the bisection in
 * projExpCone to locate the root. The x[1] <= 1e-12 branch avoids
 * log of a nonpositive/tiny value. */
scs_float expCalcGrad(scs_float * v, scs_float * x, scs_float rho) {
    expSolveForXWithRho(v, x, rho);
    if (x[1] <= 1e-12) {
        return x[0];
    } else {
        return x[0] + x[1] * log(x[1] / x[2]);
    }
}
/* Bracket the root of expCalcGrad in rho: starting from ub = 0.125,
 * repeatedly double ub (carrying lb along) until the gradient becomes
 * nonpositive, yielding an interval [lb, ub] for bisection. */
void expGetRhoUb(scs_float * v, scs_float * x, scs_float * ub, scs_float * lb) {
    *lb = 0;
    *ub = 0.125;
    while (expCalcGrad(v, x, *ub) > 0) {
        *lb = *ub;
        (*ub) *= 2;
    }
}
/* project onto the exponential cone, v has dimension *exactly* 3 */
/* project onto the exponential cone, v has dimension *exactly* 3.
 * Overwrites v with its Euclidean projection. Handles, in order:
 * (1) v already in the closed cone — unchanged; (2) -v in the dual
 * cone — projection is the origin; (3) the analytical r<0, s<0 case;
 * (4) otherwise bisection on the dual variable rho using the
 * bracket from expGetRhoUb. Always returns 0. */
static scs_int projExpCone(scs_float * v, scs_int iter) {
    scs_int i;
    scs_float ub, lb, rho, g, x[3];
    scs_float r = v[0], s = v[1], t = v[2];
    scs_float tol = CONE_TOL; /* iter < 0 ? CONE_TOL : MAX(CONE_TOL, 1 / POWF((iter + 1), CONE_RATE)); */
    /* v in cl(Kexp) */
    if ((s * exp(r / s) <= t && s > 0) || (r <= 0 && s == 0 && t >= 0)) {
        return 0;
    }
    /* -v in Kexp^* */
    if ((-r < 0 && r * exp(s / r) <= -exp(1) * t) || (-r == 0 && -s >= 0 && -t >= 0)) {
        memset(v, 0, 3 * sizeof(scs_float));
        return 0;
    }
    /* special case with analytical solution */
    if (r < 0 && s < 0) {
        v[1] = 0.0;
        v[2] = MAX(v[2], 0);
        return 0;
    }
    /* iterative procedure to find projection, bisects on dual variable: */
    expGetRhoUb(v, x, &ub, &lb); /* get starting upper and lower bounds */
    for (i = 0; i < EXP_CONE_MAX_ITERS; ++i) {
        rho = (ub + lb) / 2; /* halfway between upper and lower bounds */
        g = expCalcGrad(v, x, rho); /* calculates gradient wrt dual var */
        if (g > 0) {
            lb = rho;
        } else {
            ub = rho;
        }
        if (ub - lb < tol) {
            break;
        }
    }
    /*
    #ifdef EXTRAVERBOSE
    scs_printf("exponential cone proj iters %i\n", i);
    #endif
    */
    v[0] = x[0];
    v[1] = x[1];
    v[2] = x[2];
    return 0;
}
/* One-time cone setup: resets the cone-projection timer and, when the
 * problem contains semidefinite cones larger than 2x2, allocates the
 * LAPACK eigendecomposition workspace (sized for the largest SD block)
 * that projSemiDefiniteCone reuses on every projection.
 * NOTE(review): `c`, `totalConeTime` and `coneTimer` appear to be
 * file-scope state declared above this chunk — confirm against the full
 * file.  Returns 0 on success, -1 on allocation/LAPACK failure or when
 * an SDP is requested without linked BLAS/LAPACK. */
scs_int initCone(Cone * k) {
#ifdef LAPACK_LIB_FOUND
    scs_int i;
    blasint nMax = 0;
    scs_float eigTol = 1e-8;
    blasint negOne = -1; /* lwork/liwork = -1 asks syevr for optimal sizes */
    blasint m = 0;
    blasint info;
    scs_float wkopt;
    c.Xs = NULL;
    c.Z = NULL;
    c.e = NULL;
    c.work = NULL;
    c.iwork = NULL;
#endif
    totalConeTime = 0.0;
#ifdef EXTRAVERBOSE
    scs_printf("initCone\n");
#ifdef MATLAB_MEX_FILE
    mexEvalString("drawnow;");
#endif
#endif
    if (k->ssize && k->s) {
        if (isSimpleSemiDefiniteCone(k->s, k->ssize)) {
            /* every SD block is <= 2x2: handled analytically, no LAPACK needed */
            return 0;
        }
#ifdef LAPACK_LIB_FOUND
        /* eigenvector decomp workspace */
        for (i = 0; i < k->ssize; ++i) {
            if (k->s[i] > nMax) {
                nMax = (blasint) k->s[i];
            }
        }
        c.Xs = scs_calloc(nMax * nMax, sizeof(scs_float));
        c.Z = scs_calloc(nMax * nMax, sizeof(scs_float));
        c.e = scs_calloc(nMax, sizeof(scs_float));
        /* workspace-size query: optimal lwork lands in wkopt, liwork in c.liwork */
        BLAS(syevr)("Vectors", "All", "Upper", &nMax, c.Xs, &nMax, NULL, NULL, NULL, NULL,
                &eigTol, &m, c.e, c.Z, &nMax, NULL, &wkopt, &negOne, &(c.liwork), &negOne, &info);
        if (info != 0) {
            scs_printf("FATAL: syevr failure, info = %i\n", (int) info);
            return -1;
        }
        c.lwork = (blasint) (wkopt + 0.01); /* 0.01 for int casting safety */
        c.work = scs_malloc(c.lwork * sizeof(scs_float));
        c.iwork = scs_malloc(c.liwork * sizeof(blasint));
        if (!c.Xs || !c.Z || !c.e || !c.work || !c.iwork) {
            return -1;
        }
#else
        scs_printf("FATAL: Cannot solve SDPs with > 2x2 matrices without linked blas+lapack libraries\n");
        /* fixed typo in user-facing message: "libray" -> "library" */
        scs_printf("Edit scs.mk to point to blas+lapack library locations\n");
        return -1;
#endif
    }
#ifdef EXTRAVERBOSE
    scs_printf("initCone complete\n");
#ifdef MATLAB_MEX_FILE
    mexEvalString("drawnow;");
#endif
#endif
    return 0;
}
/* Closed-form projection of the 2x2 matrix stored in X (entries
 * X[0]..X[3], off-diagonals symmetrized) onto the positive semidefinite
 * cone, overwriting X in place.  Always returns 0. */
scs_int project2By2Sdc(scs_float *X) {
    scs_float a11 = X[0];
    scs_float a12 = 0.5 * (X[1] + X[2]); /* symmetrized off-diagonal */
    scs_float a22 = X[3];
    scs_float l1, l2, u1, u2, disc;
    if (ABS(a12) < 1e-6) { /* diagonal matrix */
        X[0] = MAX(a11, 0);
        X[1] = 0;
        X[2] = 0;
        X[3] = MAX(a22, 0);
        return 0;
    }
    disc = SQRTF((a11 - a22) * (a11 - a22) + 4 * a12 * a12);
    /* l1 >= l2 always, since disc >= 0 */
    l1 = 0.5 * (a11 + a22 + disc);
    l2 = 0.5 * (a11 + a22 - disc);
#ifdef EXTRAVERBOSE
    scs_printf("2x2 SD: a = %4f, b = %4f, (X[1] = %4f, X[2] = %4f), d = %4f, rad = %4f, l1 = %4f, l2 = %4f\n", a11, a12, X[1], X[2], a22, disc, l1, l2);
#endif
    if (l2 >= 0) { /* both eigenvalues nonnegative: just symmetrize */
        X[1] = a12;
        X[2] = a12;
        return 0;
    }
    if (l1 <= 0) { /* both nonpositive: projection is the zero matrix */
        X[0] = 0;
        X[1] = 0;
        X[2] = 0;
        X[3] = 0;
        return 0;
    }
    /* mixed signs: keep only the positive eigenpair l1 * u u' */
    u1 = 1 / SQRTF(1 + (l1 - a11) * (l1 - a11) / a12 / a12);
    u2 = u1 * (l1 - a11) / a12;
    X[0] = l1 * u1 * u1;
    X[1] = l1 * u1 * u2;
    X[2] = X[1];
    X[3] = l1 * u2 * u2;
    return 0;
}
/* Projects the dense n x n matrix X onto the PSD cone, in place.
   n <= 2 is handled analytically; larger n requires the LAPACK
   workspace allocated in initCone (file-scope struct `c`).
   Returns 0 on success, -1 on failure. */
static scs_int projSemiDefiniteCone(scs_float *X, scs_int n, scs_int iter) {
    /* project onto the positive semi-definite cone */
#ifdef LAPACK_LIB_FOUND
    scs_int i, j;
    blasint one = 1;
    blasint m = 0; /* number of eigenvalues found by syevr */
    blasint nb = (blasint) n;
    scs_float * Xs = c.Xs;
    scs_float * Z = c.Z;
    scs_float * e = c.e;
    scs_float * work = c.work;
    blasint * iwork = c.iwork;
    blasint lwork = c.lwork;
    blasint liwork = c.liwork;
    scs_float eigTol = CONE_TOL; /* iter < 0 ? CONE_TOL : MAX(CONE_TOL, 1 / POWF(iter + 1, CONE_RATE)); */
    scs_float onef = 1.0;
    scs_float zero = 0.0;
    blasint info;
    scs_float vupper;
#endif
    if (n == 0) {
        return 0;
    }
    if (n == 1) {
        /* 1x1: clamp the scalar at zero */
        if (X[0] < 0.0) {
            X[0] = 0.0;
        }
        return 0;
    }
    if (n == 2) {
        /* 2x2: closed-form eigenprojection, no LAPACK needed */
        return project2By2Sdc(X);
    }
#ifdef LAPACK_LIB_FOUND
    memcpy(Xs, X, n * n * sizeof(scs_float));
    /* Xs = X + X', save div by 2 for eigen-recomp */
    /* (axpy adds row i of X, stride n, onto column i of Xs) */
    for (i = 0; i < n; ++i) {
        BLAS(axpy)(&nb, &onef, &(X[i]), &nb, &(Xs[i * n]), &one);
    }
    vupper = MAX(calcNorm(Xs, n * n), 0.001);
    /* Solve eigenproblem, reuse workspaces */
    /* "VInterval" with range (zero, vupper]: only the m positive
       eigenpairs are computed, into e (values) and Z (vectors) */
    BLAS(syevr)("Vectors", "VInterval", "Upper", &nb, Xs, &nb, &zero, &vupper,
            NULL, NULL, &eigTol, &m, e, Z, &nb, NULL, work, &lwork, iwork, &liwork, &info);
    if (info != 0) {
#ifdef EXTRAVERBOSE
        scs_printf("WARN: LAPACK syevr error, info = %i\n", info);
        scs_printf("syevr input parameter dump:\n");
        scs_printf("nb = %li\n", (long) nb);
        scs_printf("lwork = %li\n", (long) lwork);
        scs_printf("liwork = %li\n", (long) liwork);
        scs_printf("vupper = %f\n", vupper);
        scs_printf("eigTol = %e\n", eigTol);
        printArray(Xs, n * n, "Xs");
        printArray(X, n * n, "X");
        printArray(e, m, "e");
        printArray(Z, m * n, "Z");
#endif
        /* info < 0 means an illegal argument; info > 0 is tolerated */
        if (info < 0) return -1;
    }
    memset(X, 0, n * n * sizeof(scs_float));
    /* X = sum_i (e[i]/2) * z_i z_i': positive part only; the /2 undoes
       the earlier X + X' doubling */
    for (i = 0; i < m; ++i) {
        scs_float a = e[i] / 2;
        BLAS(syr)("Lower", &nb, &a, &(Z[i * n]), &one, X, &nb);
    }
    /* fill in upper half */
    for (i = 0; i < n; ++i) {
        for (j = i + 1; j < n; ++j) {
            X[i + j * n] = X[j + i * n];
        }
    }
#else
    scs_printf("FAILURE: solving SDP with > 2x2 matrices, but no blas/lapack libraries were linked!\n");
    scs_printf("scs will return nonsense!\n");
    scaleArray(X, NAN, n);
    return -1;
#endif
    return 0;
}
/* outward facing cone projection routine, iter is outer algorithm iteration, if iter < 0 then iter is ignored
   warm_start contains guess of projection (can be set to NULL) */
/* Projects x onto the dual cone of k, block by block, in place.
   `count` tracks the running offset into x; the k->f equality block is
   skipped (its dual is free).  NOTE(review): warm_start is never read
   in this body.  Returns 0 on success, -1 on SD-projection failure. */
scs_int projDualCone(scs_float *x, Cone * k, const scs_float * warm_start, scs_int iter) {
    scs_int i;
    scs_int count = (k->f ? k->f : 0);
#ifdef EXTRAVERBOSE
    timer projTimer;
    tic(&projTimer);
#endif
    tic(&coneTimer);
    if (k->l) {
        /* project onto positive orthant */
        for (i = count; i < count + k->l; ++i) {
            if (x[i] < 0.0)
                x[i] = 0.0;
            /*x[i] = (x[i] < 0.0) ? 0.0 : x[i]; */
        }
        count += k->l;
#ifdef EXTRAVERBOSE
        scs_printf("pos orthant proj time: %1.2es\n", tocq(&projTimer) / 1e3);
        tic(&projTimer);
#endif
    }
    if (k->qsize && k->q) {
        /* project onto SOC */
        for (i = 0; i < k->qsize; ++i) {
            if (k->q[i] == 0) {
                continue;
            }
            if (k->q[i] == 1) {
                /* 1-dimensional SOC degenerates to the nonnegative ray */
                if (x[count] < 0.0)
                    x[count] = 0.0;
            } else {
                /* standard three-case SOC projection: v1 is the cone head,
                   s the norm of the tail */
                scs_float v1 = x[count];
                scs_float s = calcNorm(&(x[count + 1]), k->q[i] - 1);
                scs_float alpha = (s + v1) / 2.0;
                if (s <= v1) { /* do nothing */
                } else if (s <= -v1) {
                    /* in the polar cone: project to zero */
                    memset(&(x[count]), 0, k->q[i] * sizeof(scs_float));
                } else {
                    /* boundary case: scale head and tail to alpha */
                    x[count] = alpha;
                    scaleArray(&(x[count + 1]), alpha / s, k->q[i] - 1);
                }
            }
            count += k->q[i];
        }
#ifdef EXTRAVERBOSE
        scs_printf("SOC proj time: %1.2es\n", tocq(&projTimer) / 1e3);
        tic(&projTimer);
#endif
    }
    if (k->ssize && k->s) {
        /* project onto PSD cone */
        for (i = 0; i < k->ssize; ++i) {
            if (k->s[i] == 0) {
                continue;
            }
            if (projSemiDefiniteCone(&(x[count]), k->s[i], iter) < 0) return -1;
            count += (k->s[i]) * (k->s[i]);
        }
#ifdef EXTRAVERBOSE
        scs_printf("SD proj time: %1.2es\n", tocq(&projTimer) / 1e3);
        tic(&projTimer);
#endif
    }
    if (k->ep) {
        scs_float r, s, t;
        scs_int idx;
        /*
         * exponential cone is not self dual, if s \in K
         * then y \in K^* and so if K is the primal cone
         * here we project onto K^*, via Moreau
         * \Pi_C^*(y) = y + \Pi_C(-y)
         */
        scaleArray(&(x[count]), -1, 3 * k->ep); /* x = -x; */
#ifdef OPENMP
#pragma omp parallel for private(r,s,t,idx)
#endif
        for (i = 0; i < k->ep; ++i) {
            idx = count + 3 * i;
            /* save -y, project it onto Kexp, then subtract: the net
               effect is y + proj(-y) by the Moreau identity above */
            r = x[idx];
            s = x[idx + 1];
            t = x[idx + 2];
            projExpCone(&(x[idx]), iter);
            x[idx] -= r;
            x[idx + 1] -= s;
            x[idx + 2] -= t;
        }
        count += 3 * k->ep;
#ifdef EXTRAVERBOSE
        scs_printf("EP proj time: %1.2es\n", tocq(&projTimer) / 1e3);
        tic(&projTimer);
#endif
    }
    if (k->ed) {
        /* exponential cone: */
        /* dual-exponential blocks are projected onto Kexp directly */
#ifdef OPENMP
#pragma omp parallel for
#endif
        for (i = 0; i < k->ed; ++i) {
            projExpCone(&(x[count + 3 * i]), iter);
        }
        count += 3 * k->ed;
#ifdef EXTRAVERBOSE
        scs_printf("ED proj time: %1.2es\n", tocq(&projTimer) / 1e3);
        tic(&projTimer);
#endif
    }
    /* project onto OTHER cones */
    totalConeTime += tocq(&coneTimer);
    return 0;
}
|
effect.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE FFFFF FFFFF EEEEE CCCC TTTTT %
% E F F E C T %
% EEE FFF FFF EEE C T %
% E F F E C T %
% EEEEE F F EEEEE CCCC T %
% %
% %
% MagickCore Image Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/constitute.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/montage.h"
#include "MagickCore/morphology.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/shear.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/threshold.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveBlurImage() adaptively blurs the image by blurring less
% intensely near image edges and more intensely far from edges. We blur the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and AdaptiveBlurImage() selects a suitable radius for you.
%
% The format of the AdaptiveBlurImage method is:
%
% Image *AdaptiveBlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define AdaptiveBlurImageTag "Convolve/Image"
#define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *blur_view,
    *edge_view,
    *image_view;

  double
    normalize,
    **kernel;

  Image
    *blur_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  /* a (near) zero sigma means no blur: return the unmodified clone */
  if (fabs(sigma) < MagickEpsilon)
    return(blur_image);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, blur, and level again.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image,exception);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image,exception);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.
  */
  /* only EVEN indices of kernel[] are populated; kernel[i] is a
     (width-i)x(width-i) normalized Gaussian, so kernel[0] is the widest */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
    sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) ResetMagickMemory(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory(
      (size_t) (width-i),(width-i)*sizeof(**kernel)));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[i][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    /* fold any normalization error into the center tap so taps sum to 1 */
    kernel[i][(k-1)/2]+=(double) (1.0-normalize);
    if (sigma < MagickEpsilon)
      kernel[i][(k-1)/2]=1.0;
  }
  /* partial kernel allocation failed above: unwind everything */
  if (i < (ssize_t) width)
    {
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively blur image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,blur_image,blur_image->rows,1)
#endif
  for (y=0; y < (ssize_t) blur_image->rows; y++)
  {
    register const Quantum
      *magick_restrict r;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) blur_image->columns; x++)
    {
      register const Quantum
        *magick_restrict p;

      register ssize_t
        i;

      ssize_t
        center,
        j;

      /* map edge strength to a kernel index j (clamped to [0,width],
         forced even so kernel[j] exists); the kernel applied below is
         (width-j) taps on a side */
      j=(ssize_t) ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5);
      if (j < 0)
        j=0;
      else
        if (j > (ssize_t) width)
          j=(ssize_t) width;
      if ((j & 0x01) != 0)
        j--;
      /* fetch the (width-j)x(width-j) neighborhood centered on (x,y) */
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
        (ssize_t) ((width-j)/2L),width-j,width-j,exception);
      if (p == (const Quantum *) NULL)
        break;
      /* offset (in Quantum units) of the central pixel within p */
      center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
        GetPixelChannels(image)*((width-j)/2);
      for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const double
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        /* copy-through channels and masked pixels pass unchanged */
        if (((blur_traits & CopyPixelTrait) != 0) ||
            (GetPixelReadMask(image,p+center) == 0))
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        k=kernel[j];
        pixels=p;
        pixel=0.0;
        gamma=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (v=0; v < (ssize_t) (width-j); v++)
            {
              for (u=0; u < (ssize_t) (width-j); u++)
              {
                pixel+=(*k)*pixels[i];
                gamma+=(*k);
                k++;
                pixels+=GetPixelChannels(image);
              }
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha blending.
        */
        /* weight each tap by the source pixel's alpha; gamma renormalizes */
        for (v=0; v < (ssize_t) (width-j); v++)
        {
          for (u=0; u < (ssize_t) (width-j); u++)
          {
            alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
            pixel+=(*k)*alpha*pixels[i];
            gamma+=(*k)*alpha;
            k++;
            pixels+=GetPixelChannels(image);
          }
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(blur_image);
      r+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AdaptiveBlurImage)
#endif
        proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e S h a r p e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveSharpenImage() adaptively sharpens the image by sharpening more
% intensely near image edges and less intensely far from edges. We sharpen the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you.
%
% The format of the AdaptiveSharpenImage method is:
%
% Image *AdaptiveSharpenImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define AdaptiveSharpenImageTag "Convolve/Image"
#define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *sharp_view,
    *edge_view,
    *image_view;

  double
    normalize,
    **kernel;

  Image
    *sharp_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sharp_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (sharp_image == (Image *) NULL)
    return((Image *) NULL);
  /* a (near) zero sigma means no sharpening: return the unmodified clone */
  if (fabs(sigma) < MagickEpsilon)
    return(sharp_image);
  if (SetImageStorageClass(sharp_image,DirectClass,exception) == MagickFalse)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, sharp, and level again.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image,exception);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image,exception);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.
  */
  /* only EVEN indices of kernel[] are populated; kernel[i] is a
     (width-i)x(width-i) negative-Gaussian (sharpening) kernel */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) ResetMagickMemory(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
      (width-i),(width-i)*sizeof(**kernel)));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[i][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    /* center tap = -2*sum(negative taps): the classic sharpening kernel
       whose taps sum to |normalize| */
    kernel[i][(k-1)/2]=(double) ((-2.0)*normalize);
    if (sigma < MagickEpsilon)
      kernel[i][(k-1)/2]=1.0;
  }
  /* partial kernel allocation failed above: unwind everything */
  if (i < (ssize_t) width)
    {
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively sharpen image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  sharp_view=AcquireAuthenticCacheView(sharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,sharp_image,sharp_image->rows,1)
#endif
  for (y=0; y < (ssize_t) sharp_image->rows; y++)
  {
    register const Quantum
      *magick_restrict r;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1,
      exception);
    if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) sharp_image->columns; x++)
    {
      register const Quantum
        *magick_restrict p;

      register ssize_t
        i;

      ssize_t
        center,
        j;

      /* map edge strength to a kernel index j (clamped to [0,width],
         forced even so kernel[j] exists); the kernel applied below is
         (width-j) taps on a side */
      j=(ssize_t) ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5);
      if (j < 0)
        j=0;
      else
        if (j > (ssize_t) width)
          j=(ssize_t) width;
      if ((j & 0x01) != 0)
        j--;
      /* fetch the (width-j)x(width-j) neighborhood centered on (x,y) */
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
        (ssize_t) ((width-j)/2L),width-j,width-j,exception);
      if (p == (const Quantum *) NULL)
        break;
      /* offset (in Quantum units) of the central pixel within p */
      center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
        GetPixelChannels(image)*((width-j)/2);
      for (i=0; i < (ssize_t) GetPixelChannels(sharp_image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          sharp_traits,
          traits;

        register const double
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        sharp_traits=GetPixelChannelTraits(sharp_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (sharp_traits == UndefinedPixelTrait))
          continue;
        /* copy-through channels and masked pixels pass unchanged */
        if (((sharp_traits & CopyPixelTrait) != 0) ||
            (GetPixelReadMask(image,p+center) == 0))
          {
            SetPixelChannel(sharp_image,channel,p[center+i],q);
            continue;
          }
        k=kernel[j];
        pixels=p;
        pixel=0.0;
        gamma=0.0;
        if ((sharp_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (v=0; v < (ssize_t) (width-j); v++)
            {
              for (u=0; u < (ssize_t) (width-j); u++)
              {
                pixel+=(*k)*pixels[i];
                gamma+=(*k);
                k++;
                pixels+=GetPixelChannels(image);
              }
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha blending.
        */
        /* weight each tap by the source pixel's alpha; gamma renormalizes */
        for (v=0; v < (ssize_t) (width-j); v++)
        {
          for (u=0; u < (ssize_t) (width-j); u++)
          {
            alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
            pixel+=(*k)*alpha*pixels[i];
            gamma+=(*k)*alpha;
            k++;
            pixels+=GetPixelChannels(image);
          }
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(sharp_image);
      r+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AdaptiveSharpenImage)
#endif
        proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sharp_image->type=image->type;
  sharp_view=DestroyCacheView(sharp_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    sharp_image=DestroyImage(sharp_image);
  return(sharp_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlurImage() blurs an image. We convolve the image with a Gaussian operator
% of the given radius and standard deviation (sigma). For reasonable results,
% the radius should be larger than sigma. Use a radius of 0 and BlurImage()
% selects a suitable radius for you.
%
% The format of the BlurImage method is:
%
% Image *BlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  char
    geometry[MagickPathExtent];

  Image
    *blur_image;

  KernelInfo
    *kernel_info;

  /*
    Sanity checks.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Prefer the hardware-accelerated blur when it is available.
  */
  blur_image=AccelerateBlurImage(image,radius,sigma,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  /*
    Separable Gaussian: a horizontal 1-D blur followed by the same blur
    rotated 90 degrees, expressed as a two-part kernel geometry string.
  */
  (void) FormatLocaleString(geometry,MagickPathExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n v o l v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvolveImage() applies a custom convolution kernel to the image.
%
% The format of the ConvolveImage method is:
%
% Image *ConvolveImage(const Image *image,const KernelInfo *kernel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o kernel: the filtering kernel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConvolveImage(const Image *image,
  const KernelInfo *kernel_info,ExceptionInfo *exception)
{
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  Image
    *convolve_image;

  /*
    Prefer the OpenCL-accelerated convolution when it is available.
  */
  convolve_image=AccelerateConvolveImage(image,kernel_info,exception);
  if (convolve_image != (Image *) NULL)
    return(convolve_image);
#endif
  /*
    Convolution is a single morphology pass with a convolve method.
  */
  return(MorphologyImage(image,ConvolveMorphology,1,kernel_info,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s p e c k l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DespeckleImage() reduces the speckle noise in an image while preserving the
% edges of the original image. A speckle removing filter uses a complementary
% hulling technique (raising pixels that are darker than their surrounding
% neighbors, then complementarily lowering pixels that are brighter than their
% surrounding neighbors) to reduce the speckle index of that image (reference
% Crimmins speckle removal).
%
% The format of the DespeckleImage method is:
%
% Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Hull() performs one Crimmins complementary-hulling pass over the bordered
  (columns+2) x (rows+2) pixel buffers f and g for the neighbor direction
  (x_offset,y_offset).  The first loop nudges each interior pixel of f one
  level toward its neighbor (writing into g) when the neighbor differs by
  at least 2 levels; the second loop refines g back into f using both the
  neighbor and its mirror.  polarity > 0 raises dark pixels, otherwise it
  lowers bright ones.
*/
static void Hull(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,const size_t columns,const size_t rows,
  const int polarity,Quantum *magick_restrict f,Quantum *magick_restrict g)
{
  register Quantum
    *p,
    *q,
    *r,
    *s;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(f != (Quantum *) NULL);
  assert(g != (Quantum *) NULL);
  /* skip the one-pixel top border row of the bordered buffers */
  p=f+(columns+2);
  q=g+(columns+2);
  /* r addresses the neighbor at offset (x_offset,y_offset) */
  r=p+(y_offset*(columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickRealType
      v;

    register ssize_t
      i,
      x;

    /* index of (row y, column 0) in the interior, accounting for the
       1-pixel border on each side: (y*(columns+2)+1) rebased past row 0 */
    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        /* raise the pixel one level if the neighbor is >= 2 levels above */
        if ((MagickRealType) r[i] >= (v+ScaleCharToQuantum(2)))
          v+=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        /* lower the pixel one level if the neighbor is >= 2 levels below */
        if ((MagickRealType) r[i] <= (v-ScaleCharToQuantum(2)))
          v-=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
  }
  p=f+(columns+2);
  q=g+(columns+2);
  /* second pass compares against the neighbor (r) and its mirror (s) in g */
  r=q+(y_offset*(columns+2)+x_offset);
  s=q-(y_offset*(columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    register ssize_t
      i,
      x;

    MagickRealType
      v;

    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] >= (v+ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] > v))
          v+=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] <= (v-ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] < v))
          v-=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
  }
}
MagickExport Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
{
#define DespeckleImageTag "Despeckle/Image"

  CacheView
    *despeckle_view,
    *image_view;

  Image
    *despeckle_image;

  MagickBooleanType
    status;

  MemoryInfo
    *buffer_info,
    *pixel_info;

  Quantum
    *magick_restrict buffer,
    *magick_restrict pixels;

  register ssize_t
    i;

  size_t
    length;

  /*
    Hull offsets: four directions (S, E, SE, SW); each is paired with its
    negation below so all eight neighbors participate in the passes.
  */
  static const ssize_t
    X[4] = {0, 1, 1,-1},
    Y[4] = {1, 0, 1, 1};

  /*
    Allocate despeckled image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path; fall through to CPU on failure. */
  despeckle_image=AccelerateDespeckleImage(image,exception);
  if (despeckle_image != (Image *) NULL)
    return(despeckle_image);
#endif
  despeckle_image=CloneImage(image,0,0,MagickTrue,exception);
  if (despeckle_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(despeckle_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      despeckle_image=DestroyImage(despeckle_image);
      return((Image *) NULL);
    }
  /*
    Allocate image buffer: the +2 in each dimension leaves a one-pixel
    zeroed border around the working copy of the channel.
  */
  length=(size_t) ((image->columns+2)*(image->rows+2));
  pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
  buffer_info=AcquireVirtualMemory(length,sizeof(*buffer));
  if ((pixel_info == (MemoryInfo *) NULL) ||
      (buffer_info == (MemoryInfo *) NULL))
    {
      if (buffer_info != (MemoryInfo *) NULL)
        buffer_info=RelinquishVirtualMemory(buffer_info);
      if (pixel_info != (MemoryInfo *) NULL)
        pixel_info=RelinquishVirtualMemory(pixel_info);
      despeckle_image=DestroyImage(despeckle_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info);
  buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info);
  /*
    Reduce speckle in the image, one pixel channel at a time.
  */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel
      channel;

    PixelTrait
      despeckle_traits,
      traits;

    register ssize_t
      k,
      x;

    ssize_t
      j,
      y;

    if (status == MagickFalse)
      continue;
    channel=GetPixelChannelChannel(image,i);
    traits=GetPixelChannelTraits(image,channel);
    despeckle_traits=GetPixelChannelTraits(despeckle_image,channel);
    if ((traits == UndefinedPixelTrait) ||
        (despeckle_traits == UndefinedPixelTrait))
      continue;
    if ((despeckle_traits & CopyPixelTrait) != 0)
      continue;
    /*
      Copy this channel into the bordered scratch buffer.
    */
    (void) ResetMagickMemory(pixels,0,length*sizeof(*pixels));
    j=(ssize_t) image->columns+2;  /* skip the top border row */
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const Quantum
        *magick_restrict p;

      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;  /* left border pixel */
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[j++]=p[i];
        p+=GetPixelChannels(image);
      }
      j++;  /* right border pixel */
    }
    (void) ResetMagickMemory(buffer,0,length*sizeof(*buffer));
    /*
      Run the hull passes in each direction with both polarities (grow then
      shrink), updating pixels[] in place with buffer[] as scratch.
    */
    for (k=0; k < 4; k++)
    {
      Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer);
      Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer);
    }
    /*
      Write the despeckled channel back to the destination image.
    */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register Quantum
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns,
        1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelChannel(despeckle_image,channel,pixels[j++],q);
        q+=GetPixelChannels(despeckle_image);
      }
      sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      j++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  despeckle_view=DestroyCacheView(despeckle_view);
  image_view=DestroyCacheView(image_view);
  buffer_info=RelinquishVirtualMemory(buffer_info);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  despeckle_image->type=image->type;
  if (status == MagickFalse)
    despeckle_image=DestroyImage(despeckle_image);
  return(despeckle_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E d g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EdgeImage() finds edges in an image. Radius defines the radius of the
% convolution filter. Use a radius of 0 and EdgeImage() selects a suitable
% radius for you.
%
% The format of the EdgeImage method is:
%
% Image *EdgeImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EdgeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,0.5);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /* Zero the whole struct, then fill in only the fields used below. */
  (void) ResetMagickMemory(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (kernel_info->width-1)/2;
  kernel_info->y=(ssize_t) (kernel_info->height-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    High-pass kernel: every tap is -1 except the center, which is set to
    width*height-1 so the taps sum to zero (flat regions convolve to black).
    After the loop i == width*height, so values[i/2] addresses the center
    tap -- assumes width*height is odd, i.e. GetOptimalKernelWidth1D()
    returns an odd width (TODO confirm).
  */
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]=(-1.0);
  kernel_info->values[i/2]=(double) kernel_info->width*kernel_info->height-1.0;
  edge_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(edge_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E m b o s s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EmbossImage() returns a grayscale image with a three-dimensional effect.
% We convolve the image with a Gaussian operator of the given radius and
% standard deviation (sigma). For reasonable results, radius should be
% larger than sigma. Use a radius of 0 and Emboss() selects a suitable
% radius for you.
%
% The format of the EmbossImage method is:
%
% Image *EmbossImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EmbossImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *emboss_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->width*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Build a directional Gaussian kernel: taps above/left of center carry a
    -8 weight, the rest +8, and every tap off the anti-diagonal (u != k,
    where k walks from +j down to -j per row) is zeroed.  The surviving
    signed diagonal produces the light/dark relief effect.
  */
  j=(ssize_t) (kernel_info->width-1)/2;
  k=j;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(MagickRealType) (((u < 0) || (v < 0) ? -8.0 :
        8.0)*exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/
        (2.0*MagickPI*MagickSigma*MagickSigma));
      if (u != k)
        kernel_info->values[i]=0.0;
      i++;
    }
    k--;
  }
  /*
    Normalize so the surviving taps sum to one (PerceptibleReciprocal()
    guards against a vanishing sum).
  */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  emboss_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  /* Equalize to spread the mid-gray relief over the full dynamic range. */
  if (emboss_image != (Image *) NULL)
    (void) EqualizeImage(emboss_image,exception);
  return(emboss_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a u s s i a n B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussianBlurImage() blurs an image. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma).
% For reasonable results, the radius should be larger than sigma. Use a
% radius of 0 and GaussianBlurImage() selects a suitable radius for you.
%
% The format of the GaussianBlurImage method is:
%
% Image *GaussianBlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GaussianBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  char
    kernel_geometry[MagickPathExtent];

  Image
    *blur_image;

  KernelInfo
    *gaussian_kernel;

  /*
    Blur the image by convolving it with a Gaussian kernel described by the
    caller-supplied radius and sigma.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Describe the kernel textually and let AcquireKernelInfo() build it. */
  (void) FormatLocaleString(kernel_geometry,MagickPathExtent,
    "gaussian:%.20gx%.20g",radius,sigma);
  gaussian_kernel=AcquireKernelInfo(kernel_geometry,exception);
  if (gaussian_kernel == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur_image=ConvolveImage(image,gaussian_kernel,exception);
  gaussian_kernel=DestroyKernelInfo(gaussian_kernel);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% K u w a h a r a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% KuwaharaImage() is an edge preserving noise reduction filter.
%
% The format of the KuwaharaImage method is:
%
% Image *KuwaharaImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the square window radius.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickRealType GetMeanLuma(const Image *magick_restrict image,
  const double *magick_restrict pixel)
{
  /*
    Rec. 709 luma: weighted sum of the red, green and blue samples resolved
    through the image's channel map.
  */
  double
    blue,
    green,
    red;

  red=pixel[image->channel_map[RedPixelChannel].offset];
  green=pixel[image->channel_map[GreenPixelChannel].offset];
  blue=pixel[image->channel_map[BluePixelChannel].offset];
  return((MagickRealType) (0.212656f*red+0.715158f*green+0.072186f*blue));
}
MagickExport Image *KuwaharaImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define KuwaharaImageTag "Kuwahara/Image"

  CacheView
    *image_view,
    *kuwahara_view;

  Image
    *gaussian_image,
    *kuwahara_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    width;

  ssize_t
    y;

  /*
    Initialize Kuwahara image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=(size_t) radius+1;  /* side length of each sampling quadrant */
  /* Pre-smooth the input; all quadrant statistics are taken from this copy. */
  gaussian_image=BlurImage(image,radius,sigma,exception);
  if (gaussian_image == (Image *) NULL)
    return((Image *) NULL);
  kuwahara_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (kuwahara_image == (Image *) NULL)
    {
      gaussian_image=DestroyImage(gaussian_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(kuwahara_image,DirectClass,exception) == MagickFalse)
    {
      gaussian_image=DestroyImage(gaussian_image);
      kuwahara_image=DestroyImage(kuwahara_image);
      return((Image *) NULL);
    }
  /*
    Edge preserving noise reduction filter.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(gaussian_image,exception);
  kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,kuwahara_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) gaussian_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) gaussian_image->columns; x++)
    {
      const Quantum
        *magick_restrict p;

      double
        min_variance;

      RectangleInfo
        quadrant,
        target;

      register size_t
        i;

      min_variance=MagickMaximumValue;
      SetGeometry(gaussian_image,&target);
      quadrant.width=width;
      quadrant.height=width;
      /*
        Examine the four width-by-width quadrants that touch (x,y) and keep
        the one with the smallest luma variance.
      */
      for (i=0; i < 4; i++)
      {
        const Quantum
          *magick_restrict k;

        double
          mean[MaxPixelChannels],
          variance;

        register ssize_t
          n;

        ssize_t
          j;

        quadrant.x=x;
        quadrant.y=y;
        switch (i)
        {
          case 0:
          {
            /* upper-left quadrant */
            quadrant.x=x-(ssize_t) (width-1);
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 1:
          {
            /* upper-right quadrant */
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 2:
          {
            /* lower-left quadrant */
            quadrant.x=x-(ssize_t) (width-1);
            break;
          }
          case 3:
          default:
            break;  /* lower-right quadrant: anchored at (x,y) */
        }
        p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y,
          quadrant.width,quadrant.height,exception);
        if (p == (const Quantum *) NULL)
          break;
        /* Per-channel mean over the quadrant. */
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]=0.0;
        k=p;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
            mean[j]+=(double) k[j];
          k+=GetPixelChannels(gaussian_image);
        }
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]/=(double) (width*width);
        /* Luma variance of the quadrant about its mean. */
        k=p;
        variance=0.0;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          double
            luma;

          luma=GetPixelLuma(gaussian_image,k);
          variance+=(luma-GetMeanLuma(gaussian_image,mean))*
            (luma-GetMeanLuma(gaussian_image,mean));
          k+=GetPixelChannels(gaussian_image);
        }
        if (variance < min_variance)
          {
            min_variance=variance;
            target=quadrant;
          }
      }
      if (i < 4)  /* a quadrant read failed above */
        {
          status=MagickFalse;
          break;
        }
      /* Output pixel is interpolated at the winning quadrant's center. */
      status=InterpolatePixelChannels(gaussian_image,image_view,kuwahara_image,
        UndefinedInterpolatePixel,(double) target.x+target.width/2.0,(double)
        target.y+target.height/2.0,q,exception);
      q+=GetPixelChannels(kuwahara_image);
    }
    if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_KuwaharaImage)
#endif
        proceed=SetImageProgress(image,KuwaharaImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  kuwahara_view=DestroyCacheView(kuwahara_view);
  image_view=DestroyCacheView(image_view);
  gaussian_image=DestroyImage(gaussian_image);
  if (status == MagickFalse)
    kuwahara_image=DestroyImage(kuwahara_image);
  return(kuwahara_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L o c a l C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LocalContrastImage() attempts to increase the appearance of large-scale
% light-dark transitions. Local contrast enhancement works similarly to
% sharpening with an unsharp mask, however the mask is instead created using
% an image with a greater blur distance.
%
% The format of the LocalContrastImage method is:
%
% Image *LocalContrastImage(const Image *image, const double radius,
% const double strength,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian blur, in percentage with 100%
% resulting in a blur radius of 20% of largest dimension.
%
% o strength: the strength of the blur mask in percentage.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LocalContrastImage(const Image *image,const double radius,
  const double strength,ExceptionInfo *exception)
{
#define LocalContrastImageTag "LocalContrast/Image"

  CacheView
    *image_view,
    *contrast_view;

  float
    *interImage,
    *scanLinePixels,
    totalWeight;

  Image
    *contrast_image;

  MagickBooleanType
    status;

  MemoryInfo
    *scanLinePixels_info,
    *interImage_info;

  ssize_t
    scanLineSize,
    width;

  /*
    Initialize contrast image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path; fall through to CPU on failure. */
  contrast_image=AccelerateLocalContrastImage(image,radius,strength,exception);
  if (contrast_image != (Image *) NULL)
    return(contrast_image);
#endif
  contrast_image=CloneImage(image,0,0,MagickTrue,exception);
  if (contrast_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(contrast_image,DirectClass,exception) == MagickFalse)
    {
      contrast_image=DestroyImage(contrast_image);
      return((Image *) NULL);
    }
  image_view=AcquireVirtualCacheView(image,exception);
  contrast_view=AcquireAuthenticCacheView(contrast_image,exception);
  /*
    The radius argument is a percentage: the blur half-width is
    0.002*|radius| of the largest image dimension.  Each scan line is
    padded by width on both ends.
  */
  scanLineSize=(ssize_t) MagickMax(image->columns,image->rows);
  width=(ssize_t) scanLineSize*0.002f*fabs(radius);
  scanLineSize+=(2*width);
  /* One scan-line buffer per OpenMP thread. */
  scanLinePixels_info=AcquireVirtualMemory((size_t) GetOpenMPMaximumThreads()*
    scanLineSize,sizeof(*scanLinePixels));
  if (scanLinePixels_info == (MemoryInfo *) NULL)
    {
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  scanLinePixels=(float *) GetVirtualMemoryBlob(scanLinePixels_info);
  /*
    Create intermediate buffer: holds the vertically blurred luma, one row
    per image row, padded by width on each side.
  */
  interImage_info=AcquireVirtualMemory(image->rows*(image->columns+(2*width)),
    sizeof(*interImage));
  if (interImage_info == (MemoryInfo *) NULL)
    {
      scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info);
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  interImage=(float *) GetVirtualMemoryBlob(interImage_info);
  totalWeight=(float) ((width+1)*(width+1));
  /*
    Vertical pass: blur each column's luma with a tent-shaped weighting and
    store the result (plus mirrored horizontal padding) in interImage.
  */
  status=MagickTrue;
  {
    ssize_t
      x;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static,4) \
      magick_threads(image,image,image->columns,1)
#endif
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *out,
        *pix,
        *pixels;

      register ssize_t
        y;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      /* Select this thread's private scan-line buffer. */
      pixels=scanLinePixels;
      pixels+=id*scanLineSize;
      pix=pixels;
      p=GetCacheViewVirtualPixels(image_view,x,-width,1,image->rows+(2*width),
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      /* Cache the column's luma, including the virtual padding rows. */
      for (y=0; y < (ssize_t) image->rows+(2*width); y++)
      {
        *pix++=(float)GetPixelLuma(image,p);
        p+=image->number_channels;
      }
      out=interImage+x+width;
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        float
          sum,
          weight;

        /* Tent-weighted sum over the 2*width-1 samples centered on y. */
        weight=1.0f;
        sum=0;
        pix=pixels+y;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* write to output */
        *out=sum/totalWeight;
        /* mirror into padding */
        if (x <= width && x != 0)
          *(out-(x*2))=*out;
        if ((x > (ssize_t) image->columns-width-2) &&
            (x != (ssize_t) image->columns-1))
          *(out+((image->columns-x-1)*2))=*out;
        out+=image->columns+(width*2);
      }
    }
  }
  /*
    Horizontal pass: blur interImage row-wise, then sharpen each pixel by
    scaling it with the ratio of boosted to original luma.
  */
  {
    ssize_t
      y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static,4) \
      magick_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *pix,
        *pixels;

      register Quantum
        *magick_restrict q;

      register ssize_t
        x;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      pixels=scanLinePixels;
      pixels+=id*scanLineSize;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      q=GetCacheViewAuthenticPixels(contrast_view,0,y,image->columns,1,
        exception);
      if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      /* Copy this row (with padding) of the vertically blurred luma. */
      memcpy(pixels,interImage+(y*(image->columns+(2*width))),(image->columns+
        (2*width))*sizeof(float));
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        float
          mult,
          srcVal,
          sum,
          weight;

        /* Same tent-weighted sum as the vertical pass, along the row. */
        weight=1.0f;
        sum=0;
        pix=pixels+x;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* Apply and write */
        srcVal=(float) GetPixelLuma(image,p);
        mult=(srcVal-(sum/totalWeight))*(strength/100.0f);
        mult=(srcVal+mult)/srcVal;
        SetPixelRed(contrast_image,ClampToQuantum(GetPixelRed(image,p)*mult),
          q);
        SetPixelGreen(contrast_image,ClampToQuantum(GetPixelGreen(image,p)*
          mult),q);
        SetPixelBlue(contrast_image,ClampToQuantum(GetPixelBlue(image,p)*mult),
          q);
        p+=image->number_channels;
        q+=contrast_image->number_channels;
      }
      if (SyncCacheViewAuthenticPixels(contrast_view,exception) == MagickFalse)
        status=MagickFalse;
    }
  }
  scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info);
  interImage_info=RelinquishVirtualMemory(interImage_info);
  contrast_view=DestroyCacheView(contrast_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    contrast_image=DestroyImage(contrast_image);
  return(contrast_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o t i o n B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MotionBlurImage() simulates motion blur. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma).
% For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and MotionBlurImage() selects a suitable radius for you.
% Angle gives the angle of the blurring motion.
%
% Andrew Protano contributed this effect.
%
% The format of the MotionBlurImage method is:
%
% Image *MotionBlurImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting
% the center pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: Apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickRealType *GetMotionBlurKernel(const size_t width,
const double sigma)
{
MagickRealType
*kernel,
normalize;
register ssize_t
i;
/*
Generate a 1-D convolution kernel.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
width,sizeof(*kernel)));
if (kernel == (MagickRealType *) NULL)
return(kernel);
normalize=0.0;
for (i=0; i < (ssize_t) width; i++)
{
kernel[i]=(MagickRealType) (exp((-((double) i*i)/(double) (2.0*MagickSigma*
MagickSigma)))/(MagickSQ2PI*MagickSigma));
normalize+=kernel[i];
}
for (i=0; i < (ssize_t) width; i++)
kernel[i]/=normalize;
return(kernel);
}
MagickExport Image *MotionBlurImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
#define BlurImageTag "Blur/Image"

  CacheView
    *blur_view,
    *image_view,
    *motion_view;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *kernel;

  OffsetInfo
    *offset;

  PointInfo
    point;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=GetMotionBlurKernel(width,sigma);
  if (kernel == (MagickRealType *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset));
  if (offset == (OffsetInfo *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Precompute the i-th sample offset along the motion direction: offsets
    step along the line at the given angle, one per kernel tap.
  */
  point.x=(double) width*sin(DegreesToRadians(angle));
  point.y=(double) width*cos(DegreesToRadians(angle));
  for (i=0; i < (ssize_t) width; i++)
  {
    offset[i].x=(ssize_t) ceil((double) (i*point.y)/hypot(point.x,point.y)-0.5);
    offset[i].y=(ssize_t) ceil((double) (i*point.x)/hypot(point.x,point.y)-0.5);
  }
  /*
    Motion blur image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  motion_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const Quantum
          *magick_restrict r;

        register MagickRealType
          *magick_restrict k;

        register ssize_t
          j;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if (((blur_traits & CopyPixelTrait) != 0) ||
            (GetPixelReadMask(image,p) == 0))
          {
            SetPixelChannel(blur_image,channel,p[i],q);
            continue;
          }
        k=kernel;
        pixel=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending: plain weighted sum along the motion line.
              NOTE(review): on a failed cache read the `continue` skips the
              k++ below, so later taps pair with earlier kernel weights --
              looks like a latent bug; verify against upstream.
            */
            for (j=0; j < (ssize_t) width; j++)
            {
              r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+
                offset[j].y,1,1,exception);
              if (r == (const Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              pixel+=(*k)*r[i];
              k++;
            }
            SetPixelChannel(blur_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /*
          Alpha-blended path: weight each tap by its alpha and renormalize
          by the accumulated coverage (gamma).
        */
        alpha=0.0;
        gamma=0.0;
        for (j=0; j < (ssize_t) width; j++)
        {
          r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+offset[j].y,1,
            1,exception);
          if (r == (const Quantum *) NULL)
            {
              status=MagickFalse;
              continue;
            }
          alpha=(double) (QuantumScale*GetPixelAlpha(image,r));
          pixel+=(*k)*alpha*r[i];
          gamma+=(*k)*alpha;
          k++;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(blur_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MotionBlurImage)
#endif
        proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  motion_view=DestroyCacheView(motion_view);
  image_view=DestroyCacheView(image_view);
  kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
  offset=(OffsetInfo *) RelinquishMagickMemory(offset);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P r e v i e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PreviewImage() tiles 9 thumbnails of the specified image with an image
% processing operation applied with varying parameters. This may be helpful in
% pin-pointing an appropriate parameter for a particular image processing
% operation.
%
% The format of the PreviewImages method is:
%
% Image *PreviewImages(const Image *image,const PreviewType preview,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o preview: the image processing operation.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PreviewImage(const Image *image,const PreviewType preview,
  ExceptionInfo *exception)
{
#define NumberTiles 9
#define PreviewImageTag "Preview/Image"
#define DefaultPreviewGeometry "204x204+10+10"
  char
    factor[MagickPathExtent],
    label[MagickPathExtent];
  double
    degrees,
    gamma,
    percentage,
    radius,
    sigma,
    threshold;
  extern const char
    DefaultTileFrame[];
  Image
    *images,
    *montage_image,
    *preview_image,
    *thumbnail;
  ImageInfo
    *preview_info;
  MagickBooleanType
    proceed;
  MontageInfo
    *montage_info;
  QuantizeInfo
    quantize_info;
  RectangleInfo
    geometry;
  register ssize_t
    i,
    x;
  size_t
    colors;
  ssize_t
    y;
  /*
    Open output image file.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  colors=2;
  degrees=0.0;
  gamma=(-0.2f);
  preview_info=AcquireImageInfo();
  SetGeometry(image,&geometry);
  (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y,
    &geometry.width,&geometry.height);
  images=NewImageList();
  percentage=12.5;
  GetQuantizeInfo(&quantize_info);
  radius=0.0;
  sigma=1.0;
  threshold=0.0;
  x=0;
  y=0;
  /*
    Render one tile per iteration; the parameters (percentage, radius,
    sigma, ...) are advanced at the bottom of the loop so each tile shows
    a progressively stronger effect.  The center tile (i == NumberTiles/2)
    is the unprocessed reference thumbnail.
  */
  for (i=0; i < NumberTiles; i++)
  {
    thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception);
    if (thumbnail == (Image *) NULL)
      break;
    (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL,
      (void *) NULL);
    (void) SetImageProperty(thumbnail,"label",DefaultTileLabel,exception);
    if (i == (NumberTiles/2))
      {
        (void) QueryColorCompliance("#dfdfdf",AllCompliance,
          &thumbnail->alpha_color,exception);
        AppendImageToList(&images,thumbnail);
        continue;
      }
    switch (preview)
    {
      case RotatePreview:
      {
        degrees+=45.0;
        preview_image=RotateImage(thumbnail,degrees,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"rotate %g",degrees);
        break;
      }
      case ShearPreview:
      {
        degrees+=5.0;
        preview_image=ShearImage(thumbnail,degrees,degrees,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"shear %gx%g",degrees,
          2.0*degrees);
        break;
      }
      case RollPreview:
      {
        x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles;
        y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles;
        preview_image=RollImage(thumbnail,x,y,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"roll %+.20gx%+.20g",
          (double) x,(double) y);
        break;
      }
      case HuePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"100,100,%g",2.0*
          percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case SaturationPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"100,%g",2.0*percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case BrightnessPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"%g",2.0*percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case GammaPreview:
      default:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        gamma+=0.4f;
        (void) GammaImage(preview_image,gamma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"gamma %g",gamma);
        break;
      }
      case SpiffPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image != (Image *) NULL)
          for (x=0; x < i; x++)
            (void) ContrastImage(preview_image,MagickTrue,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"contrast (%.20g)",
          (double) i+1);
        break;
      }
      case DullPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        for (x=0; x < i; x++)
          (void) ContrastImage(preview_image,MagickFalse,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"+contrast (%.20g)",
          (double) i+1);
        break;
      }
      case GrayscalePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        quantize_info.colorspace=GRAYColorspace;
        (void) QuantizeImage(&quantize_info,preview_image,exception);
        (void) FormatLocaleString(label,MagickPathExtent,
          "-colorspace gray -colors %.20g",(double) colors);
        break;
      }
      case QuantizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        (void) QuantizeImage(&quantize_info,preview_image,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"colors %.20g",(double)
          colors);
        break;
      }
      case DespecklePreview:
      {
        for (x=0; x < (i-1); x++)
        {
          preview_image=DespeckleImage(thumbnail,exception);
          if (preview_image == (Image *) NULL)
            break;
          thumbnail=DestroyImage(thumbnail);
          thumbnail=preview_image;
        }
        preview_image=DespeckleImage(thumbnail,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(label,MagickPathExtent,"despeckle (%.20g)",
          (double) i+1);
        break;
      }
      case ReduceNoisePreview:
      {
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) radius,
          (size_t) radius,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"noise %g",radius);
        break;
      }
      case AddNoisePreview:
      {
        /*
          Label follows the NoiseType enumeration order: uniform, gaussian,
          multiplicative, impulse, laplacian, Poisson.  Fix: laplacian and
          Poisson belong to cases 4 and 5; with the former 5/6 numbering,
          tile i == 4 fell through to the default case, which clobbered
          thumbnail->magick.
        */
        switch ((int) i)
        {
          case 0:
          {
            (void) CopyMagickString(factor,"uniform",MagickPathExtent);
            break;
          }
          case 1:
          {
            (void) CopyMagickString(factor,"gaussian",MagickPathExtent);
            break;
          }
          case 2:
          {
            (void) CopyMagickString(factor,"multiplicative",MagickPathExtent);
            break;
          }
          case 3:
          {
            (void) CopyMagickString(factor,"impulse",MagickPathExtent);
            break;
          }
          case 4:
          {
            (void) CopyMagickString(factor,"laplacian",MagickPathExtent);
            break;
          }
          case 5:
          {
            (void) CopyMagickString(factor,"Poisson",MagickPathExtent);
            break;
          }
          default:
          {
            (void) CopyMagickString(thumbnail->magick,"NULL",MagickPathExtent);
            break;
          }
        }
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i,
          (size_t) i,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"+noise %s",factor);
        break;
      }
      case SharpenPreview:
      {
        preview_image=SharpenImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"sharpen %gx%g",radius,
          sigma);
        break;
      }
      case BlurPreview:
      {
        preview_image=BlurImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"blur %gx%g",radius,
          sigma);
        break;
      }
      case ThresholdPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        /*
          Fix: threshold the cloned preview tile; thresholding the thumbnail
          (which is destroyed below) left the displayed tile unmodified.
        */
        (void) BilevelImage(preview_image,(double) (percentage*((double)
          QuantumRange+1.0))/100.0,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"threshold %g",(double)
          (percentage*((double) QuantumRange+1.0))/100.0);
        break;
      }
      case EdgeDetectPreview:
      {
        preview_image=EdgeImage(thumbnail,radius,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"edge %g",radius);
        break;
      }
      case SpreadPreview:
      {
        preview_image=SpreadImage(thumbnail,image->interpolate,radius,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"spread %g",
          radius+0.5);
        break;
      }
      case SolarizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) SolarizeImage(preview_image,(double) QuantumRange*percentage/
          100.0,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"solarize %g",
          (QuantumRange*percentage)/100.0);
        break;
      }
      case ShadePreview:
      {
        degrees+=10.0;
        preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"shade %gx%g",degrees,
          degrees);
        break;
      }
      case RaisePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        geometry.width=(size_t) (2*i+2);
        geometry.height=(size_t) (2*i+2);
        geometry.x=(i-1)/2;
        geometry.y=(i-1)/2;
        (void) RaiseImage(preview_image,&geometry,MagickTrue,exception);
        (void) FormatLocaleString(label,MagickPathExtent,
          "raise %.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double)
          geometry.height,(double) geometry.x,(double) geometry.y);
        break;
      }
      case SegmentPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        threshold+=0.4f;
        (void) SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold,
          threshold,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"segment %gx%g",
          threshold,threshold);
        break;
      }
      case SwirlPreview:
      {
        preview_image=SwirlImage(thumbnail,degrees,image->interpolate,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"swirl %g",degrees);
        degrees+=45.0;
        break;
      }
      case ImplodePreview:
      {
        degrees+=0.1f;
        preview_image=ImplodeImage(thumbnail,degrees,image->interpolate,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"implode %g",degrees);
        break;
      }
      case WavePreview:
      {
        degrees+=5.0f;
        preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees,
          image->interpolate,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"wave %gx%g",0.5*degrees,
          2.0*degrees);
        break;
      }
      case OilPaintPreview:
      {
        preview_image=OilPaintImage(thumbnail,(double) radius,(double) sigma,
          exception);
        /* NOTE(review): label says "charcoal" for the oil-paint preview —
           presumably a copy-paste from CharcoalDrawingPreview; confirm
           intended label before changing user-visible output. */
        (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g",radius,
          sigma);
        break;
      }
      case CharcoalDrawingPreview:
      {
        preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g",radius,
          sigma);
        break;
      }
      case JPEGPreview:
      {
        char
          filename[MagickPathExtent];
        int
          file;
        MagickBooleanType
          status;
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        preview_info->quality=(size_t) percentage;
        (void) FormatLocaleString(factor,MagickPathExtent,"%.20g",(double)
          preview_info->quality);
        /*
          Round-trip the tile through a temporary JPEG file so the label can
          report the compressed size at this quality setting.
        */
        file=AcquireUniqueFileResource(filename);
        if (file != -1)
          file=close(file)-1;
        (void) FormatLocaleString(preview_image->filename,MagickPathExtent,
          "jpeg:%s",filename);
        status=WriteImage(preview_info,preview_image,exception);
        if (status != MagickFalse)
          {
            Image
              *quality_image;
            (void) CopyMagickString(preview_info->filename,
              preview_image->filename,MagickPathExtent);
            quality_image=ReadImage(preview_info,exception);
            if (quality_image != (Image *) NULL)
              {
                preview_image=DestroyImage(preview_image);
                preview_image=quality_image;
              }
          }
        (void) RelinquishUniqueFileResource(preview_image->filename);
        if ((GetBlobSize(preview_image)/1024) >= 1024)
          (void) FormatLocaleString(label,MagickPathExtent,"quality %s\n%gmb ",
            factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/
            1024.0/1024.0);
        else
          if (GetBlobSize(preview_image) >= 1024)
            (void) FormatLocaleString(label,MagickPathExtent,
              "quality %s\n%gkb ",factor,(double) ((MagickOffsetType)
              GetBlobSize(preview_image))/1024.0);
          else
            /*
              Fix: report the compressed preview's size; the thumbnail has no
              associated blob so GetBlobSize(thumbnail) was meaningless here.
            */
            (void) FormatLocaleString(label,MagickPathExtent,"quality %s\n%.20gb ",
              factor,(double) ((MagickOffsetType) GetBlobSize(preview_image)));
        break;
      }
    }
    thumbnail=DestroyImage(thumbnail);
    percentage+=12.5;
    radius+=0.5;
    sigma+=0.25;
    if (preview_image == (Image *) NULL)
      break;
    (void) DeleteImageProperty(preview_image,"label");
    (void) SetImageProperty(preview_image,"label",label,exception);
    AppendImageToList(&images,preview_image);
    proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i,
      NumberTiles);
    if (proceed == MagickFalse)
      break;
  }
  if (images == (Image *) NULL)
    {
      preview_info=DestroyImageInfo(preview_info);
      return((Image *) NULL);
    }
  /*
    Create the montage.
  */
  montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL);
  (void) CopyMagickString(montage_info->filename,image->filename,MagickPathExtent);
  montage_info->shadow=MagickTrue;
  (void) CloneString(&montage_info->tile,"3x3");
  (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry);
  (void) CloneString(&montage_info->frame,DefaultTileFrame);
  montage_image=MontageImages(images,montage_info,exception);
  montage_info=DestroyMontageInfo(montage_info);
  images=DestroyImageList(images);
  if (montage_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  if (montage_image->montage != (char *) NULL)
    {
      /*
        Free image directory.  Fix: guard on the montage image's own
        directory; testing image->directory could leak (or skip freeing)
        montage_image->directory.
      */
      montage_image->montage=(char *) RelinquishMagickMemory(
        montage_image->montage);
      if (montage_image->directory != (char *) NULL)
        montage_image->directory=(char *) RelinquishMagickMemory(
          montage_image->directory);
    }
  preview_info=DestroyImageInfo(preview_info);
  return(montage_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o t a t i o n a l B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotationalBlurImage() applies a radial blur to the image.
%
% Andrew Protano contributed this effect.
%
% The format of the RotationalBlurImage method is:
%
% Image *RotationalBlurImage(const Image *image,const double angle,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o angle: the angle of the radial blur.
%
% o blur: the blur.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotationalBlurImage(const Image *image,const double angle,
  ExceptionInfo *exception)
{
  CacheView
    *blur_view,
    *image_view,
    *radial_view;
  double
    blur_radius,
    *cos_theta,
    offset,
    *sin_theta,
    theta;
  Image
    *blur_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  PointInfo
    blur_center;
  register ssize_t
    i;
  size_t
    n;
  ssize_t
    y;
  /*
    Allocate blur image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  blur_image=AccelerateRotationalBlurImage(image,angle,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  blur_center.x=(double) (image->columns-1)/2.0;
  blur_center.y=(double) (image->rows-1)/2.0;
  blur_radius=hypot(blur_center.x,blur_center.y);
  /*
    Precompute the sample angles: n evenly spaced rotations spanning the
    requested angle, centered about zero (see offset below).
  */
  n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL);
  theta=DegreesToRadians(angle)/(double) (n-1);
  cos_theta=(double *) AcquireQuantumMemory((size_t) n,
    sizeof(*cos_theta));
  sin_theta=(double *) AcquireQuantumMemory((size_t) n,
    sizeof(*sin_theta));
  if ((cos_theta == (double *) NULL) ||
      (sin_theta == (double *) NULL))
    {
      /*
        Fix: release whichever table *did* allocate before throwing;
        previously a successful allocation was leaked on this path.
      */
      if (cos_theta != (double *) NULL)
        cos_theta=(double *) RelinquishMagickMemory(cos_theta);
      if (sin_theta != (double *) NULL)
        sin_theta=(double *) RelinquishMagickMemory(sin_theta);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  offset=theta*(double) (n-1)/2.0;
  for (i=0; i < (ssize_t) n; i++)
  {
    cos_theta[i]=cos((double) (theta*i-offset));
    sin_theta[i]=sin((double) (theta*i-offset));
  }
  /*
    Radial blur image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  radial_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        radius;
      PointInfo
        center;
      register ssize_t
        i;
      size_t
        step;
      center.x=(double) x-blur_center.x;
      center.y=(double) y-blur_center.y;
      radius=hypot((double) center.x,center.y);
      /*
        Pixels near the center need fewer samples; stride through the
        angle tables proportionally to blur_radius/radius.
      */
      if (radius == 0)
        step=1;
      else
        {
          step=(size_t) (blur_radius/radius);
          if (step == 0)
            step=1;
          else
            if (step >= n)
              step=n-1;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          gamma,
          pixel;
        PixelChannel
          channel;
        PixelTrait
          blur_traits,
          traits;
        register const Quantum
          *magick_restrict r;
        register ssize_t
          j;
        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if (((blur_traits & CopyPixelTrait) != 0) ||
            (GetPixelReadMask(image,p) == 0))
          {
            SetPixelChannel(blur_image,channel,p[i],q);
            continue;
          }
        gamma=0.0;
        pixel=0.0;
        /*
          Fix: query the alpha channel's traits with the PixelChannel index
          AlphaPixelChannel; AlphaChannel is a ChannelType bitmask and
          indexed the wrong channel_map entry.
        */
        if ((GetPixelChannelTraits(image,AlphaPixelChannel) == UndefinedPixelTrait) ||
            (channel == AlphaPixelChannel))
          {
            /*
              No alpha blending: unweighted average of the samples along
              the blur arc.
            */
            for (j=0; j < (ssize_t) n; j+=(ssize_t) step)
            {
              r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+
                center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t)
                (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5),
                1,1,exception);
              if (r == (const Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              pixel+=r[i];
              gamma++;
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha-weighted average along the blur arc.
        */
        for (j=0; j < (ssize_t) n; j+=(ssize_t) step)
        {
          double
            alpha;
          r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+
            center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t)
            (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5),
            1,1,exception);
          if (r == (const Quantum *) NULL)
            {
              status=MagickFalse;
              continue;
            }
          alpha=(double) QuantumScale*GetPixelAlpha(image,r);
          pixel+=alpha*r[i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(blur_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RotationalBlurImage)
#endif
        proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  radial_view=DestroyCacheView(radial_view);
  image_view=DestroyCacheView(image_view);
  cos_theta=(double *) RelinquishMagickMemory(cos_theta);
  sin_theta=(double *) RelinquishMagickMemory(sin_theta);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e l e c t i v e B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SelectiveBlurImage() selectively blur pixels within a contrast threshold.
% It is similar to the unsharpen mask that sharpens everything with contrast
% above a certain threshold.
%
% The format of the SelectiveBlurImage method is:
%
% Image *SelectiveBlurImage(const Image *image,const double radius,
% const double sigma,const double threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o threshold: only pixels within this contrast threshold are included
% in the blur operation.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SelectiveBlurImage(const Image *image,const double radius,
  const double sigma,const double threshold,ExceptionInfo *exception)
{
#define SelectiveBlurImageTag "SelectiveBlur/Image"
  CacheView
    *blur_view,
    *image_view,
    *luminance_view;
  Image
    *blur_image,
    *luminance_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  MagickRealType
    *kernel;
  register ssize_t
    i;
  size_t
    width;
  ssize_t
    center,
    j,
    u,
    v,
    y;
  /*
    Initialize blur image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,width*sizeof(*kernel)));
  if (kernel == (MagickRealType *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Build a width x width Gaussian kernel.
  */
  j=(ssize_t) (width-1)/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
      kernel[i++]=(MagickRealType) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
        MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
  }
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        *message;
      register const MagickRealType
        *k;
      ssize_t
        u,
        v;
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double)
        width);
      message=AcquireString("");
      k=kernel;
      for (v=0; v < (ssize_t) width; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < (ssize_t) width; u++)
        {
          (void) FormatLocaleString(format,MagickPathExtent,"%+f ",(double) *k++);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  /*
    The contrast test below is driven by a grayscale copy of the image so
    intensity lookups are cheap and consistent across channels.
  */
  luminance_image=CloneImage(image,0,0,MagickTrue,exception);
  if (luminance_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  status=TransformImageColorspace(luminance_image,GRAYColorspace,exception);
  if (status == MagickFalse)
    {
      luminance_image=DestroyImage(luminance_image);
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  /*
    Threshold blur image.
  */
  status=MagickTrue;
  progress=0;
  center=(ssize_t) (GetPixelChannels(image)*(image->columns+width)*
    ((width-1)/2L)+GetPixelChannels(image)*((width-1)/2L));
  image_view=AcquireVirtualCacheView(image,exception);
  luminance_view=AcquireVirtualCacheView(luminance_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      contrast;
    MagickBooleanType
      sync;
    register const Quantum
      *magick_restrict l,
      *magick_restrict p;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t)
      ((width-1)/2L),image->columns+width,width,exception);
    l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y-
      (ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity;
      register ssize_t
        i;
      intensity=GetPixelIntensity(image,p+center);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;
        PixelChannel
          channel;
        PixelTrait
          blur_traits,
          traits;
        register const MagickRealType
          *magick_restrict k;
        register const Quantum
          *magick_restrict luminance_pixels,
          *magick_restrict pixels;
        register ssize_t
          u;
        ssize_t
          v;
        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if (((blur_traits & CopyPixelTrait) != 0) ||
            (GetPixelReadMask(image,p+center) == 0))
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        k=kernel;
        pixel=0.0;
        pixels=p;
        luminance_pixels=l;
        gamma=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending: only neighbors within the contrast
              threshold contribute to the weighted sum.
            */
            for (v=0; v < (ssize_t) width; v++)
            {
              for (u=0; u < (ssize_t) width; u++)
              {
                contrast=GetPixelIntensity(luminance_image,luminance_pixels)-
                  intensity;
                if (fabs(contrast) < threshold)
                  {
                    pixel+=(*k)*pixels[i];
                    gamma+=(*k);
                  }
                k++;
                pixels+=GetPixelChannels(image);
                luminance_pixels+=GetPixelChannels(luminance_image);
              }
              pixels+=GetPixelChannels(image)*image->columns;
              luminance_pixels+=GetPixelChannels(luminance_image)*
                luminance_image->columns;
            }
            if (fabs((double) gamma) < MagickEpsilon)
              {
                /* no neighbor qualified; keep the original pixel */
                SetPixelChannel(blur_image,channel,p[center+i],q);
                continue;
              }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each qualifying neighbor by its alpha.
        */
        for (v=0; v < (ssize_t) width; v++)
        {
          for (u=0; u < (ssize_t) width; u++)
          {
            contrast=GetPixelIntensity(image,pixels)-intensity;
            if (fabs(contrast) < threshold)
              {
                alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
                pixel+=(*k)*alpha*pixels[i];
                gamma+=(*k)*alpha;
              }
            k++;
            pixels+=GetPixelChannels(image);
            luminance_pixels+=GetPixelChannels(luminance_image);
          }
          pixels+=GetPixelChannels(image)*image->columns;
          luminance_pixels+=GetPixelChannels(luminance_image)*
            luminance_image->columns;
        }
        if (fabs((double) gamma) < MagickEpsilon)
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      l+=GetPixelChannels(luminance_image);
      q+=GetPixelChannels(blur_image);
    }
    sync=SyncCacheViewAuthenticPixels(blur_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SelectiveBlurImage)
#endif
        proceed=SetImageProgress(image,SelectiveBlurImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  /*
    Fix: destroy the luminance cache view before its image; it was never
    released, leaking the view and its pixel-cache reference.
  */
  luminance_view=DestroyCacheView(luminance_view);
  image_view=DestroyCacheView(image_view);
  luminance_image=DestroyImage(luminance_image);
  kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadeImage() shines a distant light on an image to create a
% three-dimensional effect. You control the positioning of the light with
% azimuth and elevation; azimuth is measured in degrees off the x axis
% and elevation is measured in pixels above the Z axis.
%
% The format of the ShadeImage method is:
%
% Image *ShadeImage(const Image *image,const MagickBooleanType gray,
% const double azimuth,const double elevation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o gray: A value other than zero shades the intensity of each pixel.
%
% o azimuth, elevation: Define the light source direction.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray,
  const double azimuth,const double elevation,ExceptionInfo *exception)
{
#define ShadeImageTag "Shade/Image"
  CacheView
    *image_view,
    *shade_view;
  Image
    *linear_image,
    *shade_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  PrimaryInfo
    light;
  ssize_t
    y;
  /*
    Initialize shaded image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* linear_image is read (source heights), shade_image is written. */
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  shade_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (shade_image != (Image *) NULL)
        shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(shade_image,DirectClass,exception) == MagickFalse)
    {
      linear_image=DestroyImage(linear_image);
      shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  /*
    Compute the light vector.
  */
  light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.z=(double) QuantumRange*sin(DegreesToRadians(elevation));
  /*
    Shade image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(linear_image,exception);
  shade_view=AcquireAuthenticCacheView(shade_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(linear_image,shade_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    double
      distance,
      normal_distance,
      shade;
    PrimaryInfo
      normal;
    register const Quantum
      *magick_restrict center,
      *magick_restrict p,
      *magick_restrict post,
      *magick_restrict pre;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    /* Fetch a 3-row window (rows y-1..y+1, one extra column each side)
       so the surface normal can be estimated from the 8 neighbors. */
    p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3,
      exception);
    q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Shade this row of pixels.
    */
    normal.z=2.0*(double) QuantumRange;  /* constant Z of surface normal */
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      register ssize_t
        i;
      /*
        Determine the surface normal and compute shading.
      */
      /* pre/center/post point at the (x-1..x+1) neighborhood in the
         three fetched rows; intensity acts as the height field. */
      pre=p+GetPixelChannels(linear_image);
      center=pre+(linear_image->columns+2)*GetPixelChannels(linear_image);
      post=center+(linear_image->columns+2)*GetPixelChannels(linear_image);
      normal.x=(double) (
        GetPixelIntensity(linear_image,pre-GetPixelChannels(linear_image))+
        GetPixelIntensity(linear_image,center-GetPixelChannels(linear_image))+
        GetPixelIntensity(linear_image,post-GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,pre+GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,center+GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,post+GetPixelChannels(linear_image)));
      normal.y=(double) (
        GetPixelIntensity(linear_image,post-GetPixelChannels(linear_image))+
        GetPixelIntensity(linear_image,post)+
        GetPixelIntensity(linear_image,post+GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,pre-GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,pre)-
        GetPixelIntensity(linear_image,pre+GetPixelChannels(linear_image)));
      if ((fabs(normal.x) <= MagickEpsilon) &&
          (fabs(normal.y) <= MagickEpsilon))
        shade=light.z;  /* flat surface: lit by the light's Z component */
      else
        {
          shade=0.0;
          distance=normal.x*light.x+normal.y*light.y+normal.z*light.z;
          if (distance > MagickEpsilon)
            {
              normal_distance=normal.x*normal.x+normal.y*normal.y+
                normal.z*normal.z;
              if (normal_distance > (MagickEpsilon*MagickEpsilon))
                shade=distance/sqrt((double) normal_distance);
            }
        }
      for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++)
      {
        PixelChannel
          channel;
        PixelTrait
          shade_traits,
          traits;
        channel=GetPixelChannelChannel(linear_image,i);
        traits=GetPixelChannelTraits(linear_image,channel);
        shade_traits=GetPixelChannelTraits(shade_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (shade_traits == UndefinedPixelTrait))
          continue;
        if (((shade_traits & CopyPixelTrait) != 0) ||
            (GetPixelReadMask(linear_image,center) == 0))
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if ((traits & UpdatePixelTrait) == 0)
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if (gray != MagickFalse)
          {
            /* gray shading: output the shade value itself */
            SetPixelChannel(shade_image,channel,ClampToQuantum(shade),q);
            continue;
          }
        /* otherwise modulate the source channel by the shade factor */
        SetPixelChannel(shade_image,channel,ClampToQuantum(QuantumScale*shade*
          center[i]),q);
      }
      p+=GetPixelChannels(linear_image);
      q+=GetPixelChannels(shade_image);
    }
    if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ShadeImage)
#endif
        proceed=SetImageProgress(image,ShadeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  shade_view=DestroyCacheView(shade_view);
  image_view=DestroyCacheView(image_view);
  linear_image=DestroyImage(linear_image);
  if (status == MagickFalse)
    shade_image=DestroyImage(shade_image);
  return(shade_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a r p e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SharpenImage() sharpens the image. We convolve the image with a Gaussian
% operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SharpenImage() selects a suitable radius for you.
%
% Using a separable kernel would be faster, but the negative weights cancel
% out on the corners of the kernel producing often undesirable ringing in the
% filtered result; this can be avoided by using a 2D gaussian shaped image
% sharpening kernel instead.
%
% The format of the SharpenImage method is:
%
% Image *SharpenImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *sharp_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    u,
    v;

  /*
    Sharpen by convolving with a 2D kernel whose off-center weights are a
    negated Gaussian and whose center weight is boosted so the kernel sums
    to a positive value (a Laplacian-of-Gaussian-style sharpening kernel).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Fill the kernel with negated Gaussian weights and accumulate their sum.
  */
  normalize=0.0;
  j=(ssize_t) (kernel_info->width-1)/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(MagickRealType) (-exp(-((double) u*u+v*v)/(2.0*
        MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
      normalize+=kernel_info->values[i];
      i++;
    }
  }
  /*
    After the loops i == width*height, so i/2 indexes the center tap; boost
    it so the overall kernel sum is positive (sharpening, not blurring).
  */
  kernel_info->values[i/2]=(double) ((-2.0)*normalize);
  /*
    Normalize the kernel so its weights sum to one (gamma guards against a
    zero sum via PerceptibleReciprocal).
  */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  sharp_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(sharp_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p r e a d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpreadImage() is a special effects method that randomly displaces each
% pixel in a square area defined by the radius parameter.
%
% The format of the SpreadImage method is:
%
% Image *SpreadImage(const Image *image,
% const PixelInterpolateMethod method,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: interpolation method.
%
% o radius: choose a random pixel in a neighborhood of this extent.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpreadImage(const Image *image,
  const PixelInterpolateMethod method,const double radius,
  ExceptionInfo *exception)
{
#define SpreadImageTag "Spread/Image"

  CacheView
    *image_view,
    *spread_view;

  Image
    *spread_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  size_t
    width;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize spread image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  spread_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (spread_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(spread_image,DirectClass,exception) == MagickFalse)
    {
      spread_image=DestroyImage(spread_image);
      return((Image *) NULL);
    }
  /*
    Spread image: each destination pixel is interpolated from a uniformly
    random source location within a width x width neighborhood of itself.
    One RandomInfo per thread keeps the PRNG streams independent.
  */
  status=MagickTrue;
  progress=0;
  width=GetOptimalKernelWidth1D(radius,0.5);
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireVirtualCacheView(image,exception);
  spread_view=AcquireAuthenticCacheView(spread_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /*
    Parallelize only when the random secret key is the default (~0UL), so a
    user-seeded generator still produces a reproducible, serial sequence.
  */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,spread_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PointInfo
        point;

      /*
        point.x/point.y are uniform in [0,1); the -0.5 shift centers the
        random displacement on the current pixel.
      */
      point.x=GetPseudoRandomValue(random_info[id]);
      point.y=GetPseudoRandomValue(random_info[id]);
      status=InterpolatePixelChannels(image,image_view,spread_image,method,
        (double) x+width*(point.x-0.5),(double) y+width*(point.y-0.5),q,
        exception);
      q+=GetPixelChannels(spread_image);
    }
    if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SpreadImage)
#endif
        proceed=SetImageProgress(image,SpreadImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  spread_view=DestroyCacheView(spread_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    spread_image=DestroyImage(spread_image);
  return(spread_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n s h a r p M a s k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnsharpMaskImage() sharpens one or more image channels. We convolve the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and UnsharpMaskImage() selects a suitable radius for you.
%
% The format of the UnsharpMaskImage method is:
%
% Image *UnsharpMaskImage(const Image *image,const double radius,
% const double sigma,const double gain,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o gain: the percentage of the difference between the original and the
% blur image that is added back into the original.
%
% o threshold: the threshold in pixels needed to apply the difference gain.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *UnsharpMaskImage(const Image *image,const double radius,
  const double sigma,const double gain,const double threshold,
  ExceptionInfo *exception)
{
#define SharpenImageTag "Sharpen/Image"

  CacheView
    *image_view,
    *unsharp_view;

  Image
    *unsharp_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    quantum_threshold;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Prefer the OpenCL-accelerated path when it is available and succeeds.
  */
  unsharp_image=AccelerateUnsharpMaskImage(image,radius,sigma,gain,threshold,
    exception);
  if (unsharp_image != (Image *) NULL)
    return(unsharp_image);
#endif
  /*
    Classic unsharp mask: blur the image, then add back a gain-scaled
    fraction of the (original - blurred) difference wherever it exceeds
    the threshold.
  */
  unsharp_image=BlurImage(image,radius,sigma,exception);
  if (unsharp_image == (Image *) NULL)
    return((Image *) NULL);
  /* threshold is a normalized fraction; scale it to quantum units. */
  quantum_threshold=(double) QuantumRange*threshold;
  /*
    Unsharp-mask image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,unsharp_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel
          channel;

        PixelTrait
          traits,
          unsharp_traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        unsharp_traits=GetPixelChannelTraits(unsharp_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (unsharp_traits == UndefinedPixelTrait))
          continue;
        /* Copy-trait or masked pixels pass through unchanged. */
        if (((unsharp_traits & CopyPixelTrait) != 0) ||
            (GetPixelReadMask(image,p) == 0))
          {
            SetPixelChannel(unsharp_image,channel,p[i],q);
            continue;
          }
        /* q currently holds the blurred value; pixel = original - blurred. */
        pixel=p[i]-(double) GetPixelChannel(unsharp_image,channel,q);
        if (fabs(2.0*pixel) < quantum_threshold)
          pixel=(double) p[i];
        else
          pixel=(double) p[i]+gain*pixel;
        SetPixelChannel(unsharp_image,channel,ClampToQuantum(pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(unsharp_image);
    }
    if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_UnsharpMaskImage)
#endif
        proceed=SetImageProgress(image,SharpenImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  unsharp_image->type=image->type;
  unsharp_view=DestroyCacheView(unsharp_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    unsharp_image=DestroyImage(unsharp_image);
  return(unsharp_image);
}
|
cloudsubtractor.h | #ifndef CLOUDSUBTRACTOR_H
#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
#include <pcl/sample_consensus/method_types.h>
#include <pcl/sample_consensus/model_types.h>
#include <pcl/segmentation/sac_segmentation.h>
#include <pcl/filters/voxel_grid.h>
#include <pcl/filters/extract_indices.h>
#include <pcl/kdtree/kdtree_flann.h>
#include <pcl/common/pca.h>
#include "../h/inrange.h"
#define CLOUDSUBTRACTOR_H
// by Sven Schneider 07/2017
template <typename PointInT>
// Subtracts an input point cloud from a synthetic plane grid: a dominant
// plane is fit to the input (RANSAC), a regular grid of points is generated
// on that plane, and grid points that have input neighbors within kdRadius_
// are removed.  What remains (invertedPC_) marks "holes" in the input cloud.
class CloudSubtractor: public pcl::PCLBase<PointInT>
{
protected:
  typedef typename pcl::PointCloud<PointInT>::Ptr PointCloudPtr;
  typedef typename pcl::PointCloud<PointInT>::ConstPtr PointCloudConstPtr;
public:
  typedef boost::shared_ptr< CloudSubtractor<PointInT> > Ptr;
  typedef boost::shared_ptr< const CloudSubtractor<PointInT> > ConstPtr;
  // Default constructor: grid spacing 0.25, RANSAC plane threshold 0.1,
  // neighbor-search radii of 1.
  CloudSubtractor () :
    pointIncrement_ (0.25),
    planeDistThreshold_ (0.1),
    kdRadius_ (1),
    kdRadiusPCA_ (1),
    coefficients_ (new pcl::ModelCoefficients),
    gridPC_ (new pcl::PointCloud<PointInT>),
    invertedPC_ (new pcl::PointCloud<PointInT>)
  { };
  // Convenience constructor with explicit grid spacing, plane distance
  // threshold, and subtraction search radius (PCA radius stays 1).
  CloudSubtractor (float pointInc, float planeDist, float kdR) :
    pointIncrement_ (pointInc),
    planeDistThreshold_ (planeDist),
    kdRadius_ (kdR),
    kdRadiusPCA_ (1),
    coefficients_ (new pcl::ModelCoefficients),
    gridPC_ (new pcl::PointCloud<PointInT>),
    invertedPC_ (new pcl::PointCloud<PointInT>)
  { };
  inline void setPointIncrement(float inc) {pointIncrement_ = inc;}
  inline void setKdTreeRadius(float radius) {kdRadius_ = radius;}
  inline void setKdTreeRadiusPCA(float radius) {kdRadiusPCA_ = radius;}
  inline void setPlaneDistThreshold(float threshold) {planeDistThreshold_ = threshold;}
  inline float getPointIncrement() {return pointIncrement_;}
  inline float getPlaneDistThreshold() {return planeDistThreshold_;}
  inline float getKdTreeRadius() {return kdRadius_;}
  void dummySearch();
  void applyCloudSubtraction(typename pcl::PointCloud<PointInT>::Ptr cloud_out);
  void pointwisePCA(typename pcl::PointCloud<PointInT>::Ptr cloudOut);
  virtual ~CloudSubtractor ()
  {
  }
protected:
  /** \brief Search radius used when testing grid points against the input cloud. */
  float kdRadius_;
  /** \brief Neighborhood radius used for the per-point PCA classification. */
  float kdRadiusPCA_;
  /** \brief The grid spacing in x,y,z direction. Smaller values lead to larger and denser point clouds (default: 0.25). */
  float pointIncrement_;
  /** \brief RANSAC inlier distance threshold for plane segmentation. */
  float planeDistThreshold_;
  /** \brief An input point cloud describing the surface that is to be used for nearest neighbors estimation. */
  using pcl::PCLBase<PointInT>::input_;
  /** \brief The subtracted (inverted) point cloud. */
  typename pcl::PointCloud<PointInT>::Ptr invertedPC_;
  /** \brief Regular grid of points generated on the fitted plane. */
  typename pcl::PointCloud<PointInT>::Ptr gridPC_;
  /** \brief Plane coefficients (Ax+By+Cz+D=0) from RANSAC segmentation. */
  pcl::ModelCoefficients::Ptr coefficients_;
  void detectPlane();
  void makePlaneFromCoefficients();
  void gridSubtraction();
};
template <typename PointInT>
// Debug helper: duplicates the input cloud into invertedPC_ and shifts every
// point by +3 in x and +5 in y (z untouched).  Used to sanity-check wiring.
void CloudSubtractor<PointInT>::dummySearch ()
{
  pcl::copyPointCloud(*input_, *invertedPC_);
  const int total = input_->size();
  for (int idx = 0; idx < total; ++idx)
  {
    const PointInT &src = input_->points[idx];
    PointInT &dst = invertedPC_->points[idx];
    dst.x = src.x + 3;
    dst.y = src.y + 5;
  }
}
template <typename PointInT>
// Full subtraction pipeline: fit the dominant plane, synthesize a grid of
// points on it, remove grid points near input points, and copy the surviving
// "hole" points into cloud_out.  The three steps must run in this order
// (each consumes state produced by the previous one).
void CloudSubtractor<PointInT>::applyCloudSubtraction (typename pcl::PointCloud<PointInT>::Ptr cloud_out)
{
  detectPlane();
  makePlaneFromCoefficients();
  gridSubtraction();
  //viewer<PointInT> (invertedPC_);
  pcl::copyPointCloud(*invertedPC_, *cloud_out);
}
template <typename PointInT>
// Fits the dominant plane of the input cloud with RANSAC and stores the
// normalized plane coefficients in coefficients_.  Also extracts the inlier
// points and writes them to "STEP_03_moreRefinedPlane_.pcd" for debugging.
void CloudSubtractor<PointInT>::detectPlane(){
  typename pcl::PointCloud<PointInT>::Ptr cloudFiltered (new pcl::PointCloud<PointInT>);
  typename pcl::PointCloud<PointInT>::Ptr tmpPlane (new pcl::PointCloud<PointInT>);
  pcl::console::TicToc tt;
  /* // Create the filtering object
  pcl::VoxelGrid<PointInT> sor;
  sor.setInputCloud (input_);
  sor.setLeafSize (0.1f, 0.1f, 0.1f);
  tt.tic();
  sor.filter (*cloud_filtered);
  std::cerr << "PointCloud after VoxelGrid Downsampling has: " << cloud_filtered->size() << " points. Took " << tt.toc() / 1000. << " s." << std::endl;
  */
  // Downsampling is disabled (above); segment the full copy of the input.
  pcl::copyPointCloud(*input_,*cloudFiltered);
  //pcl::ModelCoefficients::Ptr coefficients (new pcl::ModelCoefficients);
  pcl::PointIndices::Ptr inliers (new pcl::PointIndices);
  // Create the segmentation object
  pcl::SACSegmentation<PointInT> seg;
  // Optional
  seg.setOptimizeCoefficients (true);
  // Mandatory
  seg.setModelType (pcl::SACMODEL_PLANE);
  seg.setMethodType (pcl::SAC_RANSAC);
  seg.setDistanceThreshold (planeDistThreshold_);
  tt.tic ();
  seg.setInputCloud (cloudFiltered);
  seg.segment (*inliers, *coefficients_);
  std::cerr << "PointCloud after plane segmentation has: " << inliers->indices.size () << " inliers. Took " << tt.toc() / 1000. << " s." << std::endl;
  std::cout << "Normalized coeffs of Pt-Normal Eq. (Ax+By+Cz=D): " << coefficients_->values[0] << " " << coefficients_->values[1] << " " << coefficients_->values[2] << " " << coefficients_->values[3] << std::endl;
  //TODO: Detect 1 more plane: ~30 cm +/- 10cm before (direction of the street) the facadePlane
  // this is done to detect Erkers (bay windows).
  //TODO: Combine both facadePoint clouds
  // For DEBUGING ONLY
  // Extract the planar inliers from the input cloud
  typename pcl::ExtractIndices<PointInT> extract;
  extract.setInputCloud (cloudFiltered);
  extract.setIndices (inliers);
  extract.setNegative (false);
  // Get the points associated with the planar surface
  extract.filter (*tmpPlane);
  std::stringstream ss;
  pcl::PCDWriter writer;
  ss << "STEP_03_moreRefinedPlane_.pcd";
  writer.write<PointInT> (ss.str (), *tmpPlane, false); //*
}
template <typename PointInT>
// Generates a regular grid of points lying on the plane described by
// coefficients_, spanning the bounding box of the input cloud, and stores
// it in gridPC_.  Also writes the grid to "STEP_04_PlaneFromCoeffs.pcd".
void CloudSubtractor<PointInT>::makePlaneFromCoefficients()
{
  PointInT minPt;
  PointInT maxPt;
  pcl::getMinMax3D(*input_, minPt, maxPt);
  PointInT point;
  // construct new points on the plane by rearranging the point normal equation for one of the coordinates.
  // NOTE(review): y is iterated but never used below (point.y is computed
  // from the plane equation), so each (x,z) grid point is emitted once per
  // y step — the grid contains duplicate points.  Confirm whether the
  // densification is intentional before changing it.
  for (float x=minPt.x; x<=maxPt.x; x+=pointIncrement_)
  {
    for (float y=minPt.y; y<= maxPt.y; y+=pointIncrement_)
    {
      for(float z=minPt.z; z <= maxPt.z ; z+=pointIncrement_)
      {
        point.x = x;
        if (coefficients_->values[1] != 0) // to avoid division by zero
          point.y = -1.0 * (1.0 * coefficients_->values[3] + coefficients_->values[0] * x + coefficients_->values[2] * z ) / coefficients_->values[1] ;
        else // divisor is set to 0.000001 if the coefficient is zero
          point.y = -1.0 * (1.0 * coefficients_->values[3] + coefficients_->values[0] * x + coefficients_->values[2] * z ) / 0.000001 ;
        point.z = z;
        gridPC_->points.push_back (point);
      }
    }
  }
  // Mark the cloud as unorganized (1 row, width = point count).
  gridPC_->width = (int) gridPC_->points.size (); gridPC_->height = 1;
  std::cout << "size of gridPlane: " << gridPC_->points.size() << std::endl;
  std::stringstream ss;
  pcl::PCDWriter writer;
  ss << "STEP_04_PlaneFromCoeffs.pcd";
  writer.write<PointInT> (ss.str (), *gridPC_, false); //*
}
template <typename PointInT>
// Removes from the synthetic plane grid (gridPC_) every point that has
// input-cloud neighbors within kdRadius_; the surviving grid points are
// appended to invertedPC_ and mark regions where the input cloud has holes.
//
// Fix vs. previous revision: the OpenMP loop shared the two radius-search
// scratch vectors across all threads and pushed into invertedPC_ without
// synchronization — both are data races.  The scratch vectors are now
// thread-private and the push_back is serialized with a critical section.
void CloudSubtractor<PointInT>::gridSubtraction()
{
  pcl::console::TicToc tt;
  typename pcl::PointCloud<PointInT>::Ptr cloud (new pcl::PointCloud<PointInT>);
  pcl::copyPointCloud(*input_,*cloud); // make a copy of original cloud
  // create a kdtree object
  pcl::KdTreeFLANN<PointInT> kdtreeObj;
  // set the input cloud for the kdtree. this should start creating the tree.
  kdtreeObj.setInputCloud (cloud);
  // determine size of PC
  size_t nIters = gridPC_->size();
  tt.tic();
  #pragma omp parallel for // kd-tree searches are independent per grid point
  for (size_t p = 0; p < nIters; p++){
    // thread-private search buffers (radiusSearch overwrites them anyway)
    std::vector<int> pointIdxRadiusSearch;
    std::vector<float> pointRadiusSquaredDistance;
    const PointInT searchPoint = gridPC_->points[p];
    // if at most one neighbour was found, keep the grid point.
    // NOTE(review): the comment in the original said "no neighbours" but the
    // threshold is > 1; preserved as-is — confirm the intended cutoff.
    if ( !(kdtreeObj.radiusSearch (searchPoint, kdRadius_, pointIdxRadiusSearch, pointRadiusSquaredDistance) > 1) )
    {
      #pragma omp critical (CloudSubtractor_gridSubtraction)
      {
        invertedPC_->points.push_back(searchPoint);
      }
      if (p % 100000 == 0)
        std::cout << "Performed " << p+1 << " kdtree searches out of "
                  << nIters << ". " << std::setprecision(2)
                  << float((p+1.))/float(nIters)*100.0 << " % completed..." << std::endl;
    }
  }
  std::cout << "gridSearch took: " << tt.toc() / 1000. << " seconds." << std::endl;
}
template <typename PointInT>
// Classifies each input point by the eigenvalue spectrum of a local PCA over
// its kdRadiusPCA_ neighborhood.  Normalized eigenvalues (L0,L1,L2 summing
// to 1) are stored in the normal_x/y/z fields of a visualization cloud and
// a class label is stored in its curvature field:
//   0.25 = border, 0.5 = corner, 0.75 = line, 0.0 = inlier (planar).
// Border points are copied into cloudOut (as PointXYZI, intensity = L1).
// Side effects: writes "pca_cloud.pcd" / "pca_visCloud.pcd" and opens a viewer.
//
// NOTE(review): the "#pragma omp parallel for" below shares `indices`,
// `pointRadiusSquaredDistance`, `pcaObj`, and the two output clouds across
// threads without synchronization — same data-race pattern fixed elsewhere;
// needs the same treatment, but the pcaObj.setIndices() coupling makes a
// mechanical fix non-trivial, so the code is left untouched here.
// NOTE(review): searchPoint.intensity requires PointInT to have an
// intensity field (e.g. pcl::PointXYZI) — confirm against instantiations.
void CloudSubtractor<PointInT>::pointwisePCA(typename pcl::PointCloud<PointInT>::Ptr cloudOut)
{
  pcl::console::TicToc tt;
  typename pcl::PointCloud<pcl::PointNormal>::Ptr visCloud (new pcl::PointCloud<pcl::PointNormal>);
  // create a kdtree object
  pcl::KdTreeFLANN<PointInT> kdtreeObj;
  // set the input cloud for the kdtree. this should start creating the tree.
  kdtreeObj.setInputCloud (input_);
  pcl::IndicesPtr indices (new std::vector <int>);
  std::vector<float> pointRadiusSquaredDistance;
  // create PCA object
  pcl::PCA<PointInT> pcaObj;
  pcaObj.setInputCloud(input_);
  // determine size of PC before entering for, as the size changes by one during the loop.
  size_t cloudSize = input_->size();
  Eigen::Vector3f EigenVals;
  tt.tic();
  #pragma omp parallel for // use parallel processing to speed up the process slightly. More cores would be better i guess...
  for (size_t p = 0; p < cloudSize; p++){
    PointInT searchPoint = input_->points[p];
    pcl::PointNormal visPoint; // this allows to store Lambda0,1,2 in the normal_x,_y,_z values
    // if neighbours were found, calculate PCA for this neighbourhood
    // (at least 4 neighbors are required for a meaningful 3D PCA)
    if ( (kdtreeObj.radiusSearch (searchPoint, kdRadiusPCA_, *indices, pointRadiusSquaredDistance) > 3) )
    {
      pcaObj.setIndices(indices);
      EigenVals = pcaObj.getEigenValues();
      // alpha normalizes the eigenvalues so L0+L1+L2 == 1
      const float alpha = EigenVals(0) +EigenVals(1) + EigenVals(2);
      // store planarity value in intensity field of point cloud for easy visualisation
      searchPoint.intensity = EigenVals(0)/alpha;
      visPoint.x = searchPoint.x;
      visPoint.y = searchPoint.y;
      visPoint.z = searchPoint.z;
      visPoint.normal_x = EigenVals(0)/alpha;
      visPoint.normal_y = EigenVals(1)/alpha;
      visPoint.normal_z = EigenVals(2)/alpha;
      // curvature is used to save to classification results based on thresholds
      if ( inrange(0.62, 0.7).contains(visPoint.normal_x) && inrange(0.17, 0.3).contains(visPoint.normal_y) && inrange(0.0, 0.1).contains(visPoint.normal_z) )
      { // basically test if L0~2/3 and L1~1/3 and L2~0, where L=Lambda (Eigenvalues)
        visPoint.curvature = 0.25; // its a border point, labeled as 0.25
        /*if (p % 1000 == 0)
          std::cout << "Border" << std::endl;*/
      }
      else if ( inrange(0.20, 0.3).contains(visPoint.normal_x) && inrange(0.20, 0.3).contains(visPoint.normal_y) && inrange(0.1, 0.3).contains(visPoint.normal_z) )
      { // basically test if L0 ~ L1 ~ L2 ~= 1/3
        visPoint.curvature = 0.5; //its a corner point labeled as 0.5
        /*if (p % 1000 == 0)
          std::cout << "Corner" << std::endl;*/
      }
      else if ( inrange(0.8, 1.1).contains(visPoint.normal_x) && inrange(0.0, 0.1).contains(visPoint.normal_y) && inrange(0.0, 0.1).contains(visPoint.normal_z) )
      { // basically test if L0 ~ 1 and L2 L1 = 0
        visPoint.curvature = 0.75; //its a line point labeld as 0.75
        /*if (p % 1000 == 0)
          std::cout << "Line" << std::endl;*/
      }
      else{
        visPoint.curvature = 0.0; //its an inlier labeld as 0
        /* if (p % 1000 == 0)
          std::cout << "Inlier" << std::endl; */
      }
      invertedPC_->points.push_back(searchPoint); // push_back the point with planarity info into result PC
      visCloud->points.push_back(visPoint); // cloud vor visualization, take generated point and add it to cloudvector
      // user info - output
      if (p % 100000 == 0) // give processing info to the user...
        std::cout << "PCA Performed. " << p+1 << " kdtree searches out of "
                  << cloudSize << ". " << std::setprecision(3)
                  << float((p+1.))/float(cloudSize)*100.0 << " % completed..." << std::endl;
    }
    indices->clear(); // clear indices for new neighbour search
  }
  // set basic cloud information.
  invertedPC_->width = invertedPC_->points.size();
  invertedPC_->height = 1;
  visCloud->width = visCloud->points.size();
  visCloud->height = 1;
  // write the data to a file. This will be removed in a final version... or?
  std::cout << "pointwisePCA took: " << std::setprecision(5) << tt.toc() / 1000. << " seconds." << std::endl;
  if (invertedPC_->points.size() > 0){
    pcl::PCDWriter writer;
    writer.write<PointInT> ("pca_cloud.pcd", *invertedPC_, false);
  }
  // view result. Will be removed in final version??
  viewer<PointInT>(invertedPC_);
  /////////////////////////////////////////////////
  // creating point cloud for visualization
  ///////////////////////////////////////////////
  if (visCloud->points.size() > 0){
    pcl::PCDWriter writer;
    writer.write<pcl::PointNormal> ("pca_visCloud.pcd", *visCloud, false);
  }
  // copy only border-labeled points (curvature == 0.25) into cloudOut
  for (size_t i=0; i < visCloud->points.size(); i++)
  {
    if (visCloud->points[i].curvature == 0.25)
    {
      pcl::PointXYZI borderPoint;
      borderPoint.x = visCloud->points[i].x;
      borderPoint.y = visCloud->points[i].y;
      borderPoint.z = visCloud->points[i].z;
      borderPoint.intensity = visCloud->points[i].normal_y;
      cloudOut->points.push_back(borderPoint);
    }
  }
  cloudOut->width = cloudOut->points.size();
  cloudOut->height = 1;
}
#endif // CLOUDSUBTRACTOR_H
|
GB_subref_phase0.c | //------------------------------------------------------------------------------
// GB_subref_phase0: find vectors of C = A(I,J) and determine I,J properties
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB_subref.h"
#define GB_Ai(p) GBI_UNFLIP (Ai, p, avlen)
//------------------------------------------------------------------------------
// GB_find_Ap_start_end
//------------------------------------------------------------------------------
// Find pA and pA_end so that Ai,Ax [pA:pA_end-1] contains the vector
// A(imin:imax,kA). If A(:,kA) is dense, [pA:pA_end-1] is the entire dense
// vector (it is not trimmed). Otherwise, if A(imin:imax,kA) is empty, then
// pA and pA_end are set to -1 to denote an empty list. The resulting pointers
// are then returned in Ap_start [kC] and Ap_end [kC].
static inline void GB_find_Ap_start_end
(
    // input, not modified
    const int64_t kA,               // vector of A to trim
    const int64_t *restrict Ap,     // column pointers of A (NULL if full)
    const int64_t *restrict Ai,     // row indices of A (may contain zombies)
    const int64_t avlen,            // vector length of A
    const int64_t imin,             // lower bound of requested row range
    const int64_t imax,             // upper bound of requested row range
    const int64_t kC,               // vector of C receiving the result
    const int64_t nzombies,         // # of zombies in A (for zombie-aware search)
    // output: Ap_start [kC] and Ap_end [kC]:
    int64_t *restrict Ap_start,
    int64_t *restrict Ap_end
)
{

    //--------------------------------------------------------------------------
    // get A(:,kA)
    //--------------------------------------------------------------------------

    int64_t pA = GBP (Ap, kA, avlen) ;
    int64_t pA_end = GBP (Ap, kA+1, avlen) ;
    int64_t ajnz = pA_end - pA ;

    //--------------------------------------------------------------------------
    // trim it to A(imin:imax,kA)
    //--------------------------------------------------------------------------

    if (ajnz == avlen)
    {

        //----------------------------------------------------------------------
        // A (:,kA) is dense; use pA and pA_end as-is
        //----------------------------------------------------------------------

        ;

    }
    else if (ajnz == 0 || GB_Ai (pA) > imax || GB_Ai (pA_end-1) < imin)
    {

        //----------------------------------------------------------------------
        // intersection of A(:,kA) and imin:imax is empty
        //----------------------------------------------------------------------

        pA = -1 ;
        pA_end = -1 ;

    }
    else
    {

        //----------------------------------------------------------------------
        // A (:,kA) is sparse, with at least one entry
        //----------------------------------------------------------------------

        // trim the leading part of A(:,kA): binary-search for the first
        // entry with row index >= imin (zombie-aware)
        if (GB_Ai (pA) < imin)
        {
            bool found, is_zombie ;
            int64_t pright = pA_end - 1 ;
            GB_SPLIT_BINARY_SEARCH_ZOMBIE (imin, Ai,
                pA, pright, found, nzombies, is_zombie) ;
        }

        // trim the trailing part of A (:,kA)
        if (imin == imax)
        {
            // single-row request: the range is either one entry or empty
            if (GB_Ai (pA) == imin)
            {
                // found the single entry A (i,kA)
                pA_end = pA + 1 ;
            }
            else
            {
                // A (i,kA) has not been found
                pA = -1 ;
                pA_end = -1 ;
            }
        }
        else if (imax < GB_Ai (pA_end-1))
        {
            // binary-search for the last entry with row index <= imax
            bool found, is_zombie ;
            int64_t pleft = pA ;
            int64_t pright = pA_end - 1 ;
            GB_SPLIT_BINARY_SEARCH_ZOMBIE (imax, Ai,
                pleft, pright, found, nzombies, is_zombie) ;
            pA_end = (found) ? (pleft + 1) : pleft ;
        }

        #ifdef GB_DEBUG
        ajnz = pA_end - pA ;
        if (ajnz > 0 && Ap != NULL)
        {
            // A(imin:imax,kA) is now in Ai [pA:pA_end-1]
            ASSERT (GB_IMPLIES (Ap [kA] < pA, GB_Ai (pA-1) < imin)) ;
            ASSERT (GB_IMPLIES (pA_end < Ap [kA+1], imax < GB_Ai (pA_end))) ;
            ASSERT (imin <= GB_Ai (pA)) ;
            ASSERT (GB_Ai (pA_end-1) <= imax) ;
        }
        #endif
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    // The result [pA:pA_end-1] defines the range of entries that need to be
    // accessed for constructing C(:,kC).

    Ap_start [kC] = pA ;
    Ap_end   [kC] = pA_end ;
}
//------------------------------------------------------------------------------
// GB_subref_phase0
//------------------------------------------------------------------------------
#define GB_FREE_WORKSPACE \
{ \
GB_WERK_POP (Count, int64_t) ; \
}
GrB_Info GB_subref_phase0
(
// output
int64_t *restrict *p_Ch, // Ch = C->h hyperlist, or NULL standard
size_t *p_Ch_size,
int64_t *restrict *p_Ap_start, // A(:,kA) starts at Ap_start [kC]
size_t *p_Ap_start_size,
int64_t *restrict *p_Ap_end, // ... and ends at Ap_end [kC] - 1
size_t *p_Ap_end_size,
int64_t *p_Cnvec, // # of vectors in C
bool *p_need_qsort, // true if C must be sorted
int *p_Ikind, // kind of I
int64_t *p_nI, // length of I
int64_t Icolon [3], // for GB_RANGE, GB_STRIDE
int64_t *p_nJ, // length of J
// input, not modified
const GrB_Matrix A,
const GrB_Index *I, // index list for C = A(I,J), or GrB_ALL, etc.
const int64_t ni, // length of I, or special
const GrB_Index *J, // index list for C = A(I,J), or GrB_ALL, etc.
const int64_t nj, // length of J, or special
// const bool must_sort, // true if C must be returned sorted
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT_MATRIX_OK (A, "A for subref phase 0", GB0) ;
ASSERT (!GB_IS_BITMAP (A)) ; // GB_bitmap_subref is used instead
ASSERT (p_Ch != NULL) ;
ASSERT (p_Ap_start != NULL) ;
ASSERT (p_Ap_end != NULL) ;
ASSERT (p_Cnvec != NULL) ;
ASSERT (p_nJ != NULL) ;
ASSERT (p_Ikind != NULL) ;
ASSERT (p_nI != NULL) ;
ASSERT (Icolon != NULL) ;
ASSERT (I != NULL) ;
ASSERT (J != NULL) ;
GrB_Info info ;
(*p_Ch ) = NULL ;
(*p_Ap_start ) = NULL ;
(*p_Ap_end ) = NULL ;
(*p_Cnvec ) = 0 ;
(*p_need_qsort) = false ;
(*p_Ikind ) = 0 ;
(*p_nI ) = 0 ;
(*p_nJ ) = 0 ;
//--------------------------------------------------------------------------
// get A
//--------------------------------------------------------------------------
int64_t *restrict Ap = A->p ; // Ap (but not A->p) may be trimmed
int64_t *restrict Ah = A->h ; // Ah (but not A->h) may be trimmed
int64_t *restrict Ai = A->i ;
int64_t anvec = A->nvec ; // may be trimmed
int64_t avlen = A->vlen ;
int64_t avdim = A->vdim ;
int64_t nzombies = A->nzombies ;
//--------------------------------------------------------------------------
// check the properties of I and J
//--------------------------------------------------------------------------
// C = A(I,J) so I is in range 0:avlen-1 and J is in range 0:avdim-1
int64_t nI, nJ, Jcolon [3] ;
int Ikind, Jkind ;
GB_ijlength (I, ni, avlen, &nI, &Ikind, Icolon) ;
GB_ijlength (J, nj, avdim, &nJ, &Jkind, Jcolon) ;
bool I_unsorted, I_has_dupl, I_contig, J_unsorted, J_has_dupl, J_contig ;
int64_t imin, imax, jmin, jmax ;
info = GB_ijproperties (I, ni, nI, avlen, &Ikind, Icolon,
&I_unsorted, &I_has_dupl, &I_contig, &imin, &imax, Context) ;
if (info != GrB_SUCCESS)
{
// I invalid or out of memory
return (info) ;
}
info = GB_ijproperties (J, nj, nJ, avdim, &Jkind, Jcolon,
&J_unsorted, &J_has_dupl, &J_contig, &jmin, &jmax, Context) ;
if (info != GrB_SUCCESS)
{
// J invalid or out of memory
return (info) ;
}
bool need_qsort = I_unsorted ;
//--------------------------------------------------------------------------
// determine if C is empty
//--------------------------------------------------------------------------
bool C_empty = (nI == 0 || nJ == 0) ;
//--------------------------------------------------------------------------
// trim the hyperlist of A
//--------------------------------------------------------------------------
// Ah, Ap, and anvec are modified to include just the vectors in range
// jmin:jmax, inclusive. A itself is not modified, just the Ah and Ap
// pointers, and the scalar anvec. If J is ":", then jmin is zero and
// jmax is avdim-1, so there is nothing to trim from Ah. If C is empty,
// then Ah and Ap will not be accessed at all, so this can be skipped.
bool A_is_hyper = (Ah != NULL) ;
if (A_is_hyper && !C_empty)
{
//----------------------------------------------------------------------
// trim the leading end of Ah so that it starts with jmin:...
//----------------------------------------------------------------------
if (jmin > 0)
{
bool found ;
int64_t kleft = 0 ;
int64_t kright = anvec-1 ;
GB_SPLIT_BINARY_SEARCH (jmin, Ah, kleft, kright, found) ;
Ah += kleft ;
Ap += kleft ;
anvec -= kleft ;
}
//----------------------------------------------------------------------
// trim the trailing end of Ah so that it ends with ..:jmax
//----------------------------------------------------------------------
if (jmax < avdim-1)
{
bool found ;
int64_t kleft = 0 ;
int64_t kright = anvec-1 ;
GB_SPLIT_BINARY_SEARCH (jmax, Ah, kleft, kright, found) ;
anvec = (found) ? (kleft + 1) : kleft ;
}
// Ah has been trimmed
ASSERT (GB_IMPLIES (anvec > 0, jmin <= Ah [0] && Ah [anvec-1] <= jmax));
}
// Ah may now be empty, after being trimmed
C_empty = C_empty || (anvec == 0) ;
//--------------------------------------------------------------------------
// determine # of threads to use
//--------------------------------------------------------------------------
#define NTASKS_PER_THREAD 8
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = 1, ntasks = 1 ;
int ntasks_max = nthreads_max * NTASKS_PER_THREAD ;
#define GB_GET_NTHREADS_AND_NTASKS(work) \
{ \
nthreads = GB_nthreads (work, chunk, nthreads_max) ; \
ntasks = (nthreads == 1) ? 1 : (NTASKS_PER_THREAD * nthreads) ; \
ntasks = GB_IMIN (ntasks, work) ; \
ntasks = GB_IMAX (ntasks, 1) ; \
}
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
GB_WERK_DECLARE (Count, int64_t) ;
GB_WERK_PUSH (Count, ntasks_max+1, int64_t) ;
if (Count == NULL)
{
// out of memory
return (GrB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// compute Cnvec and determine the format of Ch
//--------------------------------------------------------------------------
// Ch is an explicit or implicit array of size Cnvec <= nJ. jC = Ch [kC]
// if C(:,jC) is the (kC)th vector of C. If NULL, then C is standard, and
// jC == kC. jC is in the range 0 to nJ-1.
int64_t *restrict Ch = NULL ; size_t Ch_size = 0 ;
int64_t *restrict Ap_start = NULL ; size_t Ap_start_size = 0 ;
int64_t *restrict Ap_end = NULL ; size_t Ap_end_size = 0 ;
int64_t Cnvec = 0 ;
int64_t jbegin = Jcolon [GxB_BEGIN] ;
int64_t jinc = Jcolon [GxB_INC ] ;
if (C_empty)
{
//----------------------------------------------------------------------
// C is an empty hypersparse matrix
//----------------------------------------------------------------------
;
}
else if (!A_is_hyper)
{
//----------------------------------------------------------------------
// both C and A are standard matrices
//----------------------------------------------------------------------
Cnvec = nJ ;
GB_GET_NTHREADS_AND_NTASKS (nJ) ;
}
else if (Jkind == GB_ALL || Jkind == GB_RANGE)
{
//----------------------------------------------------------------------
// J is ":" or jbegin:jend
//----------------------------------------------------------------------
// Ch is a shifted copy of the trimmed Ah, of length Cnvec = anvec.
// so kA = kC, and jC = Ch [kC] = jA - jmin. Ap has also been trimmed.
Cnvec = anvec ;
ASSERT (Cnvec <= nJ) ;
GB_GET_NTHREADS_AND_NTASKS (anvec) ;
}
else if (Jkind == GB_STRIDE && anvec < nJ * 64)
{
//----------------------------------------------------------------------
// J is jbegin:jinc:jend, but J is large
//----------------------------------------------------------------------
// The case for Jkind == GB_STRIDE can be done by either this method,
// or the one below. This takes O(anvec) time, and the one below
// takes O(nj*log2(anvec)), so use this method if anvec < nj * 64.
// Ch is a list of length Cnvec, where Cnvec is the length of
// the intersection of Ah and jbegin:jinc:jend.
// count the length of Ch
Cnvec = 0 ;
GB_GET_NTHREADS_AND_NTASKS (anvec) ;
// scan all of Ah and check each entry if it appears in J
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t kA_start, kA_end, my_Cnvec = 0 ;
GB_PARTITION (kA_start, kA_end, anvec,
(jinc > 0) ? tid : (ntasks-tid-1), ntasks) ;
for (int64_t kA = kA_start ; kA < kA_end ; kA++)
{
int64_t jA = Ah [kA] ;
if (GB_ij_is_in_list (J, nJ, jA, GB_STRIDE, Jcolon))
{
my_Cnvec++ ;
}
}
Count [tid] = my_Cnvec ;
}
GB_cumsum (Count, ntasks, NULL, 1, NULL) ;
Cnvec = Count [ntasks] ;
}
else // Jkind == GB_LIST or GB_STRIDE
{
//----------------------------------------------------------------------
// J is an explicit list, or jbegin:jinc:end
//----------------------------------------------------------------------
// Ch is an explicit list: the intersection of Ah and J
// count the length of Ch
Cnvec = 0 ;
GB_GET_NTHREADS_AND_NTASKS (nJ) ;
// scan all of J and check each entry if it appears in Ah
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t jC_start, jC_end, my_Cnvec = 0 ;
GB_PARTITION (jC_start, jC_end, nJ, tid, ntasks) ;
for (int64_t jC = jC_start ; jC < jC_end ; jC++)
{
int64_t jA = GB_ijlist (J, jC, Jkind, Jcolon) ;
bool found ;
int64_t kA = 0 ;
int64_t kright = anvec-1 ;
GB_BINARY_SEARCH (jA, Ah, kA, kright, found) ;
if (found) my_Cnvec++ ;
}
Count [tid] = my_Cnvec ;
}
GB_cumsum (Count, ntasks, NULL, 1, NULL) ;
Cnvec = Count [ntasks] ;
}
//--------------------------------------------------------------------------
// allocate Ch, Ap_start, and Ap_end
//--------------------------------------------------------------------------
C_empty = C_empty || (Cnvec == 0) ;
// C is hypersparse if A is hypersparse, or if C is empty
bool C_is_hyper = A_is_hyper || C_empty ;
if (C_is_hyper)
{
Ch = GB_MALLOC (Cnvec, int64_t, &Ch_size) ;
if (Ch == NULL)
{
GB_FREE_WORKSPACE ;
return (GrB_OUT_OF_MEMORY) ;
}
}
if (Cnvec > 0)
{
Ap_start = GB_MALLOC_WORK (Cnvec, int64_t, &Ap_start_size) ;
Ap_end = GB_MALLOC_WORK (Cnvec, int64_t, &Ap_end_size) ;
if (Ap_start == NULL || Ap_end == NULL)
{
// out of memory
GB_FREE_WORKSPACE ;
GB_FREE (&Ch, Ch_size) ;
GB_FREE_WORK (&Ap_start, Ap_start_size) ;
GB_FREE_WORK (&Ap_end, Ap_end_size) ;
return (GrB_OUT_OF_MEMORY) ;
}
}
//--------------------------------------------------------------------------
// create Ch, Ap_start, and Ap_end
//--------------------------------------------------------------------------
// For the (kC)th vector of C, which corresponds to the (kA)th vector of A,
// pA = Ap_start [kC] and pA_end = Ap_end [kC] are pointers to the range
// of entries in A(imin:imax,kA).
if (C_empty)
{
//----------------------------------------------------------------------
// C is an empty hypersparse matrix
//----------------------------------------------------------------------
;
}
else if (!A_is_hyper)
{
//----------------------------------------------------------------------
// both C and A are standard matrices
//----------------------------------------------------------------------
int64_t jC ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (jC = 0 ; jC < nJ ; jC++)
{
int64_t jA = GB_ijlist (J, jC, Jkind, Jcolon) ;
GB_find_Ap_start_end (jA, Ap, Ai, avlen, imin, imax,
jC, nzombies, Ap_start, Ap_end) ;
}
}
else if (Jkind == GB_ALL || Jkind == GB_RANGE)
{
//----------------------------------------------------------------------
// J is ":" or jbegin:jend
//----------------------------------------------------------------------
// C and A are both hypersparse. Ch is a shifted copy of the trimmed
// Ah, of length Cnvec = anvec. so kA = kC. Ap has also been trimmed.
int64_t kC ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (kC = 0 ; kC < Cnvec ; kC++)
{
int64_t kA = kC ;
int64_t jA = Ah [kA] ;
int64_t jC = jA - jmin ;
Ch [kC] = jC ;
GB_find_Ap_start_end (kA, Ap, Ai, avlen, imin, imax,
kC, nzombies, Ap_start, Ap_end) ;
}
}
else if (Jkind == GB_STRIDE && anvec < nJ * 64)
{
//----------------------------------------------------------------------
// J is jbegin:jinc:jend where jinc may be positive or negative
//----------------------------------------------------------------------
// C and A are both hypersparse. Ch is constructed by scanning all
// vectors in Ah [0..anvec-1] and checking if they appear in the
// jbegin:jinc:jend sequence.
if (jinc > 0)
{
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t kA_start, kA_end ;
GB_PARTITION (kA_start, kA_end, anvec, tid, ntasks) ;
int64_t kC = Count [tid] ;
for (int64_t kA = kA_start ; kA < kA_end ; kA++)
{
int64_t jA = Ah [kA] ;
if (GB_ij_is_in_list (J, nJ, jA, GB_STRIDE, Jcolon))
{
int64_t jC = (jA - jbegin) / jinc ;
Ch [kC] = jC ;
GB_find_Ap_start_end (kA, Ap, Ai, avlen, imin, imax,
kC, nzombies, Ap_start, Ap_end) ;
kC++ ;
}
}
}
}
else
{
int tid;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t kA_start, kA_end ;
GB_PARTITION (kA_start, kA_end, anvec, ntasks-tid-1, ntasks) ;
int64_t kC = Count [tid] ;
for (int64_t kA = kA_end-1 ; kA >= kA_start ; kA--)
{
int64_t jA = Ah [kA] ;
if (GB_ij_is_in_list (J, nJ, jA, GB_STRIDE, Jcolon))
{
int64_t jC = (jA - jbegin) / jinc ;
Ch [kC] = jC ;
GB_find_Ap_start_end (kA, Ap, Ai, avlen, imin, imax,
kC, nzombies, Ap_start, Ap_end) ;
kC++ ;
}
}
}
}
}
else // Jkind == GB_LIST or GB_STRIDE
{
//----------------------------------------------------------------------
// J is an explicit list, or jbegin:jinc:jend
//----------------------------------------------------------------------
// C and A are both hypersparse. Ch is constructed by scanning the
// list J, or the entire jbegin:jinc:jend sequence. Each vector is
// then found in Ah, via binary search.
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t jC_start, jC_end ;
GB_PARTITION (jC_start, jC_end, nJ, tid, ntasks) ;
int64_t kC = Count [tid] ;
for (int64_t jC = jC_start ; jC < jC_end ; jC++)
{
int64_t jA = GB_ijlist (J, jC, Jkind, Jcolon) ;
bool found ;
int64_t kA = 0 ;
int64_t kright = anvec-1 ;
GB_BINARY_SEARCH (jA, Ah, kA, kright, found) ;
if (found)
{
ASSERT (jA == Ah [kA]) ;
Ch [kC] = jC ;
GB_find_Ap_start_end (kA, Ap, Ai, avlen, imin, imax,
kC, nzombies, Ap_start, Ap_end) ;
kC++ ;
}
}
}
}
//--------------------------------------------------------------------------
// check result
//--------------------------------------------------------------------------
#ifdef GB_DEBUG
for (int64_t kC = 0 ; kC < Cnvec ; kC++)
{
// jC is the (kC)th vector of C = A(I,J)
int64_t jC = GBH (Ch, kC) ;
int64_t jA = GB_ijlist (J, jC, Jkind, Jcolon) ;
// jA is the corresponding (kA)th vector of A.
int64_t kA = 0 ;
int64_t pright = A->nvec - 1 ;
int64_t pA_start_all, pA_end_all ;
bool found = GB_lookup (A->h != NULL, A->h, A->p, A->vlen, &kA,
pright, jA, &pA_start_all, &pA_end_all) ;
if (found && A->h != NULL)
{
ASSERT (jA == A->h [kA]) ;
}
int64_t pA = Ap_start [kC] ;
int64_t pA_end = Ap_end [kC] ;
int64_t ajnz = pA_end - pA ;
if (ajnz == avlen)
{
// A(:,kA) is dense; Ai [pA:pA_end-1] is the entire vector.
// C(:,kC) will have exactly nI entries.
ASSERT (pA == pA_start_all) ;
ASSERT (pA_end == pA_end_all ) ;
;
}
else if (ajnz > 0)
{
// A(imin:imax,kA) has at least one entry, in Ai [pA:pA_end-1]
ASSERT (imin <= GB_Ai (pA)) ;
ASSERT (GB_Ai (pA_end-1) <= imax) ;
ASSERT (pA_start_all <= pA && pA < pA_end && pA_end <= pA_end_all) ;
}
else
{
// A(imin:imax,kA) and C(:,kC) are empty
;
}
}
#endif
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORKSPACE ;
(*p_Ch ) = Ch ; (*p_Ch_size) = Ch_size ;
(*p_Ap_start ) = Ap_start ; (*p_Ap_start_size) = Ap_start_size ;
(*p_Ap_end ) = Ap_end ; (*p_Ap_end_size) = Ap_end_size ;
(*p_Cnvec ) = Cnvec ;
(*p_need_qsort) = need_qsort ;
(*p_Ikind ) = Ikind ;
(*p_nI ) = nI ;
(*p_nJ ) = nJ ;
return (GrB_SUCCESS) ;
}
|
gemm.c | #include "gemm.h"
#include "utils.h"
#include "cuda.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
/*
 * gemm_bin: accumulate a binarized matrix product into C.
 * A is an M x K matrix of 0/1 flags (lda = row stride); for each nonzero
 * flag the corresponding row of B (K x N, ldb) is added to the row of C
 * (M x N, ldc), and subtracted when the flag is zero.
 * Note: ALPHA is accepted for signature parity with gemm() but unused.
 */
void gemm_bin(int M, int N, int K, float ALPHA,
        char *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int row, inner, col;
    for(row = 0; row < M; ++row){
        float *crow = C + row*ldc;
        for(inner = 0; inner < K; ++inner){
            const float *brow = B + inner*ldb;
            if(A[row*lda + inner]){
                /* flag set: add this row of B */
                for(col = 0; col < N; ++col) crow[col] += brow[col];
            } else {
                /* flag clear: subtract it */
                for(col = 0; col < N; ++col) crow[col] -= brow[col];
            }
        }
    }
}
/*
 * random_matrix: allocate a rows x cols matrix filled with uniform
 * pseudo-random values in [0, 1] (rand()/RAND_MAX).
 * Returns a heap buffer the caller must free().
 * Fixes: the calloc result is now cast explicitly (required in C++,
 * harmless in C), and the element count is computed in size_t so
 * rows*cols cannot overflow int before allocation.
 */
float *random_matrix(int rows, int cols)
{
    int i;
    float *m = (float*)calloc((size_t)rows*cols, sizeof(float));
    for(i = 0; i < rows*cols; ++i){
        m[i] = (float)rand()/RAND_MAX;
    }
    return m;
}
/*
 * time_random_matrix: benchmark gemm_cpu with 10 multiplications of
 * random (m x k) * (k x n) matrices, honoring the TA/TB transpose flags
 * (transposed operands are allocated with swapped dimensions).
 * Fix: the elapsed value is (end-start)/CLOCKS_PER_SEC, i.e. SECONDS,
 * but the old message labeled it "ms"; the unit label is corrected.
 */
void time_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;
    float *c = random_matrix(m,n);
    int i;
    clock_t start = clock(), end;
    for(i = 0; i<10; ++i){
        gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    }
    end = clock();
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}
/*
 * gemm: public single-precision GEMM entry point,
 * C = ALPHA * op(A) * op(B) + BETA * C, where op() transposes its
 * operand when the matching TA/TB flag is nonzero.
 * This build delegates straight to the CPU implementation.
 */
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    gemm_cpu(TA, TB, M, N, K, ALPHA, A, lda, B, ldb, BETA, C, ldc);
}
/*
 * gemm_nn: C += ALPHA * A * B with neither operand transposed.
 * A is M x K (lda), B is K x N (ldb), C is M x N (ldc), all row-major.
 * Fixes: j and k were declared at function scope, so under
 * "#pragma omp parallel for" they defaulted to SHARED and every thread
 * raced on them; they are now scoped inside the parallel loop body.
 * The C++17-removed "register" qualifier is also dropped.
 */
void gemm_nn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i;
    #pragma omp parallel for
    for(i = 0; i < M; ++i){
        int k;
        for(k = 0; k < K; ++k){
            /* hoist ALPHA*A[i][k]; the inner loop streams over B's row */
            float A_PART = ALPHA*A[i*lda+k];
            int j;
            for(j = 0; j < N; ++j){
                C[i*ldc+j] += A_PART*B[k*ldb+j];
            }
        }
    }
}
/*
 * gemm_nt: C += ALPHA * A * B^T.
 * A is M x K (lda), B is N x K (ldb) read transposed, C is M x N (ldc).
 * Fixes: j and k were function-scope and therefore SHARED under
 * "#pragma omp parallel for", a data race; they are now per-thread
 * locals. The C++17-removed "register" qualifier is dropped.
 */
void gemm_nt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i;
    #pragma omp parallel for
    for(i = 0; i < M; ++i){
        int j;
        for(j = 0; j < N; ++j){
            /* dot product of row i of A with row j of B */
            float sum = 0;
            int k;
            for(k = 0; k < K; ++k){
                sum += ALPHA*A[i*lda+k]*B[j*ldb + k];
            }
            C[i*ldc+j] += sum;
        }
    }
}
/*
 * gemm_tn: C += ALPHA * A^T * B.
 * A is K x M (lda) read transposed, B is K x N (ldb), C is M x N (ldc).
 * Fixes: j and k were function-scope and therefore SHARED under
 * "#pragma omp parallel for", a data race; they are now per-thread
 * locals. The C++17-removed "register" qualifier is dropped.
 */
void gemm_tn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i;
    #pragma omp parallel for
    for(i = 0; i < M; ++i){
        int k;
        for(k = 0; k < K; ++k){
            /* A is indexed column-wise: element (k, i) of the stored K x M */
            float A_PART = ALPHA*A[k*lda+i];
            int j;
            for(j = 0; j < N; ++j){
                C[i*ldc+j] += A_PART*B[k*ldb+j];
            }
        }
    }
}
/*
 * gemm_tt: C += ALPHA * A^T * B^T.
 * A is K x M (lda) and B is N x K (ldb), both read transposed;
 * C is M x N (ldc).
 * Fixes: j and k were function-scope and therefore SHARED under
 * "#pragma omp parallel for", a data race; they are now per-thread
 * locals. The C++17-removed "register" qualifier is dropped.
 */
void gemm_tt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i;
    #pragma omp parallel for
    for(i = 0; i < M; ++i){
        int j;
        for(j = 0; j < N; ++j){
            /* dot product of column i of A with column j of B */
            float sum = 0;
            int k;
            for(k = 0; k < K; ++k){
                sum += ALPHA*A[i+k*lda]*B[k+j*ldb];
            }
            C[i*ldc+j] += sum;
        }
    }
}
/*
 * gemm_cpu: CPU GEMM, C = ALPHA * op(A) * op(B) + BETA * C.
 * First scales C by BETA in place, then dispatches to the kernel
 * matching the TA/TB transpose flags, each of which accumulates the
 * ALPHA-scaled product into C.
 */
void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    int r, c;
    /* C *= BETA before any product terms are accumulated */
    for(r = 0; r < M; ++r){
        for(c = 0; c < N; ++c){
            C[r*ldc + c] *= BETA;
        }
    }
    /* pick the kernel for the requested transpose combination */
    if(TA){
        if(TB) gemm_tt(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
        else   gemm_tn(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    } else {
        if(TB) gemm_nt(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
        else   gemm_nn(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    }
}
#ifdef GPU
#include <math.h>
/*
 * gemm_gpu: C = ALPHA * op(A) * op(B) + BETA * C via cublasSgemm.
 * A_gpu/B_gpu/C_gpu are device buffers. cuBLAS is column-major while
 * this code stores row-major, so the call swaps the operands and the
 * M/N dimensions (computing B^T * A^T) to get a row-major C.
 * NOTE(review): cublasSgemm returns cublasStatus_t, which is stored
 * here in a cudaError_t and passed to check_error — confirm check_error
 * is meant to accept cuBLAS status codes.
 */
void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A_gpu, int lda,
        float *B_gpu, int ldb,
        float BETA,
        float *C_gpu, int ldc)
{
    cublasHandle_t handle = blas_handle();
    cudaError_t status = cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N),
            (TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc);
    check_error(status);
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
/*
 * time_gpu_random_matrix: benchmark 32 GPU GEMMs on random matrices
 * sized per the TA/TB transpose flags, reporting wall time in seconds.
 * NOTE(review): a, b and c are HOST buffers from random_matrix, but
 * gemm_gpu forwards its pointers straight to cublasSgemm, which
 * presumably expects device memory — verify this path is ever used.
 */
void time_gpu_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;
    float *c = random_matrix(m,n);
    int i;
    clock_t start = clock(), end;
    for(i = 0; i<32; ++i){
        gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    }
    end = clock();
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}
/*
 * time_gpu: benchmark gemm_gpu on proper device copies of random
 * host matrices, reporting seconds and GFLOPS over `iter` runs.
 * The flop count assumes ~(2K + 2) operations per output element
 * (multiply-add per K term plus the BETA/ALPHA scaling).
 * NOTE(review): cudaThreadSynchronize is the legacy pre-CUDA-4 name
 * for cudaDeviceSynchronize — consider updating if the toolkit allows.
 */
void time_gpu(int TA, int TB, int m, int k, int n)
{
    int iter = 10;
    float *a = random_matrix(m,k);
    float *b = random_matrix(k,n);
    int lda = (!TA)?k:m;
    int ldb = (!TB)?n:k;
    float *c = random_matrix(m,n);
    /* device-resident copies; host buffers stay for cleanup only */
    float *a_cl = cuda_make_array(a, m*k);
    float *b_cl = cuda_make_array(b, k*n);
    float *c_cl = cuda_make_array(c, m*n);
    int i;
    clock_t start = clock(), end;
    for(i = 0; i<iter; ++i){
        gemm_gpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n);
        /* wait for the kernel so the timing covers actual GPU work */
        cudaThreadSynchronize();
    }
    double flop = ((double)m)*n*(2.*k + 2.)*iter;
    double gflop = flop/pow(10., 9);
    end = clock();
    double seconds = sec(end-start);
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds);
    cuda_free(a_cl);
    cuda_free(b_cl);
    cuda_free(c_cl);
    free(a);
    free(b);
    free(c);
}
/*
 * test_gpu_accuracy: run the same GEMM on GPU and CPU with identical
 * seeded random inputs and print the mean squared difference (SSE/(m*n))
 * between the two result matrices.
 * NOTE(review): a and b are HOST buffers but are passed to gemm_gpu,
 * which hands its pointers to cublasSgemm (device memory expected) —
 * confirm this test still functions as intended.
 */
void test_gpu_accuracy(int TA, int TB, int m, int k, int n)
{
    /* fixed seed so both paths see the same inputs */
    srand(0);
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;
    float *c = random_matrix(m,n);
    float *c_gpu = random_matrix(m,n);
    /* zero both outputs so BETA=1 accumulation starts from equal state */
    memset(c, 0, m*n*sizeof(float));
    memset(c_gpu, 0, m*n*sizeof(float));
    int i;
    //pm(m,k,b);
    gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n);
    //printf("GPU\n");
    //pm(m, n, c_gpu);
    gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    //printf("\n\nCPU\n");
    //pm(m, n, c);
    double sse = 0;
    for(i = 0; i < m*n; ++i) {
        //printf("%f %f\n", c[i], c_gpu[i]);
        sse += pow(c[i]-c_gpu[i], 2);
    }
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n));
    free(a);
    free(b);
    free(c);
    free(c_gpu);
}
/*
 * test_gpu_blas: driver that times a fixed set of convolution-shaped
 * GEMMs on the GPU. The commented-out section holds accuracy checks
 * and additional timing shapes kept for manual experimentation.
 * Always returns 0.
 */
int test_gpu_blas()
{
    /*
       test_gpu_accuracy(0,0,10,576,75);

       test_gpu_accuracy(0,0,17,10,10);
       test_gpu_accuracy(1,0,17,10,10);
       test_gpu_accuracy(0,1,17,10,10);
       test_gpu_accuracy(1,1,17,10,10);

       test_gpu_accuracy(0,0,1000,10,100);
       test_gpu_accuracy(1,0,1000,10,100);
       test_gpu_accuracy(0,1,1000,10,100);
       test_gpu_accuracy(1,1,1000,10,100);

       test_gpu_accuracy(0,0,10,10,10);

       time_gpu(0,0,64,2916,363);
       time_gpu(0,0,64,2916,363);
       time_gpu(0,0,64,2916,363);
       time_gpu(0,0,192,729,1600);
       time_gpu(0,0,384,196,1728);
       time_gpu(0,0,256,196,3456);
       time_gpu(0,0,256,196,2304);
       time_gpu(0,0,128,4096,12544);
       time_gpu(0,0,128,4096,4096);
     */
    time_gpu(0,0,64,75,12544);
    time_gpu(0,0,64,75,12544);
    time_gpu(0,0,64,75,12544);
    time_gpu(0,0,64,576,12544);
    time_gpu(0,0,256,2304,784);
    time_gpu(1,1,2304,256,784);
    time_gpu(0,0,512,4608,196);
    time_gpu(1,1,4608,512,196);
    return 0;
}
#endif
|
rose_scalar_output.c | /*
* Scalar-to-scalar output dependencies
* */
#include "omp.h"
int a[100];
// A private case
void foo2()
{
int i;
int tmp;
#pragma omp parallel for private (tmp,i)
for (i = 0; i <= 99; i += 1) {
tmp = a[i] + i;
}
}
// A lastprivate case
void foo()
{
int i;
int tmp;
#pragma omp parallel for private (i) lastprivate (tmp)
for (i = 0; i <= 99; i += 1) {
tmp = a[i] + i;
}
i = tmp;
}
|
TransferOP.h | /*
* TransferOP.h
*
* Created on: Jul 20, 2016
* Author: mason
*/
#ifndef TransferOP_H_
#define TransferOP_H_
#include "Param.h"
#include "MyLib.h"
#include "Node.h"
#include "Graph.h"
// Parameters for a label-conditioned linear transfer: one weight matrix
// per vocabulary element, indexed by the id of a string feature.
class TransferParams {
  public:
    vector<Param> W;   // one nOutSize x nInSize matrix per element
    PAlphabet elems;   // vocabulary mapping feature strings to ids
    int nVSize;        // vocabulary size (0 until initial() is called)
    int nInSize;
    int nOutSize;

  public:
    TransferParams() : nVSize(0) {
    }

    // Register every weight matrix with the optimizer.
    inline void exportAdaParams(ModelUpdate& ada) {
        for (int k = 0; k < nVSize; k++) {
            ada.addParam(&(W[k]));
        }
    }

    // Size the weight bank from the alphabet and initialize each matrix.
    inline void initial(PAlphabet alpha, int nOSize, int nISize) {
        elems = alpha;
        nVSize = elems->size();
        nInSize = nISize;
        nOutSize = nOSize;
        W.resize(nVSize);
        for (int k = 0; k < nVSize; k++) {
            W[k].initial(nOSize, nISize);
        }
    }

    // Map a feature string to its weight index (alphabet semantics apply
    // for unknown strings).
    inline int getElemId(const string& strFeat) {
        return elems->from_string(strFeat);
    }

    // Serialization hooks, not yet implemented.
    inline void save(std::ofstream &os) const {
    }

    inline void load(std::ifstream &is) {
    }
};
// Graph node applying a label-specific linear map: val = W[xid] * in->val,
// where xid is resolved from a string feature via shared TransferParams.
// Fix: the unknown-label warning previously read "could find the label",
// i.e. the opposite of what it meant; corrected to "could not find".
class TransferNode : public Node {
  public:
    PNode in;               // input node (not owned)
    int xid;                // index of the selected weight matrix; -1 if unresolved
    TransferParams* param;  // shared parameters (not owned)

  public:
    TransferNode() : Node() {
        in = NULL;
        xid = -1;
        param = NULL;
    }

    inline void setParam(TransferParams* paramInit) {
        param = paramInit;
    }

    // Reset per-example state; parameters stay bound.
    inline void clearValue() {
        Node::clearValue();
        in = NULL;
        xid = -1;
    }

  public:
    // Bind the input node and resolve the label string to a weight index.
    // A negative xid makes compute()/backward() no-ops.
    void forward(Graph *cg, PNode x, const string& strNorm) {
        in = x;
        xid = param->getElemId(strNorm);
        if (xid < 0) {
            std::cout << "TransferNode warning: could not find the label: " << strNorm << std::endl;
        }
        degree = 0;
        in->addParent(this);
    }

  public:
    void compute() {
        if (xid >= 0) {
            val.mat() = param->W[xid].val.mat() * in->val.mat();
        }
    }

    // Accumulate gradients for the selected weight matrix and the input.
    void backward() {
        if(xid >= 0) {
            param->W[xid].grad.mat() += loss.mat() * in->val.tmat();
            in->loss.mat() += param->W[xid].val.mat().transpose() * loss.mat();
        }
    }

  public:
    inline PExecute generate(bool bTrain, dtype cur_drop_factor);

    // Nodes batch together only when they share parameters and label id.
    inline bool typeEqual(PNode other) {
        bool result = Node::typeEqual(other);
        if (!result) return false;
        TransferNode* conv_other = (TransferNode*)other;
        if (param != conv_other->param) {
            return false;
        }
        if (xid != conv_other->xid) {
            return false;
        }
        return true;
    }
};
// Batched executor for TransferNode: runs compute/backward plus the
// dropout hooks over every node collected in the batch.
class TransferExecute : public Execute {
  public:
    inline void forward() {
        int n = batch.size();
        //#pragma omp parallel for
        for (int k = 0; k < n; k++) {
            batch[k]->compute();
            batch[k]->forward_drop(bTrain, drop_factor);
        }
    }

    inline void backward() {
        int n = batch.size();
        //#pragma omp parallel for
        for (int k = 0; k < n; k++) {
            batch[k]->backward_drop();
            batch[k]->backward();
        }
    }
};
// Create a fresh single-node execution batch for this TransferNode.
// The caller takes ownership of the returned executor.
// Fix: dropped the stray ';' that followed the function body.
inline PExecute TransferNode::generate(bool bTrain, dtype cur_drop_factor) {
    TransferExecute* exec = new TransferExecute();
    exec->batch.push_back(this);
    exec->bTrain = bTrain;
    exec->drop_factor = cur_drop_factor;
    return exec;
}
#endif /* TransferOP_H_ */
|
cg.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 3.0 structured OpenMP C versions - CG
This benchmark is an OpenMP C version of the NPB CG code.
The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran versions
in "NPB 2.3-serial" developed by NAS. 3.0 translation is performed by the UVSQ.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: M. Yarrow
C. Kuszmaul
OpenMP C version: S. Satoh
3.0 structure translation: F. Conti
--------------------------------------------------------------------*/
/*
c---------------------------------------------------------------------
c Note: please observe that in the routine conj_grad three
c implementations of the sparse matrix-vector multiply have
c been supplied. The default matrix-vector multiply is not
c loop unrolled. The alternate implementations are unrolled
c to a depth of 2 and unrolled to a depth of 8. Please
c experiment with these to find the fastest for your particular
c architecture. If reporting timing results, any of these three may
c be used without penalty.
c---------------------------------------------------------------------
*/
#include "../common/npb-C.h"
#include "npbparams.h"
#define NZ NA*(NONZER+1)*(NONZER+1)+NA*(NONZER+2)
/* global variables */
/* common /partit_size/ */
static int naa;
static int nzz;
static int firstrow;
static int lastrow;
static int firstcol;
static int lastcol;
/* common /main_int_mem/ */
static int colidx[NZ+1]; /* colidx[1:NZ] */
static int rowstr[NA+1+1]; /* rowstr[1:NA+1] */
static int iv[2*NA+1+1]; /* iv[1:2*NA+1] */
static int arow[NZ+1]; /* arow[1:NZ] */
static int acol[NZ+1]; /* acol[1:NZ] */
/* common /main_flt_mem/ */
static double v[NA+1+1]; /* v[1:NA+1] */
static double aelt[NZ+1]; /* aelt[1:NZ] */
static double a[NZ+1]; /* a[1:NZ] */
static double x[NA+2+1]; /* x[1:NA+2] */
static double z[NA+2+1]; /* z[1:NA+2] */
static double p[NA+2+1]; /* p[1:NA+2] */
static double q[NA+2+1]; /* q[1:NA+2] */
static double r[NA+2+1]; /* r[1:NA+2] */
//static double w[NA+2+1]; /* w[1:NA+2] */
/* common /urando/ */
static double amult;
static double tran;
/* function declarations */
static void conj_grad (int colidx[], int rowstr[], double x[], double z[],
double a[], double p[], double q[], double r[],
//double w[],
double *rnorm);
static void makea(int n, int nz, double a[], int colidx[], int rowstr[],
int nonzer, int firstrow, int lastrow, int firstcol,
int lastcol, double rcond, int arow[], int acol[],
double aelt[], double v[], int iv[], double shift );
static void sparse(double a[], int colidx[], int rowstr[], int n,
int arow[], int acol[], double aelt[],
int firstrow, int lastrow,
double x[], boolean mark[], int nzloc[], int nnza);
static void sprnvc(int n, int nz, double v[], int iv[], int nzloc[],
int mark[]);
static int icnvrt(double x, int ipwr2);
static void vecset(int n, double v[], int iv[], int *nzv, int i, double val);
/*--------------------------------------------------------------------
program cg
--------------------------------------------------------------------*/
/* Driver: select the benchmark class from the compile-time problem size,
   build the sparse system with makea, run one untimed CG solve to touch
   all code/data pages, then time NITER inverse-power-method iterations
   and verify zeta against the class-specific reference value. */
int main(int argc, char **argv) {
    int i, j, k, it;
    int nthreads = 1;
    double zeta;
    double rnorm;
    double norm_temp11;
    double norm_temp12;
    double t, mflops;
    char class;
    boolean verified;
    double zeta_verify_value, epsilon;

    /* single-process version: this rank owns the full row/column range */
    firstrow = 1;
    lastrow = NA;
    firstcol = 1;
    lastcol = NA;

    /* identify the benchmark class from the npbparams.h constants;
       'U' (unknown) disables verification below */
    if (NA == 1400 && NONZER == 7 && NITER == 15 && SHIFT == 10.0) {
        class = 'S';
        zeta_verify_value = 8.5971775078648;
    } else if (NA == 7000 && NONZER == 8 && NITER == 15 && SHIFT == 12.0) {
        class = 'W';
        zeta_verify_value = 10.362595087124;
    } else if (NA == 14000 && NONZER == 11 && NITER == 15 && SHIFT == 20.0) {
        class = 'A';
        zeta_verify_value = 17.130235054029;
    } else if (NA == 75000 && NONZER == 13 && NITER == 75 && SHIFT == 60.0) {
        class = 'B';
        zeta_verify_value = 22.712745482631;
    } else if (NA == 150000 && NONZER == 15 && NITER == 75 && SHIFT == 110.0) {
        class = 'C';
        zeta_verify_value = 28.973605592845;
    } else {
        class = 'U';
    }

    printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version"
           " - CG Benchmark\n");
    printf(" Size: %10d\n", NA);
    printf(" Iterations: %5d\n", NITER);

    naa = NA;
    nzz = NZ;

/*--------------------------------------------------------------------
c  Initialize random number generator
c-------------------------------------------------------------------*/
    tran = 314159265.0;
    amult = 1220703125.0;
    zeta = randlc( &tran, amult );

/*--------------------------------------------------------------------
c
c-------------------------------------------------------------------*/
    /* build the random sparse matrix a/colidx/rowstr with condition RCOND */
    makea(naa, nzz, a, colidx, rowstr, NONZER,
          firstrow, lastrow, firstcol, lastcol,
          RCOND, arow, acol, aelt, v, iv, SHIFT);

/*---------------------------------------------------------------------
c  Note: as a result of the above call to makea:
c        values of j used in indexing rowstr go from 1 --> lastrow-firstrow+1
c        values of colidx which are col indexes go from firstcol --> lastcol
c        So:
c        Shift the col index vals from actual (firstcol --> lastcol )
c        to local, i.e., (1 --> lastcol-firstcol+1)
c---------------------------------------------------------------------*/
#pragma omp parallel default(shared) private(i,j,k)
{
    /* the three init loops are independent, hence nowait on each */
    #pragma omp for nowait
    for (j = 1; j <= lastrow - firstrow + 1; j++) {
        for (k = rowstr[j]; k < rowstr[j+1]; k++) {
            colidx[k] = colidx[k] - firstcol + 1;
        }
    }

/*--------------------------------------------------------------------
c  set starting vector to (1, 1, .... 1)
c-------------------------------------------------------------------*/
    #pragma omp for nowait
    for (i = 1; i <= NA+1; i++) {
        x[i] = 1.0;
    }

    #pragma omp for nowait
    for (j = 1; j <= lastcol-firstcol+1; j++) {
        q[j] = 0.0;
        z[j] = 0.0;
        r[j] = 0.0;
        p[j] = 0.0;
    }
}// end omp parallel

    zeta = 0.0;

/*-------------------------------------------------------------------
c---->
c  Do one iteration untimed to init all code and data page tables
c---->                    (then reinit, start timing, to niter its)
c-------------------------------------------------------------------*/
    for (it = 1; it <= 1; it++) {

/*--------------------------------------------------------------------
c  The call to the conjugate gradient routine:
c-------------------------------------------------------------------*/
        conj_grad (colidx, rowstr, x, z, a, p, q, r,/* w,*/ &rnorm);

/*--------------------------------------------------------------------
c  zeta = shift + 1/(x.z)
c  So, first: (x.z)
c  Also, find norm of z
c  So, first: (z.z)
c-------------------------------------------------------------------*/
        norm_temp11 = 0.0;
        norm_temp12 = 0.0;
#pragma omp parallel for default(shared) private(j) reduction(+:norm_temp11,norm_temp12)
        for (j = 1; j <= lastcol-firstcol+1; j++) {
            norm_temp11 = norm_temp11 + x[j]*z[j];
            norm_temp12 = norm_temp12 + z[j]*z[j];
        }
        norm_temp12 = 1.0 / sqrt( norm_temp12 );

/*--------------------------------------------------------------------
c  Normalize z to obtain x
c-------------------------------------------------------------------*/
#pragma omp parallel for default(shared) private(j)
        for (j = 1; j <= lastcol-firstcol+1; j++) {
            x[j] = norm_temp12*z[j];
        }

    } /* end of do one iteration untimed */

/*--------------------------------------------------------------------
c  set starting vector to (1, 1, .... 1)
c-------------------------------------------------------------------*/
    /* reinitialize x so the timed section starts from the same state */
#pragma omp parallel for default(shared) private(i)
    for (i = 1; i <= NA+1; i++) {
        x[i] = 1.0;
    }
    zeta = 0.0;

    timer_clear( 1 );
    timer_start( 1 );

/*--------------------------------------------------------------------
c---->
c  Main Iteration for inverse power method
c---->
c-------------------------------------------------------------------*/
    for (it = 1; it <= NITER; it++) {

/*--------------------------------------------------------------------
c  The call to the conjugate gradient routine:
c-------------------------------------------------------------------*/
        conj_grad(colidx, rowstr, x, z, a, p, q, r/*, w*/, &rnorm);

/*--------------------------------------------------------------------
c  zeta = shift + 1/(x.z)
c  So, first: (x.z)
c  Also, find norm of z
c  So, first: (z.z)
c-------------------------------------------------------------------*/
        norm_temp11 = 0.0;
        norm_temp12 = 0.0;
#pragma omp parallel for default(shared) private(j) reduction(+:norm_temp11,norm_temp12)
        for (j = 1; j <= lastcol-firstcol+1; j++) {
            norm_temp11 = norm_temp11 + x[j]*z[j];
            norm_temp12 = norm_temp12 + z[j]*z[j];
        }
        norm_temp12 = 1.0 / sqrt( norm_temp12 );

        /* eigenvalue estimate from the shifted Rayleigh-quotient form */
        zeta = SHIFT + 1.0 / norm_temp11;

        if( it == 1 ) {
            printf("   iteration           ||r||                 zeta\n");
        }
        printf("    %5d       %20.14e%20.13e\n", it, rnorm, zeta);

/*--------------------------------------------------------------------
c  Normalize z to obtain x
c-------------------------------------------------------------------*/
#pragma omp parallel for default(shared) private(j)
        for (j = 1; j <= lastcol-firstcol+1; j++) {
            x[j] = norm_temp12*z[j];
        }
    } /* end of main iter inv pow meth */

    /* record the thread count actually used, for the report below */
#pragma omp parallel
{
#if defined(_OPENMP)
#pragma omp master
    nthreads = omp_get_num_threads();
#endif /* _OPENMP */
} /* end parallel */

    timer_stop( 1 );

/*--------------------------------------------------------------------
c  End of timed section
c-------------------------------------------------------------------*/
    t = timer_read( 1 );

    printf(" Benchmark completed\n");

    epsilon = 1.0e-10;
    if (class != 'U') {
        if (fabs(zeta - zeta_verify_value) <= epsilon) {
            verified = TRUE;
            printf(" VERIFICATION SUCCESSFUL\n");
            printf(" Zeta is    %20.12e\n", zeta);
            printf(" Error is   %20.12e\n", zeta - zeta_verify_value);
        } else {
            verified = FALSE;
            printf(" VERIFICATION FAILED\n");
            printf(" Zeta                %20.12e\n", zeta);
            printf(" The correct zeta is %20.12e\n", zeta_verify_value);
        }
    } else {
        verified = FALSE;
        printf(" Problem size unknown\n");
        printf(" NO VERIFICATION PERFORMED\n");
    }

    /* standard NPB CG flop model (per-iteration SpMV plus vector work) */
    if ( t != 0.0 ) {
        mflops = (2.0*NITER*NA)
            * (3.0+(NONZER*(NONZER+1)) + 25.0*(5.0+(NONZER*(NONZER+1))) + 3.0 )
            / t / 1000000.0;
    } else {
        mflops = 0.0;
    }

    c_print_results("CG", class, NA, 0, 0, NITER, nthreads, t,
                    mflops, "          floating point",
                    verified, NPBVERSION, COMPILETIME,
                    CS1, CS2, CS3, CS4, CS5, CS6, CS7);
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c  Conjugate-gradient solver: approximately solves A.z = x and returns
c  in *rnorm the explicit residual norm ||x - A.z||.
c  All vectors use 1-based indexing as in the NPB1 spec; the matrix is
c  stored in CSR form (a, colidx, rowstr).
c-------------------------------------------------------------------*/
static void conj_grad (
    int colidx[],   /* colidx[1:nzz] */
    int rowstr[],   /* rowstr[1:naa+1] */
    double x[],     /* x[*] */
    double z[],     /* z[*] */
    double a[],     /* a[1:nzz] */
    double p[],     /* p[*] */
    double q[],     /* q[*] */
    double r[],     /* r[*] */
    //double w[],   /* w[*] */
    double *rnorm )
/*---------------------------------------------------------------------
c  Floating point arrays here are named as in NPB1 spec discussion of
c  CG algorithm
c---------------------------------------------------------------------*/
{
    static int callcount = 0;   /* iterations executed across all calls */
    double d, sum, rho, rho0, alpha, beta;
    int j, k;
    int cgit, cgitmax = 25;

    rho = 0.0;

/*--------------------------------------------------------------------
c  Initialize the CG algorithm: q = z = 0, r = p = x, rho = r.r
c-------------------------------------------------------------------*/
#pragma omp parallel default(shared) private(j) shared(rho,naa)
    {
#pragma omp for
        for (j = 1; j <= naa+1; j++) {
            q[j] = 0.0;
            z[j] = 0.0;
            r[j] = x[j];
            p[j] = r[j];
        }
#pragma omp for reduction(+:rho)
        for (j = 1; j <= lastcol-firstcol+1; j++) {
            rho = rho + r[j]*r[j];
        }
    } /* end omp parallel */

/*--------------------------------------------------------------------
c---->
c  The conj grad iteration loop
c---->
c-------------------------------------------------------------------*/
    for (cgit = 1; cgit <= cgitmax; cgit++) {
        rho0 = rho;
        d = 0.0;
        rho = 0.0;
#pragma omp parallel default(shared) private(j,k,sum,alpha,beta) shared(d,rho0,rho)
        {
            /* q = A.p: rolled partition submatrix-vector multiply */
#pragma omp for
            for (j = 1; j <= lastrow-firstrow+1; j++) {
                sum = 0.0;
                for (k = rowstr[j]; k < rowstr[j+1]; k++) {
                    sum = sum + a[k]*p[colidx[k]];
                }
                q[j] = sum;
            }

            /* obtain d = p.q */
#pragma omp for reduction(+:d)
            for (j = 1; j <= lastcol-firstcol+1; j++) {
                d = d + p[j]*q[j];
            }
#pragma omp barrier

            /* alpha = rho / (p.q); alpha is private, so every thread
               computes the same value redundantly (no race) */
            alpha = rho0 / d;

            /* z = z + alpha*p, r = r - alpha*q, and rho = r.r,
               fused into a single pass over the vectors */
#pragma omp for reduction(+:rho)
            for (j = 1; j <= lastcol-firstcol+1; j++) {
                z[j] = z[j] + alpha*p[j];
                r[j] = r[j] - alpha*q[j];
                rho = rho + r[j]*r[j];
            }

            /* beta = rho / rho0; private, computed redundantly */
            beta = rho / rho0;

            /* p = r + beta*p */
#pragma omp for nowait
            for (j = 1; j <= lastcol-firstcol+1; j++) {
                p[j] = r[j] + beta*p[j];
            }
        } /* end omp parallel */

        /* BUG FIX: callcount++ used to sit inside the parallel region,
           racing on the shared static and over-counting by a factor of
           the thread count; increment once per iteration instead. */
        callcount++;
    } /* end of do cgit=1,cgitmax */

/*---------------------------------------------------------------------
c  Compute residual norm explicitly: ||r|| = ||x - A.z||
c  First, form A.z (stored into r)
c  The partition submatrix-vector multiply
c---------------------------------------------------------------------*/
    sum = 0.0;
    /* BUG FIX: k was missing from the private clause (the original had
       it only in a commented-out "//private(d, k)"), so all threads
       shared one inner loop counter below -- a data race. */
#pragma omp parallel default(shared) private(j,k,d) shared(sum)
    {
#pragma omp for
        for (j = 1; j <= lastrow-firstrow+1; j++) {
            d = 0.0;
            for (k = rowstr[j]; k <= rowstr[j+1]-1; k++) {
                d = d + a[k]*z[colidx[k]];
            }
            r[j] = d;
        }

        /* At this point, r contains A.z; accumulate ||x - A.z||^2 */
#pragma omp for reduction(+:sum)
        for (j = 1; j <= lastcol-firstcol+1; j++) {
            d = x[j] - r[j];
            sum = sum + d*d;
        }
    } /* end omp parallel */

    (*rnorm) = sqrt(sum);
}
/*---------------------------------------------------------------------
c generate the test problem for benchmark 6
c makea generates a sparse matrix with a
c prescribed sparsity distribution
c
c parameter type usage
c
c input
c
c n i number of cols/rows of matrix
c nz i nonzeros as declared array size
c rcond r*8 condition number
c shift r*8 main diagonal shift
c
c output
c
c a r*8 array for nonzeros
c colidx i col indices
c rowstr i row pointers
c
c workspace
c
c iv, arow, acol i
c v, aelt r*8
c---------------------------------------------------------------------*/
/* Build the sparse test matrix: for each generated column vector, form
 * the outer product restricted to this processor's row/column slab,
 * then add a shifted multiple of the identity and assemble CSR form. */
static void makea(
    int n,
    int nz,
    double a[],     /* a[1:nz] */
    int colidx[],   /* colidx[1:nz] */
    int rowstr[],   /* rowstr[1:n+1] */
    int nonzer,
    int firstrow,
    int lastrow,
    int firstcol,
    int lastcol,
    double rcond,
    int arow[],     /* arow[1:nz] */
    int acol[],     /* acol[1:nz] */
    double aelt[],  /* aelt[1:nz] */
    double v[],     /* v[1:n+1] */
    int iv[],       /* iv[1:2*n+1] */
    double shift )
{
    int i, iouter, nzv;
    int nnza = 0;        /* number of [row, col, element] triples so far */
    double size = 1.0;   /* scale of the current outer product */
    /* geometric decay chosen so the condition number comes out near rcond;
       nonzer is approximately (int(sqrt(nnza / n))) */
    double ratio = pow(rcond, (1.0 / (double)n));

    /* colidx[n+1 .. 2n] doubles as sprnvc's nonzero-position marker;
       it must start out all zero */
#pragma omp parallel for default(shared) private(i)
    for (i = 1; i <= n; i++) {
        colidx[n+i] = 0;
    }

    for (iouter = 1; iouter <= n; iouter++) {
        int ivelt, ivelt1;

        nzv = nonzer;
        sprnvc(n, nzv, v, iv, &(colidx[0]), &(colidx[n]));
        vecset(n, v, iv, &nzv, iouter, 0.5);

        for (ivelt = 1; ivelt <= nzv; ivelt++) {
            int jcol = iv[ivelt];
            double scale;

            if (jcol < firstcol || jcol > lastcol)
                continue;   /* column outside our slab */

            scale = size * v[ivelt];
            for (ivelt1 = 1; ivelt1 <= nzv; ivelt1++) {
                int irow = iv[ivelt1];

                if (irow < firstrow || irow > lastrow)
                    continue;   /* row outside our slab */

                nnza = nnza + 1;
                if (nnza > nz) {
                    printf("Space for matrix elements exceeded in"
                           " makea\n");
                    printf("nnza, nzmax = %d, %d\n", nnza, nz);
                    printf("iouter = %d\n", iouter);
                    exit(1);
                }
                acol[nnza] = jcol;
                arow[nnza] = irow;
                aelt[nnza] = v[ivelt1] * scale;
            }
        }
        size = size * ratio;
    }

    /* add identity * rcond (shifted) to bound the smallest eigenvalue
       from below by rcond */
    for (i = firstrow; i <= lastrow; i++) {
        if (i < firstcol || i > lastcol)
            continue;

        iouter = n + i;
        nnza = nnza + 1;
        if (nnza > nz) {
            printf("Space for matrix elements exceeded in makea\n");
            printf("nnza, nzmax = %d, %d\n", nnza, nz);
            printf("iouter = %d\n", iouter);
            exit(1);
        }
        acol[nnza] = i;
        arow[nnza] = i;
        aelt[nnza] = rcond - shift;
    }

    /* assemble the CSR matrix from the triple list, merging duplicates
       (v and iv are reused as workspace) */
    sparse(a, colidx, rowstr, n, arow, acol, aelt,
           firstrow, lastrow, v, &(iv[0]), &(iv[n]), nnza);
}
/*---------------------------------------------------
c generate a sparse matrix from a list of
c [col, row, element] tri
c---------------------------------------------------*/
/* Assemble a CSR matrix (a, colidx, rowstr) from a list of
 * [row, col, element] triples, summing duplicate entries.
 * x, mark, nzloc are caller-provided scratch arrays. */
static void sparse(
double a[],		/* a[1:*] */
int colidx[],	/* colidx[1:*] */
int rowstr[],	/* rowstr[1:*] */
int n,
int arow[],		/* arow[1:*] */
int acol[],		/* acol[1:*] */
double aelt[],	/* aelt[1:*] */
int firstrow,
int lastrow,
double x[],		/* x[1:n] */
boolean mark[],	/* mark[1:n] */
int nzloc[],	/* nzloc[1:n] */
int nnza)
/*---------------------------------------------------------------------
c rows range from firstrow to lastrow
c the rowstr pointers are defined for nrows = lastrow-firstrow+1 values
c---------------------------------------------------------------------*/
{
int nrows;
int i, j, jajp1, nza, k, nzrow;
double xi;
/*--------------------------------------------------------------------
c how many rows of result
c-------------------------------------------------------------------*/
nrows = lastrow - firstrow + 1;
/*--------------------------------------------------------------------
c ...count the number of triples in each row
c-------------------------------------------------------------------*/
#pragma omp parallel for default(shared) private(j)
for (j = 1; j <= n; j++) {
rowstr[j] = 0;
mark[j] = FALSE;
}
rowstr[n+1] = 0;
/* histogram of row sizes, stored shifted by one slot so the prefix
sum below yields each row's start offset directly */
for (nza = 1; nza <= nnza; nza++) {
j = (arow[nza] - firstrow + 1) + 1;
rowstr[j] = rowstr[j] + 1;
}
/* prefix sum (1-based): after this, rowstr[j] is the first free slot
of row j-1 during the bucket sort below */
rowstr[1] = 1;
for (j = 2; j <= nrows+1; j++) {
rowstr[j] = rowstr[j] + rowstr[j-1];
}
/*---------------------------------------------------------------------
c ... rowstr(j) now is the location of the first nonzero
c of row j of a
c---------------------------------------------------------------------*/
/*---------------------------------------------------------------------
c ... preload data pages
c---------------------------------------------------------------------*/
#pragma omp parallel for default(shared) private(k,j)
for(j = 0;j <= nrows-1;j++) {
for(k = rowstr[j];k <= rowstr[j+1]-1;k++)
a[k] = 0.0;
}
/*--------------------------------------------------------------------
c ... do a bucket sort of the triples on the row index
c-------------------------------------------------------------------*/
/* note: rowstr[j] is advanced as row j fills up */
for (nza = 1; nza <= nnza; nza++) {
j = arow[nza] - firstrow + 1;
k = rowstr[j];
a[k] = aelt[nza];
colidx[k] = acol[nza];
rowstr[j] = rowstr[j] + 1;
}
/*--------------------------------------------------------------------
c ... rowstr(j) now points to the first element of row j+1
c-------------------------------------------------------------------*/
/* shift the pointers back down by one row to restore row starts */
for (j = nrows; j >= 1; j--) {
rowstr[j+1] = rowstr[j];
}
rowstr[1] = 1;
/*--------------------------------------------------------------------
c ... generate the actual output rows by adding elements
c-------------------------------------------------------------------*/
/* duplicates are merged by scattering each row into the dense scratch
vector x, then gathering the nonzeros back out in-place into a */
nza = 0;
#pragma omp parallel for default(shared) private(i)
for (i = 1; i <= n; i++) {
x[i] = 0.0;
mark[i] = FALSE;
}
jajp1 = rowstr[1];
for (j = 1; j <= nrows; j++) {
nzrow = 0;
/*--------------------------------------------------------------------
c ...loop over the jth row of a
c-------------------------------------------------------------------*/
for (k = jajp1; k < rowstr[j+1]; k++) {
i = colidx[k];
x[i] = x[i] + a[k];
if ( mark[i] == FALSE && x[i] != 0.0) {
mark[i] = TRUE;
nzrow = nzrow + 1;
nzloc[nzrow] = i;
}
}
/*--------------------------------------------------------------------
c ... extract the nonzeros of this row
c-------------------------------------------------------------------*/
for (k = 1; k <= nzrow; k++) {
i = nzloc[k];
mark[i] = FALSE;
xi = x[i];
x[i] = 0.0;
if (xi != 0.0) {
/* compact the merged row back into a/colidx; nza trails the read
position, so this overwrites only already-consumed slots */
nza = nza + 1;
a[nza] = xi;
colidx[nza] = i;
}
}
jajp1 = rowstr[j+1];
rowstr[j+1] = nza + rowstr[1];
}
}
/*---------------------------------------------------------------------
c generate a sparse n-vector (v, iv)
c having nzv nonzeros
c
c mark(i) is set to 1 if position i is nonzero.
c mark is all zero on entry and is reset to all zero before exit
c this corrects a performance bug found by John G. Lewis, caused by
c reinitialization of mark on every one of the n calls to sprnvc
---------------------------------------------------------------------*/
static void sprnvc(
    int n,
    int nz,
    double v[],     /* v[1:*] */
    int iv[],       /* iv[1:*] */
    int nzloc[],    /* nzloc[1:n] */
    int mark[] )    /* mark[1:n] */
{
    int pow2;
    int filled, marked, ii, idx;
    double vecelt, vecloc;

    filled = 0;   /* nonzeros generated so far */
    marked = 0;   /* positions recorded for the final unmark pass */

    /* smallest power of two not less than n (and at least 2) */
    pow2 = 2;
    while (pow2 < n)
        pow2 = 2 * pow2;

    while (filled < nz) {
        vecelt = randlc(&tran, amult);
        /* generate an integer between 1 and n in a portable manner */
        vecloc = randlc(&tran, amult);
        idx = icnvrt(vecloc, pow2) + 1;
        if (idx > n)
            continue;
        /* skip positions that were generated already */
        if (mark[idx] != 0)
            continue;

        mark[idx] = 1;
        marked = marked + 1;
        nzloc[marked] = idx;
        filled = filled + 1;
        v[filled] = vecelt;
        iv[filled] = idx;
    }

    /* reset mark to all zero before exit (avoids reinitializing it on
       every one of the n calls -- performance fix due to J. G. Lewis) */
    for (ii = 1; ii <= marked; ii++)
        mark[nzloc[ii]] = 0;
}
/*---------------------------------------------------------------------
* scale a double precision number x in (0,1) by a power of 2 and chop it
*---------------------------------------------------------------------*/
/*---------------------------------------------------------------------
 * scale a double precision number x in (0,1) by a power of 2 and chop it
 *---------------------------------------------------------------------*/
static int icnvrt(double x, int ipwr2) {
    double scaled = ipwr2 * x;
    return (int)scaled;
}
/*--------------------------------------------------------------------
c set ith element of sparse vector (v, iv) with
c nzv nonzeros to val
c-------------------------------------------------------------------*/
static void vecset(
    int n,
    double v[],     /* v[1:*] */
    int iv[],       /* iv[1:*] */
    int *nzv,
    int i,
    double val)
{
    int k = 1;
    boolean found = FALSE;

    /* overwrite every existing occurrence of index i */
    while (k <= *nzv) {
        if (iv[k] == i) {
            v[k] = val;
            found = TRUE;
        }
        k = k + 1;
    }

    /* index i not present yet: append a new entry */
    if (found == FALSE) {
        *nzv = *nzv + 1;
        v[*nzv] = val;
        iv[*nzv] = i;
    }
}
|
DRB095-doall2-taskloop-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Two-dimensional array computation:
Only one loop is associated with omp taskloop.
The inner loop's loop iteration variable will be shared if it is shared in the enclosing context.
Data race pairs (we allow multiple ones to preserve the pattern):
Write_set = {j@69:14, j@69:30}
Read_set = {j@69:21, j@69:30, j@70:16}
Any pair from Write_set vs. Write_set and Write_set vs. Read_set is a data race pair.
*/
#include <stdio.h>
int a[100][100];
// Driver for the DataRaceBench kernel: increments every element of the
// global 100x100 array `a` once and prints a[50][50] as a checksum.
// NOTE(review): the file header describes an omp taskloop version with an
// intentional data race on j, but this rendition uses nested
// `parallel for` with j privatized at both levels -- confirm against the
// upstream DRB095 source before relying on the race description above.
int main()
{
int i, j;
{
#pragma omp parallel for private(j )
for (i = 0; i < 100; i++)
// nested parallel region: each outer iteration starts its own team
#pragma omp parallel for private(j )
for (j = 0; j < 100; j++)
a[i][j]+=1;
}
printf ("a[50][50]=%d\n", a[50][50]);
return 0;
}
|
fft.c | /* Copyright 2013-2014. The Regents of the University of California.
* Copyright 2016-2018. Martin Uecker.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2011-2018 Martin Uecker <martin.uecker@med.uni-goettingen.de>
* 2014 Frank Ong <frankong@berkeley.edu>
*
*
* FFT. It uses FFTW or CUFFT internally.
*
*
* Gauss, Carl F. 1805. "Nachlass: Theoria Interpolationis Methodo Nova
* Tractata." Werke 3, pp. 265-327, Königliche Gesellschaft der
* Wissenschaften, Göttingen, 1866
*/
#include <assert.h>
#include <complex.h>
#include <stdbool.h>
#include <math.h>
#include <fftw3.h>
#include "num/multind.h"
#include "num/flpmath.h"
#include "num/ops.h"
#include "misc/misc.h"
#include "misc/debug.h"
#include "fft.h"
#undef fft_plan_s
#ifdef USE_CUDA
#include "num/gpuops.h"
#include "fft-cuda.h"
#define LAZY_CUDA
#endif
/*
 * Scale by 1/sqrt(M), where M is the number of points in the flagged
 * (transformed) dimensions, i.e. normalize to make the FFT unitary.
 */
void fftscale2(unsigned int N, const long dimensions[N], unsigned long flags, const long ostrides[N], complex float* dst, const long istrides[N], const complex float* src)
{
	long tdims[N];
	md_select_dims(N, flags, tdims, dimensions);

	long points = md_calc_size(N, tdims);
	float factor = 1. / sqrtf((float)points);

	md_zsmul2(N, dimensions, ostrides, dst, istrides, src, factor);
}
/* Non-strided variant of fftscale2: derives contiguous strides itself. */
void fftscale(unsigned int N, const long dims[N], unsigned long flags, complex float* dst, const complex float* src)
{
	long st[N];
	md_calc_strides(N, st, dims, CFL_SIZE);

	fftscale2(N, dims, flags, st, dst, st, src);
}
/*
 * Linear phase ramp used by fftmod: for a length-n dimension, sample j
 * gets phase (j - (n/2)/2) * (n/2)/n (in units of full turns).
 */
static double fftmod_phase(long length, int j)
{
	long center = length / 2;
	double slope = (double)center / (double)length;
	double offset = (double)center / 2.;

	return ((double)j - offset) * slope;
}
/*
 * Recursive worker for fftmod/ifftmod: applies the phase modulation over
 * all dimensions selected in `flags`, peeling off the highest flagged
 * dimension per recursion level and accumulating its per-index phase
 * contribution in `phase`. `inv` negates the phase (ifftmod case).
 */
static void fftmod2_r(unsigned int N, const long dims[N], unsigned long flags, const long ostrs[N], complex float* dst, const long istrs[N], const complex float* src, bool inv, double phase)
{
/* no flagged dimensions left: apply the accumulated scalar phase */
if (0 == flags) {
md_zsmul2(N, dims, ostrs, dst, istrs, src, cexp(M_PI * 2.i * (inv ? -phase : phase)));
return;
}
/* this will also currently be slow on the GPU because we do not
* support strides there on the lowest level */
/* find the highest flagged dimension */
unsigned int i = N - 1;
while (!MD_IS_SET(flags, i))
i--;
#if 1
// If there is only one dimensions left and it is the innermost
// which is contiguous optimize using md_zfftmod2
if ((0u == MD_CLEAR(flags, i)) && (1 == md_calc_size(i, dims))
&& (CFL_SIZE == ostrs[i]) && (CFL_SIZE == istrs[i])) {
md_zfftmod2(N - i, dims + i, ostrs + i, dst, istrs + i, src, inv, phase);
return;
}
#endif
/* otherwise loop over dimension i (in parallel), recursing with that
* flag cleared and its phase contribution folded into `phase`;
* strides are in bytes, hence the (void*) pointer arithmetic */
long tdims[N];
md_select_dims(N, ~MD_BIT(i), tdims, dims);
#pragma omp parallel for
for (int j = 0; j < dims[i]; j++)
fftmod2_r(N, tdims, MD_CLEAR(flags, i),
ostrs, (void*)dst + j * ostrs[i], istrs, (void*)src + j * istrs[i],
inv, phase + fftmod_phase(dims[i], j));
}
/*
 * Return the index of the first dimension whose size is not 1,
 * or N-1 if every dimension is a singleton.
 */
static unsigned int md_find_first_case_element(unsigned int N, const long dims[N])
{
	unsigned int i = 0;

	while (i < N) {

		if (1 != dims[i])
			return i;

		++i;
	}

	return N - 1; // all dimensions are singletons
}
/*
 * Starting at dimension `first`, return the last index of the contiguous
 * group of dimensions sharing the same flag state as `first`.
 */
static unsigned int md_find_last_case_element(unsigned int N, unsigned long flags, unsigned int first)
{
	unsigned int state = (MD_IS_SET(flags, first) != 0);
	unsigned int last = first;

	for (unsigned int k = first + 1; k < N; ++k) {

		if ((MD_IS_SET(flags, k) != 0) != state)
			break;

		last = k;
	}

	return last;
}
/*
 * Fast path for fftmod/ifftmod when every active (flagged, size>1)
 * dimension is a multiple of 4: the modulation then reduces to
 * multiplying each element by +1 or -1 in a checkerboard pattern,
 * avoiding complex exponentials entirely.
 */
static void fftmod_even(unsigned int N, const long dims[N], unsigned long flags, complex float* dst, const complex float* src)
{
// Get the total number of elements
long relevant_dims[N];
md_select_dims(N, flags, relevant_dims, dims);
unsigned int first_case_element = md_find_first_case_element(N, dims);
unsigned int case_element = md_find_last_case_element(N, flags, first_case_element);
long total_elements = md_calc_size(N, dims);
long inner_elements = md_calc_size(first_case_element+1, dims);
long outer_loop_size = total_elements / inner_elements;
// Compute whether the first element is 1 or -1 depending on the array sizes.
double tmp_phase = .0;
for (int p = 0; p < N ; p++) {
if (relevant_dims[p] > 1)
tmp_phase += fftmod_phase(relevant_dims[p], 0);
}
double rem = tmp_phase - floor(tmp_phase);
float initial_phase = .0;
// rem should be 0 or 0.5 here (active sizes are multiples of 4);
// any other value leaves initial_phase at 0
if (rem == 0.)
initial_phase = 1.;
else if (rem == 0.5)
initial_phase = -1.;
// Check what case is this by looking at the innermost dimensions
if (!MD_IS_SET(flags, case_element)) {
// Case #1: the innermost dimensions are not flagged, thus elements are multiplied by the same value
#pragma omp parallel for
for (long ol = 0; ol < outer_loop_size; ol++) {
// Capture the phase for the first element and the other will be the same
long iter_i[N];
long ii = ol * inner_elements;
long phase_idx = 0;
// decompose the linear index into coordinates; only coordinates of
// active dimensions contribute to the sign parity
for (int p = 0; p < N && ii > 0; p++) {
if (dims[p] <=1)
continue;
iter_i[p] = ii % dims[p];
ii /= dims[p];
// Compute also the phase and the position in the src and dst arrays
phase_idx += iter_i[p] * (relevant_dims[p] > 1);
}
float phase = (phase_idx % 2 == 0) * initial_phase + (phase_idx % 2 == 1) * (-initial_phase);
long last_element = (ol+1) * inner_elements;
#pragma omp simd
for (long il = ol * inner_elements; il < last_element; il++) {
dst[il] = src[il] * phase;
}
}
} else {
// Case #2: the innermost dimensions are flagged, thus elements are multiplied by phases with alternating sign
#pragma omp parallel for
for (long ol = 0; ol < outer_loop_size; ol++) {
// Capture the phase for the first element and the other will be alternating for the innermost block
long iter_i[N];
long ii = ol * inner_elements;
long phase_idx = 0;
for (int p = 0; p < N && ii > 0; p++) {
if (dims[p] <=1)
continue;
iter_i[p] = ii % dims[p];
ii /= dims[p];
// Compute also the phase and the position in the src and dst arrays
phase_idx += iter_i[p] * (relevant_dims[p] > 1);
}
float phase = (phase_idx % 2 == 0) * initial_phase + (phase_idx % 2 == 1) * (-initial_phase);
long last_element = (ol+1) * inner_elements;
#pragma omp simd
for (long il = ol * inner_elements; il < last_element; il++) {
// sign alternates along the contiguous innermost block
float alt_phase = (il % 2 == 0) * phase + (il % 2 == 1) * (-phase);
dst[il] = src[il] * alt_phase;
}
}
}
}
/*
 * Drop from `flags` every bit that selects a singleton (size-1) dimension.
 */
static unsigned long clear_singletons(unsigned int N, const long dims[N], unsigned long flags)
{
	for (unsigned int i = 0; i < N; i++)
		if (1 == dims[i])
			flags = MD_CLEAR(flags, i);

	return flags;
}
/* Apply the forward fftmod phase modulation (strided variant). */
void fftmod2(unsigned int N, const long dims[N], unsigned long flags, const long ostrs[N], complex float* dst, const long istrs[N], const complex float* src)
{
	unsigned long active = clear_singletons(N, dims, flags);

	fftmod2_r(N, dims, active, ostrs, dst, istrs, src, false, 0.);
}
/*
* The correct usage is fftmod before and after fft and
* ifftmod before and after ifft (this is different from
* how fftshift/ifftshift has to be used)
*/
/* Apply the inverse fftmod phase modulation (strided variant). */
void ifftmod2(unsigned int N, const long dims[N], unsigned long flags, const long ostrs[N], complex float* dst, const long istrs[N], const complex float* src)
{
	unsigned long active = clear_singletons(N, dims, flags);

	fftmod2_r(N, dims, active, ostrs, dst, istrs, src, true, 0.);
}
/*
 * fftmod on contiguous data: picks the fast +/-1 path when every active
 * dimension is divisible by 4, otherwise the general strided code.
 */
void fftmod(unsigned int N, const long dimensions[N], unsigned long flags, complex float* dst, const complex float* src)
{
	long relevant_dims[N];
	md_select_dims(N, flags, relevant_dims, dimensions);

	if (md_calc_all_modulo(N, relevant_dims, 4)) {

		// fast case: modulation degenerates to a +/-1 pattern
		fftmod_even(N, dimensions, clear_singletons(N, dimensions, flags), dst, src);

	} else {

		// general case
		long strs[N];
		md_calc_strides(N, strs, dimensions, CFL_SIZE);
		fftmod2(N, dimensions, flags, strs, dst, strs, src);
	}
}
/*
 * ifftmod on contiguous data: picks the fast +/-1 path when every active
 * dimension is divisible by 4, otherwise the general strided code.
 * (The +/-1 pattern is its own inverse, so fftmod_even serves both.)
 */
void ifftmod(unsigned int N, const long dimensions[N], unsigned long flags, complex float* dst, const complex float* src)
{
	long relevant_dims[N];
	md_select_dims(N, flags, relevant_dims, dimensions);

	if (md_calc_all_modulo(N, relevant_dims, 4)) {

		// fast case: modulation degenerates to a +/-1 pattern
		fftmod_even(N, dimensions, clear_singletons(N, dimensions, flags), dst, src);

	} else {

		// general case
		long strs[N];
		md_calc_strides(N, strs, dimensions, CFL_SIZE);
		ifftmod2(N, dimensions, flags, strs, dst, strs, src);
	}
}
/* Inverse FFT shift: circularly shift each flagged dimension by
 * ceil(n/2), undoing fftshift2's floor(n/2). */
void ifftshift2(unsigned int N, const long dims[N], unsigned long flags, const long ostrs[N], complex float* dst, const long istrs[N], const complex float* src)
{
	long pos[N];
	md_set_dims(N, pos, 0);

	for (unsigned int i = 0; i < N; i++) {

		if (!MD_IS_SET(flags, i))
			continue;

		pos[i] = dims[i] - dims[i] / 2;
	}

	md_circ_shift2(N, dims, pos, ostrs, dst, istrs, src, CFL_SIZE);
}
/* Non-strided variant of ifftshift2. */
void ifftshift(unsigned int N, const long dimensions[N], unsigned long flags, complex float* dst, const complex float* src)
{
	long st[N];
	md_calc_strides(N, st, dimensions, CFL_SIZE);

	ifftshift2(N, dimensions, flags, st, dst, st, src);
}
/* FFT shift: circularly shift each flagged dimension by floor(n/2). */
void fftshift2(unsigned int N, const long dims[N], unsigned long flags, const long ostrs[N], complex float* dst, const long istrs[N], const complex float* src)
{
	long pos[N];
	md_set_dims(N, pos, 0);

	for (unsigned int i = 0; i < N; i++) {

		if (!MD_IS_SET(flags, i))
			continue;

		pos[i] = dims[i] / 2;
	}

	md_circ_shift2(N, dims, pos, ostrs, dst, istrs, src, CFL_SIZE);
}
/* Non-strided variant of fftshift2. */
void fftshift(unsigned int N, const long dimensions[N], unsigned long flags, complex float* dst, const complex float* src)
{
	long st[N];
	md_calc_strides(N, st, dimensions, CFL_SIZE);

	fftshift2(N, dimensions, flags, st, dst, st, src);
}
// Operator payload for an FFT: the FFTW plan plus (with USE_CUDA) a
// CUFFT plan and, under LAZY_CUDA, the layout metadata needed to create
// that CUFFT plan on first GPU use (see fft_apply).
struct fft_plan_s {
INTERFACE(operator_data_t);
fftwf_plan fftw;
#ifdef USE_CUDA
#ifdef LAZY_CUDA
unsigned int D; // number of dimensions
unsigned long flags; // which dimensions are transformed
bool backwards; // inverse transform?
const long* dims; // owned copies, freed in fft_free_plan
const long* istrs;
const long* ostrs;
#endif
struct fft_cuda_plan_s* cuplan; // NULL until created (lazily or eagerly)
#endif
};
static DEF_TYPEID(fft_plan_s);
/*
 * Build an FFTW guru64 plan: dimensions selected in `flags` become
 * transform dimensions, all others become "howmany" (loop) dimensions.
 * Strides are given in bytes and converted to complex-float units here.
 */
static fftwf_plan fft_fftwf_plan(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src, bool backwards, bool measure)
{
unsigned int N = D;
fftwf_iodim64 dims[N];
fftwf_iodim64 hmdims[N];
unsigned int k = 0;
unsigned int l = 0;
//FFTW seems to be fine with this
//assert(0 != flags);
for (unsigned int i = 0; i < N; i++) {
if (MD_IS_SET(flags, i)) {
dims[k].n = dimensions[i];
dims[k].is = istrides[i] / CFL_SIZE;
dims[k].os = ostrides[i] / CFL_SIZE;
k++;
} else {
hmdims[l].n = dimensions[i];
hmdims[l].is = istrides[i] / CFL_SIZE;
hmdims[l].os = ostrides[i] / CFL_SIZE;
l++;
}
}
fftwf_plan fftwf;
// serialize planning: the FFTW planner is not thread-safe
#pragma omp critical
fftwf = fftwf_plan_guru64_dft(k, dims, l, hmdims, (complex float*)src, dst,
backwards ? 1 : (-1), measure ? FFTW_MEASURE : FFTW_ESTIMATE);
return fftwf;
}
/*
 * Operator apply callback: run the planned FFT on (dst, src).
 * With USE_CUDA, dispatches to the CUFFT plan when src is on the GPU,
 * creating it on first use under LAZY_CUDA; otherwise executes the
 * FFTW plan.
 */
static void fft_apply(const operator_data_t* _plan, unsigned int N, void* args[N])
{
complex float* dst = args[0];
const complex float* src = args[1];
const auto plan = CAST_DOWN(fft_plan_s, _plan);
assert(2 == N);
#ifdef USE_CUDA
if (cuda_ondevice(src)) {
#ifdef LAZY_CUDA
// const is cast away here: lazy creation mutates the cached plan
if (NULL == plan->cuplan)
((struct fft_plan_s*)plan)->cuplan = fft_cuda_plan(plan->D, plan->dims, plan->flags, plan->ostrs, plan->istrs, plan->backwards);
#endif
assert(NULL != plan->cuplan);
fft_cuda_exec(plan->cuplan, dst, src);
} else
#endif
{
assert(NULL != plan->fftw);
// const cast required by the FFTW execute-dft interface
fftwf_execute_dft(plan->fftw, (complex float*)src, dst);
}
}
/*
 * Operator destructor: releases the FFTW plan, the cached layout copies
 * (LAZY_CUDA), the CUFFT plan if one was created, and the payload itself.
 */
static void fft_free_plan(const operator_data_t* _data)
{
const auto plan = CAST_DOWN(fft_plan_s, _data);
fftwf_destroy_plan(plan->fftw);
#ifdef USE_CUDA
#ifdef LAZY_CUDA
xfree(plan->dims);
xfree(plan->istrs);
xfree(plan->ostrs);
#endif
if (NULL != plan->cuplan)
fft_cuda_free_plan(plan->cuplan);
#endif
xfree(plan);
}
/*
 * Create an FFT operator planned with FFTW_MEASURE. Full-size scratch
 * buffers are allocated (and freed again) just for the planning step;
 * `inplace` selects whether planning assumes dst == src.
 */
const struct operator_s* fft_measure_create(unsigned int D, const long dimensions[D], unsigned long flags, bool inplace, bool backwards)
{
PTR_ALLOC(struct fft_plan_s, plan);
SET_TYPEID(fft_plan_s, plan);
complex float* src = md_alloc(D, dimensions, CFL_SIZE);
complex float* dst = inplace ? src : md_alloc(D, dimensions, CFL_SIZE);
long strides[D];
md_calc_strides(D, strides, dimensions, CFL_SIZE);
plan->fftw = fft_fftwf_plan(D, dimensions, flags, strides, dst, strides, src, backwards, true);
md_free(src);
if (!inplace)
md_free(dst);
#ifdef USE_CUDA
plan->cuplan = NULL;
#ifndef LAZY_CUDA
// NOTE(review): src was already freed above, so cuda_ondevice(src)
// inspects a stale pointer here -- confirm against gpuops semantics
if (cuda_ondevice(src))
plan->cuplan = fft_cuda_plan(D, dimensions, flags, strides, strides, backwards);
#else
// stash copies of the layout so the CUFFT plan can be built lazily
plan->D = D;
plan->flags = flags;
plan->backwards = backwards;
PTR_ALLOC(long[D], dims);
md_copy_dims(D, *dims, dimensions);
plan->dims = *PTR_PASS(dims);
PTR_ALLOC(long[D], istrs);
md_copy_strides(D, *istrs, strides);
plan->istrs = *PTR_PASS(istrs);
PTR_ALLOC(long[D], ostrs);
md_copy_strides(D, *ostrs, strides);
plan->ostrs = *PTR_PASS(ostrs);
#endif
#endif
return operator_create2(D, dimensions, strides, D, dimensions, strides, CAST_UP(PTR_PASS(plan)), fft_apply, fft_free_plan);
}
// Create an FFT operator for explicitly strided input/output arrays.
// The FFTW plan is built with FFTW_ESTIMATE (measure == false), so dst
// and src are only used for address/alignment information here.
const struct operator_s* fft_create2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src, bool backwards)
{
	PTR_ALLOC(struct fft_plan_s, plan);
	SET_TYPEID(fft_plan_s, plan);

	plan->fftw = fft_fftwf_plan(D, dimensions, flags, ostrides, dst, istrides, src, backwards, false);

#ifdef USE_CUDA
	plan->cuplan = NULL;
#ifndef LAZY_CUDA
	// eager CUDA planning: only when the input already lives on the GPU
	if (cuda_ondevice(src))
		plan->cuplan = fft_cuda_plan(D, dimensions, flags, ostrides, istrides, backwards);
#else
	// capture dims/strides so fft_apply can build the CUDA plan on demand
	plan->D = D;
	plan->flags = flags;
	plan->backwards = backwards;

	PTR_ALLOC(long[D], dims);
	md_copy_dims(D, *dims, dimensions);
	plan->dims = *PTR_PASS(dims);

	PTR_ALLOC(long[D], istrs);
	md_copy_strides(D, *istrs, istrides);
	plan->istrs = *PTR_PASS(istrs);

	PTR_ALLOC(long[D], ostrs);
	md_copy_strides(D, *ostrs, ostrides);
	plan->ostrs = *PTR_PASS(ostrs);
#endif
#endif
	return operator_create2(D, dimensions, ostrides, D, dimensions, istrides, CAST_UP(PTR_PASS(plan)), fft_apply, fft_free_plan);
}
// Create an FFT operator using trivial (contiguous) strides for both
// the input and the output array.
const struct operator_s* fft_create(unsigned int D, const long dimensions[D], unsigned long flags, complex float* dst, const complex float* src, bool backwards)
{
	long strs[D];
	md_calc_strides(D, strs, dimensions, CFL_SIZE);

	return fft_create2(D, dimensions, flags, strs, dst, strs, src, backwards);
}
// Execute a previously created FFT operator on dst/src.  The buffers
// must match the layout the operator was created for.
void fft_exec(const struct operator_s* o, complex float* dst, const complex float* src)
{
	operator_apply_unchecked(o, dst, src);
}
// Release an FFT operator; the registered cleanup callback
// (fft_free_plan) tears down the underlying plans.
void fft_free(const struct operator_s* o)
{
	operator_free(o);
}
// One-shot strided forward FFT: build a plan, execute it, tear it down.
void fft2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
	const struct operator_s* op = fft_create2(D, dimensions, flags, ostrides, dst, istrides, src, false);

	fft_exec(op, dst, src);
	fft_free(op);
}
// One-shot strided inverse (backwards) FFT.
void ifft2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
	const struct operator_s* op = fft_create2(D, dimensions, flags, ostrides, dst, istrides, src, true);

	fft_exec(op, dst, src);
	fft_free(op);
}
// One-shot forward FFT with contiguous strides.
void fft(unsigned int D, const long dimensions[D], unsigned long flags, complex float* dst, const complex float* src)
{
	const struct operator_s* op = fft_create(D, dimensions, flags, dst, src, false);

	fft_exec(op, dst, src);
	fft_free(op);
}
// One-shot inverse (backwards) FFT with contiguous strides.
void ifft(unsigned int D, const long dimensions[D], unsigned long flags, complex float* dst, const complex float* src)
{
	const struct operator_s* op = fft_create(D, dimensions, flags, dst, src, true);

	fft_exec(op, dst, src);
	fft_free(op);
}
// Centered forward FFT: fftmod / fft / fftmod avoids explicit fftshift
// copies.  src is read only by the first fftmod; the later stages run
// in-place on dst.
void fftc(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src)
{
	fftmod(D, dimensions, flags, dst, src);
	fft(D, dimensions, flags, dst, dst);
	fftmod(D, dimensions, flags, dst, dst);
}
// Centered inverse FFT: ifftmod / ifft / ifftmod (mirror of fftc).
// src is read only by the first ifftmod; later stages are in-place.
void ifftc(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src)
{
	ifftmod(D, dimensions, flags, dst, src);
	ifft(D, dimensions, flags, dst, dst);
	ifftmod(D, dimensions, flags, dst, dst);
}
// Centered strided forward FFT.  The input strides apply only to the
// first fftmod2 (which moves src into dst); the remaining stages work
// in-place on dst with the output strides.
void fftc2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
	fftmod2(D, dimensions, flags, ostrides, dst, istrides, src);
	fft2(D, dimensions, flags, ostrides, dst, ostrides, dst);
	fftmod2(D, dimensions, flags, ostrides, dst, ostrides, dst);
}
// Centered strided inverse FFT (mirror of fftc2); istrides are consumed
// by the first ifftmod2 only, everything after is in-place on dst.
void ifftc2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
	ifftmod2(D, dimensions, flags, ostrides, dst, istrides, src);
	ifft2(D, dimensions, flags, ostrides, dst, ostrides, dst);
	ifftmod2(D, dimensions, flags, ostrides, dst, ostrides, dst);
}
// Unitary forward FFT: plain FFT followed by fftscale normalization.
void fftu(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src)
{
	fft(D, dimensions, flags, dst, src);
	fftscale(D, dimensions, flags, dst, dst);
}
// Unitary inverse FFT.  The same fftscale is applied as in fftu because
// the unitary normalization is symmetric between forward and inverse
// transforms (presumably a 1/sqrt(N) factor -- confirm in fftscale).
void ifftu(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src)
{
	ifft(D, dimensions, flags, dst, src);
	fftscale(D, dimensions, flags, dst, dst);
}
// Unitary strided forward FFT: fft2 then in-place fftscale2 on dst.
void fftu2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
	fft2(D, dimensions, flags, ostrides, dst, istrides, src);
	fftscale2(D, dimensions, flags, ostrides, dst, ostrides, dst);
}
// Unitary strided inverse FFT: ifft2 then in-place fftscale2 on dst.
void ifftu2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
	ifft2(D, dimensions, flags, ostrides, dst, istrides, src);
	fftscale2(D, dimensions, flags, ostrides, dst, ostrides, dst);
}
// Unitary centered forward FFT: fftc followed by fftscale.
void fftuc(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src)
{
	fftc(D, dimensions, flags, dst, src);
	fftscale(D, dimensions, flags, dst, dst);
}
// Unitary centered inverse FFT: ifftc followed by fftscale.
void ifftuc(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src)
{
	ifftc(D, dimensions, flags, dst, src);
	fftscale(D, dimensions, flags, dst, dst);
}
// Unitary centered strided forward FFT: fftc2 then in-place fftscale2.
void fftuc2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
	fftc2(D, dimensions, flags, ostrides, dst, istrides, src);
	fftscale2(D, dimensions, flags, ostrides, dst, ostrides, dst);
}
// Unitary centered strided inverse FFT: ifftc2 then in-place fftscale2.
void ifftuc2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
	ifftc2(D, dimensions, flags, ostrides, dst, istrides, src);
	fftscale2(D, dimensions, flags, ostrides, dst, ostrides, dst);
}
// set once the first call has initialized FFTW's threading support
bool fft_threads_init = false;

// Configure the number of threads FFTW uses for subsequently created
// plans.  Both sections are guarded by (unnamed, hence mutually
// exclusive) omp critical regions because the FFTW planner API is not
// thread-safe; fftwf_init_threads() runs exactly once per process.
void fft_set_num_threads(unsigned int n)
{
#ifdef FFTWTHREADS
#pragma omp critical
	if (!fft_threads_init) {

		fft_threads_init = true;
		fftwf_init_threads();
	}
#pragma omp critical
	fftwf_plan_with_nthreads(n);
#else
	UNUSED(n);
#endif
}
|
GB_unop__identity_bool_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_bool_uint16)
// op(A') function: GB (_unop_tran__identity_bool_uint16)
// C type: bool
// A type: uint16_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
bool z = (bool) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
bool z = (bool) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the IDENTITY operator with typecast: Cx [p] = (bool) Ax [p].
// (Auto-generated kernel; structure must match the Generator template.)
GrB_Info GB (_unop_apply__identity_bool_uint16)
(
    bool *Cx,                   // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap; NULL otherwise
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse/full case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint16_t aij = Ax [p] ;
            bool z = (bool) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b,
        // so only positions with Ab [p] set carry values
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint16_t aij = Ax [p] ;
            bool z = (bool) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The actual transpose loop lives in the shared GB_unop_transpose.c
// template, which consumes the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_bool_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-thread workspaces for the template
    const int64_t *restrict A_slice,    // partition of A's vectors over threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
use_memory.c | /*
* This is just a test, to do a sanity check
* on the actual peak_memusage
* $Id$
*
*/
#include <stdio.h> /* print */
#include <stdlib.h> /* malloc */
#include <unistd.h> /* optarg */
#ifdef COMPILE_MPI
#include <mpi.h>
#endif
#define MAX_SIZE 2147483647
#define DEFAULT_SIZE 1000000
#define MAX_REPEAT 100
/* Print command-line usage for this tool to stderr and terminate the
 * process with exit status 1. */
void printUsage(char **argv) {
    fprintf(stderr, "\nUsage:\n%s [-s size] [-r repetitions] [-d delay (ms)] \n\n", argv[0]);
    exit(1);
}
/* Overwrite *data with the integer value of param (via atoi);
 * leave *data untouched when param was not supplied (NULL). */
void parseInt(int *data, char *param) {
    if (param == NULL)
        return;

    *data = atoi(param);
}
/*
 * Repeatedly allocate, touch, and free a block of `size * (rank+1)` ints
 * per thread/task, sleeping `delay` ms between repetitions, so that an
 * external peak-memory monitor can be sanity-checked.
 *
 * Fixes relative to the original:
 *  - the failed-validation fprintf used %f for the int `delay` (UB);
 *  - the byte count was stored in an int (overflows for large sizes);
 *  - spare_data was freed only once after the loop, leaking repeat-1
 *    buffers even though the banner claims "Allocating, using and freeing";
 *  - malloc is now checked before use.
 */
int main(int argc, char **argv) {
    int i, j, last, size = DEFAULT_SIZE, repeat = 1, delay=1000;
    int *spare_data, c, error, rank, poolsize;
    char *ch_size = NULL, *ch_repeat = NULL, *ch_delay=NULL, *ch_rank="thread";

#ifdef COMPILE_MPI
    if(error = MPI_Init(NULL, NULL)) {
        fprintf(stderr, "MPI INIT error: %d", error);
        return 1;
    }
    if(error = MPI_Comm_rank(MPI_COMM_WORLD, &rank)) {
        fprintf(stderr, "MPI RANK error: %d", error);
        return 1;
    }
    if(error = MPI_Comm_size(MPI_COMM_WORLD, &poolsize)) {
        fprintf(stderr, "MPI SIZE error: %d", error);
        return 1;
    }
    ch_rank="task";
#endif

    /* command line: -s element count, -r repetitions, -d delay in ms */
    while ((c = getopt (argc, argv, "s:r:d:")) != -1) {
        switch(c) {
        case 's':
            ch_size = optarg;
            break;
        case 'r':
            ch_repeat = optarg;
            break;
        case 'd':
            ch_delay = optarg;
            break;
        default:
            printUsage(argv);
        }
    }
    parseInt(&size, ch_size);
    parseInt(&repeat, ch_repeat);
    parseInt(&delay, ch_delay);

    if (size <= 0 || size > MAX_SIZE
        || repeat < 1 || repeat > MAX_REPEAT
        || delay < 0) {
        /* FIX: delay is an int; the original %f read it as a double (UB) */
        fprintf(stderr, "size:%d, repeat:%d, delay:%d\n", size, repeat, delay);
        printUsage(argv);
    }

#ifdef COMPILE_OMP
#pragma omp parallel default(none) private(rank, poolsize, i, j, last, spare_data) shared(size, delay, repeat, ch_rank)
    {
    rank = omp_get_thread_num();
    poolsize = omp_get_num_threads();
#elif !defined COMPILE_MPI
    rank = 0;
    poolsize = 1;
#endif
    for(i=0; i<repeat; i++) {
        last = size *(rank+1); /* every rank will have a different number */
        /* FIX: compute the byte count in size_t; last*sizeof(int) was
         * truncated through int and could overflow for large sizes */
        size_t amount = (size_t)last * sizeof(int);
        printf("Allocating, using and freeing %d ints (%.2f MiB) in %s %d/%d\n", last, amount/1048576., ch_rank, rank, poolsize);
        spare_data = malloc(amount);
        if (spare_data == NULL) {
            fprintf(stderr, "allocation of %d ints failed\n", last);
            exit(1);
        }
        for (j=0; j<last; j++)
            spare_data[j] = 24;
        usleep(delay * 1000);
        /* FIX: free every repetition; the original freed only the last
         * allocation and leaked the previous repeat-1 buffers */
        free(spare_data);
    }
#ifdef COMPILE_MPI
    if(error = MPI_Finalize()) {
        fprintf(stderr, "MPI FINALIZE error: %d", error);
        return 1;
    }
#else
#ifdef COMPILE_OMP
    }
#endif
#endif
    return 0;
}
|
vertex.c | // -----------------------------------------------------------------------------
//
// "00_AccelGraph"
//
// -----------------------------------------------------------------------------
// Copyright (c) 2014-2019 All rights reserved
// -----------------------------------------------------------------------------
// Author : Abdullah Mughrabi
// Email : atmughra@ncsu.edu||atmughrabi@gmail.com
// File : vertex.c
// Create : 2019-06-21 17:15:17
// Revise : 2019-09-28 15:36:13
// Editor : Abdullah Mughrabi
// -----------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdint.h>
#include <omp.h>
#include "reorder.h"
#include "graphCSR.h"
#include "vertex.h"
#include "myMalloc.h"
// Allocate a Vertex structure-of-arrays for num_vertices vertices with
// every per-vertex counter (out_degree, in_degree, edges_idx) zeroed.
struct Vertex *newVertexArray(uint32_t num_vertices)
{
    struct Vertex *vertex_array = (struct Vertex *) my_malloc(sizeof(struct Vertex));

    vertex_array->out_degree = (uint32_t *) my_malloc( num_vertices * sizeof(uint32_t));
    vertex_array->in_degree  = (uint32_t *) my_malloc( num_vertices * sizeof(uint32_t));
    vertex_array->edges_idx  = (uint32_t *) my_malloc( num_vertices * sizeof(uint32_t));

    uint32_t v;
    for (v = 0; v < num_vertices; v++)
    {
        vertex_array->out_degree[v] = 0;
        vertex_array->in_degree[v]  = 0;
        vertex_array->edges_idx[v]  = 0;
    }

    return vertex_array;
}
/*
 * Fill vertices->edges_idx so that edges_idx[v] holds the index of the
 * first edge whose (masked) source vertex is v in the sorted edge array.
 * Requires the edge list to be sorted by source vertex.
 * NOTE(review): reads edges_array_src[0] unconditionally -- assumes
 * graph->num_edges >= 1; confirm callers guarantee this.
 * VERTEX_CACHE_MASK_U32 presumably strips cache/reorder bits from the
 * packed vertex id -- TODO confirm against reorder.h.
 */
struct GraphCSR *mapVertices (struct GraphCSR *graph, uint8_t inverse)
{
    uint32_t i;
    uint32_t vertex_id;
    struct Vertex *vertices;
    struct EdgeList *sorted_edges_array;

#if DIRECTED
    // directed graphs keep a second, inverted edge list/vertex array
    if(inverse)
    {
        sorted_edges_array = graph->inverse_sorted_edges_array;
        vertices = graph->inverse_vertices; // sorted edge array
    }
    else
    {
        sorted_edges_array = graph->sorted_edges_array;
        vertices = graph->vertices;
    }
#else
    sorted_edges_array = graph->sorted_edges_array;
    vertices = graph->vertices;
#endif

    // first edge always starts the run of its source vertex
    vertex_id = VERTEX_CACHE_MASK_U32 & sorted_edges_array->edges_array_src[0];
    vertices->edges_idx[vertex_id] = 0;

    // each change of source vertex marks the start of a new run
    for(i = 1; i < graph->num_edges; i++)
    {
        if(sorted_edges_array->edges_array_src[i] != sorted_edges_array->edges_array_src[i - 1])
        {
            vertex_id = VERTEX_CACHE_MASK_U32 & sorted_edges_array->edges_array_src[i];
            vertices->edges_idx[vertex_id] = i;
        }
    }

    return graph;
}
/*
 * Partition a source-sorted edge list into P contiguous
 * [offset_start[i], offset_end[i]) ranges -- one per OpenMP thread --
 * such that no source vertex's run of edges is split across two
 * partitions.  offset_start/offset_end must hold at least P entries
 * (the caller sizes them via the same omp_get_num_threads discovery).
 */
void partitionEdgeListOffsetStartEnd(struct GraphCSR *graph, struct EdgeList *sorted_edges_array, uint32_t *offset_start, uint32_t *offset_end)
{
    uint32_t i;
    uint32_t j;
    uint32_t P = 1;

    // discover the OpenMP team size
#pragma omp parallel default(none) shared(P)
    {
        uint32_t t_id = omp_get_thread_num();

        if(t_id == 0)
        {
            P = omp_get_num_threads();
        }
    }

    // default all P slots to "empty at the end of the edge list"
    // (covers slots beyond the possibly clamped P below)
    for(i = 0 ; i < P ; i++)
    {
        offset_start[i] = graph->num_edges;
        offset_end[i] = graph->num_edges;
    }

    // never use more partitions than there are edges
    if(P > graph->num_edges && graph->num_edges != 0)
        P = graph->num_edges;

    for(i = 0 ; i < P ; i++)
    {
        offset_start[i] = 0;
        offset_end[i] = 0;
    }

    // partition 0 nominally gets the first num_edges/P edges
    offset_start[0] = 0;
    offset_end[0] = offset_start[0] + (graph->num_edges / P);

    if(1 == (P))
    {
        offset_end[0] = graph->num_edges;
    }

    for(i = 1 ; i < P ; i++)
    {
        // begin where the previous partition nominally ends
        j = offset_end[i - 1];

        if(j == graph->num_edges)
        {
            // previous partition already consumed everything
            offset_start[i] = graph->num_edges;
            offset_end[i] = graph->num_edges;
            continue;
        }

        // advance to the next source-vertex boundary so a vertex's edge
        // run is never split between two partitions
        for(; j < graph->num_edges; j++)
        {
            if(sorted_edges_array->edges_array_src[j] != sorted_edges_array->edges_array_src[j - 1])
            {
                offset_start[i] = j;
                offset_end[i - 1] = j;

                if(i == (P - 1))
                {
                    // last partition also takes the num_edges % P remainder
                    offset_end[i] = i * (graph->num_edges / P) + (graph->num_edges / P) + (graph->num_edges % P) ;
                }
                else
                {
                    offset_end[i] = offset_start[i] + (graph->num_edges / P);
                }

                if(offset_end[i] > graph->num_edges && offset_start[i] < graph->num_edges)
                {
                    // clamp partitions that boundary-adjustment pushed past the end
                    offset_end[i] = graph->num_edges;
                    // printf("3-%u %u\n", offset_start[i], offset_end[i] );
                }

                break;
            }
            else if(sorted_edges_array->edges_array_src[j] == sorted_edges_array->edges_array_src[j - 1] && j == (graph->num_edges - 1))
            {
                // the rest of the list belongs to a single vertex: the
                // previous partition absorbs it and this one becomes empty
                offset_start[i] = graph->num_edges;
                offset_end[i] = graph->num_edges;
                offset_end[i - 1] = graph->num_edges;
            }
        }
    }

    // for(i=0 ; i < P ; i++){
    //     printf("%u %u\n", offset_start[i], offset_end[i] );
    // }
}
/*
 * Build edges_idx and out_degree for every vertex from the sorted edge
 * list, in parallel over vertex-aligned partitions, then mirror the
 * resulting out-degrees into the opposite direction's in_degree array
 * (directed graphs only).  The edge list must already be sorted by
 * source vertex.
 */
struct GraphCSR *mapVerticesWithInOutDegree (struct GraphCSR *graph, uint8_t inverse)
{
    uint32_t i;
    uint32_t vertex_id;
    // uint32_t vertex_id_dest;
    uint32_t P = 1;
    struct Vertex *vertices;
    struct EdgeList *sorted_edges_array;
    uint32_t *offset_start_arr = NULL;
    uint32_t *offset_end_arr = NULL;

    // discover the team size and size the per-thread offset arrays;
    // the implicit barrier at the end of this region publishes both
    // allocations to all threads before they are used below
#pragma omp parallel default(none) shared(P,offset_start_arr,offset_end_arr)
    {
        uint32_t t_id = omp_get_thread_num();

        if(t_id == 0)
        {
            P = omp_get_num_threads();
            offset_start_arr = (uint32_t *) my_malloc( P * sizeof(uint32_t));
            offset_end_arr = (uint32_t *) my_malloc( P * sizeof(uint32_t));
        }
    }

    // for(vertex_id = 0; vertex_id < graph->num_vertices; vertex_id++){
    //     printf("-->v %u out_degree %u\n",vertex_id, graph->vertices->out_degree[vertex_id] );
    // }

#if DIRECTED
    if(inverse)
    {
        sorted_edges_array = graph->inverse_sorted_edges_array;
        vertices = graph->inverse_vertices; // sorted edge array
    }
    else
    {
        sorted_edges_array = graph->sorted_edges_array;
        vertices = graph->vertices;
    }
#else
    sorted_edges_array = graph->sorted_edges_array;
    vertices = graph->vertices;
#endif

    //edge list must be sorted
    partitionEdgeListOffsetStartEnd(graph, sorted_edges_array, offset_start_arr, offset_end_arr);

    uint32_t offset_start = 0;
    uint32_t offset_end = 0;

    // each thread processes its own vertex-aligned edge range; because
    // partitions never split a vertex's run, the per-vertex counters are
    // written by exactly one thread
#pragma omp parallel default(none) private(i,vertex_id) shared(inverse,graph,vertices,sorted_edges_array,offset_start_arr,offset_end_arr) firstprivate( offset_end,offset_start)
    {
        uint32_t t_id = omp_get_thread_num();

        offset_start = offset_start_arr[t_id];
        offset_end = offset_end_arr[t_id];

        // printf("t_id %u start %u end %u \n",t_id,offset_start, offset_end);

        if(offset_start < graph->num_edges)
        {
            // the first edge of a partition always starts a vertex run
            vertex_id = VERTEX_CACHE_MASK_U32 & sorted_edges_array->edges_array_src[offset_start];
            vertices->edges_idx[vertex_id] = offset_start;
            vertices->out_degree[vertex_id]++;

            for(i = offset_start + 1; i < offset_end; i++)
            {
                vertex_id = VERTEX_CACHE_MASK_U32 & sorted_edges_array->edges_array_src[i];
                vertices->out_degree[vertex_id]++;

                // a change of source vertex marks the start of a new run
                if(sorted_edges_array->edges_array_src[i] != sorted_edges_array->edges_array_src[i - 1])
                {
                    vertices->edges_idx[vertex_id] = i;
                }
            }
        }
    }

#if DIRECTED
    // an out-degree in one direction is the in-degree of the other
    if(!inverse)
    {
#pragma omp parallel for default(none) private(vertex_id) shared(vertices,graph)
        for(vertex_id = 0; vertex_id < graph->num_vertices ; vertex_id++)
        {
            graph->inverse_vertices->in_degree[vertex_id] = vertices->out_degree[vertex_id];
        }
    }
    else
    {
#pragma omp parallel for default(none) private(vertex_id) shared(vertices,graph)
        for(vertex_id = 0; vertex_id < graph->num_vertices ; vertex_id++)
        {
            graph->vertices->in_degree[vertex_id] = vertices->out_degree[vertex_id];
        }
    }
#endif

    free(offset_start_arr);
    free(offset_end_arr);

    // for(vertex_id = 0; vertex_id < graph->num_vertices; vertex_id++){
    //     printf("<--v %u out_degree %u\n",vertex_id, graph->vertices->out_degree[vertex_id] );
    // }

    return graph;
}
// Print the vertex with the maximum out-degree (ties resolve to the
// highest vertex id, matching the original max-then-compare logic).
void vertexArrayMaxOutdegree(struct Vertex *vertex_array, uint32_t num_vertices)
{
    uint32_t v;
    uint32_t max_out = 0;
    uint32_t max_idx = 0;

    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Node", "*max_out_degree", "in_degree");
    printf(" -----------------------------------------------------\n");

    for (v = 0; v < num_vertices; v++)
    {
        // >= keeps the last vertex attaining the maximum, exactly like
        // the original maxTwoIntegers/equality pair
        if (vertex_array->out_degree[v] >= max_out)
        {
            max_out = vertex_array->out_degree[v];
            max_idx = v;
        }
    }

    printf("| %-15u | %-15u | %-15u | \n", max_idx, vertex_array->out_degree[max_idx], vertex_array->in_degree[max_idx]);
    printf(" -----------------------------------------------------\n");
}
// Print the vertex with the maximum in-degree.
// FIX: the original scanned out_degree[] while looking for the maximum
// *in*-degree, and printed the in/out columns swapped relative to its
// own table header ("Node | out_degree | *max_in_degree") -- compare
// with vertexArrayMaxOutdegree, whose columns do match its header.
void vertexArrayMaxInDegree(struct Vertex *vertex_array, uint32_t num_vertices)
{
    uint32_t i;
    uint32_t in_degree = 0;
    uint32_t index = 0;

    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Node", "out_degree", "*max_in_degree");
    printf(" -----------------------------------------------------\n");

    for(i = 0; i < num_vertices; i++)
    {
        // track the running maximum over in_degree (ties keep the last id)
        in_degree = maxTwoIntegers(in_degree, vertex_array->in_degree[i]);
        if(vertex_array->in_degree[i] == in_degree)
            index = i;
    }

    // column order now matches the header: node, out_degree, in_degree
    printf("| %-15u | %-15u | %-15u | \n", index, vertex_array->out_degree[index], vertex_array->in_degree[index]);
    printf(" -----------------------------------------------------\n");
}
// Dump one table row per vertex that has at least one outgoing edge.
void printVertexArray(struct Vertex *vertex_array, uint32_t num_vertices)
{
    uint32_t v;

    printf("| %-15s | %-15s | %-15s |\n", "Node", "out_degree", "in_degree");

    for (v = 0; v < num_vertices; v++)
    {
        // skip vertices without outgoing edges
        if (vertex_array->out_degree[v] == 0)
            continue;

        printf("| %-15u | %-15u | %-15u | \n", v, vertex_array->out_degree[v], vertex_array->in_degree[v]);
    }
}
// Release a Vertex array and its per-vertex buffers.
// free(NULL) is a no-op per the C standard, so the per-member NULL
// checks in the original were redundant and have been dropped; the
// outer guard remains because vertices itself is dereferenced.
void freeVertexArray(struct Vertex *vertices)
{
    if (!vertices)
        return;

    free(vertices->edges_idx);
    free(vertices->out_degree);
    free(vertices->in_degree);
    free(vertices);
}
|
elemwise_binary_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file elemwise_binary_op.h
* \brief Function definition of elementwise binary operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#include <mxnet/operator_util.h>
#include <mxnet/op_attr_types.h>
#include <vector>
#include <string>
#include <utility>
#include <typeinfo>
#include <algorithm>
#include "../mxnet_op.h"
#include "../mshadow_op.h"
#include "../../engine/openmp.h"
#include "elemwise_unary_op.h"
#include "../../common/utils.h"
#include "./init_op.h"
#include "../operator_common.h"
namespace mxnet {
namespace op {
/*! Gather binary operator functions into ElemwiseBinaryOp class */
class ElemwiseBinaryOp : public OpBase {
public:
/*! \brief For sparse inputs, treat a missing rvalue as 0:
 *         out[i] = OP(lhs[i], 0), stored according to write-request Req. */
template <typename OP, int Req>
struct MissingRValueOp {
  typedef OP Operation;
  /*! \brief element kernel: out[i] = OP(lhs[i], DType(0)) */
  template <typename DType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const DType* lhs) {
    KERNEL_ASSIGN(out[i], Req, OP::Map(lhs[i], DType(0)));
  }
};
/*! \brief For sparse inputs, treat a missing lvalue as 0:
 *         out[i] = OP(0, rhs[i]), stored according to write-request Req. */
template <typename OP, int Req>
struct MissingLValueOp {
  typedef OP Operation;
  /*! \brief element kernel: out[i] = OP(DType(0), rhs[i]) */
  template <typename DType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const DType* rhs) {
    KERNEL_ASSIGN(out[i], Req, OP::Map(DType(0), rhs[i]));
  }
};
private:
/*!
* \brief CSR operation requires temp space
*/
enum ResourceRequestType { kTempSpace };
/*!
 * \brief Fill contiguous dense output rows with value computed from 0 lhs and 0 rhs input
 *        CPU-Only version.
 * Rows [iter_out, min(idx_l, idx_r)) of *out are set to OP(0, 0).
 * \return min(idx_l, idx_r): the row index up to which the output is
 *         now filled.  The index is routed through int because MSVC
 *         wants OMP loop variables to be 'int'.
 */
template <typename DType, typename OP, typename xpu>
static inline size_t FillDense(mshadow::Stream<xpu>* s,
                               const size_t idx_l,
                               const size_t idx_r,
                               const OpReqType req,
                               mshadow::Tensor<xpu, 2, DType>* out,
                               const size_t iter_out) {
  const int index_out_min = static_cast<int>(std::min(idx_l, idx_r));
  if (static_cast<size_t>(index_out_min) > iter_out) {
    // OP(0, 0) is loop-invariant; compute it once
    const DType zero_input_val = OP::Map(DType(0), DType(0));
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
    for (int i = static_cast<int>(iter_out); i < index_out_min; ++i) {
      Fill<false>(s, (*out)[i], req, zero_input_val);
    }
  }
  return static_cast<size_t>(index_out_min);  // MSVC wants OMP loops to always use 'int'
}
/*! \brief Two NDArrays are considered the same iff they share the same
 *         engine variable (i.e. the same underlying storage). */
static inline bool IsSameArray(const NDArray& a1, const NDArray& a2) {
  return a1.var() == a2.var();
}
public:
/*! \brief Minimum of three values */
static MSHADOW_XINLINE size_t minthree(const size_t a, const size_t b, const size_t c) {
  return std::min(a, std::min(b, c));
}
private:
/*!
 * \brief CPU backward pass for binary ops whose input gradients depend
 *        only on the output gradient: lgrad = LOP(ograd), rgrad = ROP(ograd).
 * inputs[0] is the output gradient; outputs[0]/outputs[1] are the
 * lhs/rhs gradients.  When the per-output op is identity and the write
 * request is kWriteInplace, the gradient already lives in the output
 * buffer, so only an aliasing check is performed.
 */
template <typename LOP, typename ROP>
static void BackwardUseNone_(const nnvm::NodeAttrs& attrs,
                             mshadow::Stream<cpu>* s,
                             const std::vector<TBlob>& inputs,
                             const std::vector<OpReqType>& req,
                             const std::vector<TBlob>& outputs) {
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    using namespace mxnet_op;
    // element count rounded up to whole SIMD lanes
    const size_t size = static_cast<size_t>((outputs[0].Size() + DataType<DType>::kLanes - 1) /
                                            DataType<DType>::kLanes);
    const DType* ograd_dptr = inputs[0].dptr<DType>();
    if (std::is_same<LOP, mshadow_op::identity>::value && req[0] == kWriteInplace) {
      // identity + in-place: nothing to compute, just verify aliasing
      CHECK_EQ(ograd_dptr, outputs[0].dptr<DType>());
    } else if (req[0] != kNullOp) {
      DType* lgrad_dptr = outputs[0].dptr<DType>();
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        Kernel<mxnet_op::op_with_req<LOP, Req>, cpu>::Launch(s, size, lgrad_dptr, ograd_dptr);
      });
    }
    if (std::is_same<ROP, mshadow_op::identity>::value && req[1] == kWriteInplace) {
      CHECK_EQ(ograd_dptr, outputs[1].dptr<DType>());
    } else if (req[1] != kNullOp) {
      DType* rgrad_dptr = outputs[1].dptr<DType>();
      MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
        Kernel<mxnet_op::op_with_req<ROP, Req>, cpu>::Launch(s, size, rgrad_dptr, ograd_dptr);
      });
    }
  });
}
/*!
 * \brief CPU backward pass for binary ops whose input gradients depend
 *        on both original inputs: grad = ograd * OP'(lhs, rhs) via the
 *        backward_grad_tuned kernel wrapper.
 * inputs = { ograd, lhs, rhs }; outputs = { lgrad, rgrad }.
 */
template <typename LOP, typename ROP>
static void BackwardUseIn_(const nnvm::NodeAttrs& attrs,
                           mshadow::Stream<cpu>* s,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    DCHECK_EQ(outputs.size(), 2U);
    DCHECK_EQ(inputs.size(), 3U);
    const DType* ograd_dptr = inputs[0].dptr<DType>();
    const DType* lhs_dptr = inputs[1].dptr<DType>();
    const DType* rhs_dptr = inputs[2].dptr<DType>();
    // lhs gradient: LOP applied elementwise, scaled by ograd
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      const size_t size =
          static_cast<size_t>((outputs[0].Size() + mxnet_op::DataType<DType>::kLanes - 1) /
                              mxnet_op::DataType<DType>::kLanes);
      DType* lgrad_dptr = outputs[0].dptr<DType>();
      mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<LOP>, Req>,
                       cpu>::Launch(s, size, lgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);
    });
    // rhs gradient: ROP applied elementwise, scaled by ograd
    MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
      const size_t size =
          static_cast<size_t>((outputs[1].Size() + mxnet_op::DataType<DType>::kLanes - 1) /
                              mxnet_op::DataType<DType>::kLanes);
      DType* rgrad_dptr = outputs[1].dptr<DType>();
      mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<ROP>, Req>,
                       cpu>::Launch(s, size, rgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);
    });
  });
}
/*!
 * \brief Backward for row-sparse/row-sparse binary ops:
 *        lgrad = LOP(lhs, rhs) * ograd and rgrad = ROP(lhs, rhs) * ograd,
 *        each computed as an RspRspOp pass followed by an in-place
 *        elementwise multiply with the incoming gradient.
 * inputs = { ograd, lhs, rhs }; outputs = { lgrad, rgrad }.
 * NOTE(review): backup_compute is accepted but never invoked in this
 * body -- confirm whether a dense fallback path was intended here.
 */
template <typename xpu,
          typename LOP,
          typename ROP,
          bool in0_ok_dense = false,
          bool in1_ok_dense = false,
          bool in2_ok_dense = false,
          typename BackupCompute>
static inline void RspRspOpBackward(const nnvm::NodeAttrs& attrs,
                                    const OpContext& ctx,
                                    const std::vector<NDArray>& inputs,
                                    const std::vector<OpReqType>& req,
                                    const std::vector<NDArray>& outputs,
                                    BackupCompute backup_compute) {
  mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
  // lhs grad
  if (req[0] != kNullOp) {
    // RspRspOp can handle dense outputs so long as OP(0, 0) == 0
    RspRspOp<LOP>(
        s, attrs, ctx, inputs[1], inputs[2], req[0], outputs[0], false, false, false, false);
    // lhs in-place: multiply by the output gradient
    RspRspOp<op::mshadow_op::mul>(
        s, attrs, ctx, outputs[0], inputs[0], req[0], outputs[0], false, false, true, false);
  }
  // rhs grad
  if (req[1] != kNullOp) {
    RspRspOp<ROP>(
        s, attrs, ctx, inputs[1], inputs[2], req[1], outputs[1], false, false, false, false);
    // rhs in-place: multiply by the output gradient
    RspRspOp<op::mshadow_op::mul>(
        s, attrs, ctx, inputs[0], outputs[1], req[1], outputs[1], false, false, true, false);
  }
}
public:
/*! \brief Binary op handling for lhr/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */
template <typename OP>
static void RspRspOp(mshadow::Stream<cpu>* s,
const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const NDArray& lhs,
const NDArray& rhs,
OpReqType req,
const NDArray& output,
bool lhs_may_be_dense,
bool rhs_may_be_dense,
bool allow_inplace,
bool scatter);
/*! \brief Binary op handling for lhr/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */
template <typename OP>
static void RspRspOp(mshadow::Stream<gpu>* s,
const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const NDArray& lhs,
const NDArray& rhs,
OpReqType req,
const NDArray& output,
bool lhs_may_be_dense,
bool rhs_may_be_dense,
bool allow_inplace,
bool scatter);
/*! \brief CSR -op- CSR binary operator for non-canonical NDArray */
template <typename OP>
static void CsrCsrOp(mshadow::Stream<cpu>* s,
const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const NDArray& lhs,
const NDArray& rhs,
OpReqType req,
const NDArray& output);
/*! \brief CSR -op- CSR binary operator for non-canonical NDArray */
template <typename OP>
static void CsrCsrOp(mshadow::Stream<gpu>* s,
const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const NDArray& lhs,
const NDArray& rhs,
OpReqType req,
const NDArray& output);
/*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
template <typename OP>
static void DnsCsrDnsOp(mshadow::Stream<cpu>* s,
const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const NDArray& lhs,
const NDArray& rhs,
OpReqType req,
const NDArray& output,
const bool reverse);
/*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
template <typename OP>
static void DnsCsrDnsOp(mshadow::Stream<gpu>* s,
const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const NDArray& lhs,
const NDArray& rhs,
OpReqType req,
const NDArray& output,
const bool reverse);
/*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
template <typename xpu, typename OP>
static void DnsCsrCsrOp(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const NDArray& lhs,
const NDArray& rhs,
OpReqType req,
const NDArray& output,
const bool reverse);
/*! \brief DNS -op- RSP binary operator for non-canonical NDArray */
template <typename xpu, typename OP>
static void DnsRspDnsOp(mshadow::Stream<xpu>* s,
const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const NDArray& lhs,
const NDArray& rhs,
OpReqType req,
const NDArray& output,
const bool reverse);
public:
/*!
* \brief Rsp-op-Rsp operation which produces a dense result
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
static bool SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs);
/*!
* \brief Allow one of the binary inputs to be dense and still produce a sparse output.
* Typically used for sparse * dense = sparse.
* Note: for csr, it dispatches to fallback other than csr, csr -> csr
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
// Storage-type inference that prefers sparse outputs; falls back to
// dense dispatch only when no sparse combination matches.  On non-CPU
// devices the sparse paths dispatch to kFComputeFallback.
static bool PreferSparseStorageType(const nnvm::NodeAttrs& attrs,
                                    int dev_mask,
                                    DispatchMode* dispatch_mode,
                                    std::vector<int>* in_attrs,
                                    std::vector<int>* out_attrs) {
  using namespace common;
  CHECK_EQ(in_attrs->size(), 2U) << " in operator " << attrs.name;
  CHECK_EQ(out_attrs->size(), 1U) << " in operator " << attrs.name;
  const auto& lhs_stype = in_attrs->at(0);
  const auto& rhs_stype = in_attrs->at(1);
  auto& out_stype       = out_attrs->at(0);
  bool dispatched       = false;
  // sparse kernels here are CPU-only; elsewhere fall back
  const bool invalid_ctx = dev_mask != mshadow::cpu::kDevMask;
  const auto dispatch_ex =
      invalid_ctx ? DispatchMode::kFComputeFallback : DispatchMode::kFComputeEx;
  if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
    // dns, dns -> dns
    dispatched =
        storage_type_assign(&out_stype, kDefaultStorage, dispatch_mode, DispatchMode::kFCompute);
  }
  if (!dispatched && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) {
    // rsp, rsp -> rsp
    dispatched = storage_type_assign(&out_stype, kRowSparseStorage, dispatch_mode, dispatch_ex);
  }
  if (!dispatched && ContainsOnlyStorage(*in_attrs, kCSRStorage)) {
    // csr, csr -> csr
    dispatched = storage_type_assign(&out_stype, kCSRStorage, dispatch_mode, dispatch_ex);
  }
  if (!dispatched && ((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
                      (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage))) {
    // rsp, dns -> rsp
    // dns, rsp -> rsp
    dispatched = storage_type_assign(&out_stype, kRowSparseStorage, dispatch_mode, dispatch_ex);
  }
  if (!dispatched && ((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
                      (lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage))) {
    // csr, dns -> csr
    // dns, csr -> csr
    dispatched =
        storage_type_assign(&out_stype, kCSRStorage, dispatch_mode, DispatchMode::kFComputeEx);
  }
  if (!dispatched) {
    // no sparse combination matched: dense fallback
    dispatched = dispatch_fallback(out_attrs, dispatch_mode);
  }
  return dispatched;
}
/*!
 * \brief Allow one of the inputs to be dense and produce a dense output;
 *        for rsp inputs, a sparse output is only kept when both inputs are rsp.
 * \tparam cpu_only if true, sparse FComputeEx kernels fall back on non-CPU devices
 * \tparam rsp      whether rsp, rsp -> rsp is supported
 * \tparam csr      whether csr, csr -> csr is supported
 * \param attrs Attributes
 * \param dev_mask Device mask
 * \param dispatch_mode Dispatch Mode (written on success)
 * \param in_attrs Input storage attributes
 * \param out_attrs Output storage attributes
 * \return true if handled
 */
template <bool cpu_only, bool rsp, bool csr>
static bool PreferDenseStorageType(const nnvm::NodeAttrs& attrs,
                                   const int dev_mask,
                                   DispatchMode* dispatch_mode,
                                   std::vector<int>* in_attrs,
                                   std::vector<int>* out_attrs) {
  using namespace common;
  CHECK_EQ(in_attrs->size(), 2);
  CHECK_EQ(out_attrs->size(), 1);
  const auto lhs_stype = (*in_attrs)[0];
  const auto rhs_stype = (*in_attrs)[1];
  bool dispatched      = false;
  const bool invalid_ctx = cpu_only && dev_mask != mshadow::cpu::kDevMask;
  const auto dispatch_ex =
      invalid_ctx ? DispatchMode::kFComputeFallback : DispatchMode::kFComputeEx;
  // Branches are tried in order; the first storage_type_assign that succeeds wins.
  if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
    // dns, dns ... -> dns
    dispatched =
        storage_type_assign(out_attrs, kDefaultStorage, dispatch_mode, DispatchMode::kFCompute);
  }
  if (!dispatched && rsp && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) {
    // rsp, rsp, ... -> rsp
    dispatched = storage_type_assign(
        out_attrs, kRowSparseStorage, dispatch_mode, DispatchMode::kFComputeEx);
  }
  if (!dispatched && csr && ContainsOnlyStorage(*in_attrs, kCSRStorage)) {
    // csr, csr, ... -> csr
    dispatched = storage_type_assign(out_attrs, kCSRStorage, dispatch_mode, dispatch_ex);
  }
  if (!dispatched && ((lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage) ||
                      (lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage))) {
    // dense, csr -> dense / csr, dense -> dense
    dispatched =
        storage_type_assign(out_attrs, kDefaultStorage, dispatch_mode, DispatchMode::kFComputeEx);
  }
  if (!dispatched && ((lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage) ||
                      (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage))) {
    // dense, rsp -> dense / rsp, dense -> dense
    dispatched =
        storage_type_assign(out_attrs, kDefaultStorage, dispatch_mode, DispatchMode::kFComputeEx);
  }
  if (!dispatched) {
    // Fix: record the fallback result instead of unconditionally reporting
    // success, matching PreferSparseStorageType above (the previous code
    // ignored dispatch_fallback's return value and always returned true).
    dispatched = dispatch_fallback(out_attrs, dispatch_mode);
  }
  return dispatched;
}
/*!
 * \brief Storage-type inference for the backward pass of ops whose gradient
 *        needs the forward inputs (declaration only; defined elsewhere).
 * \param attrs Attributes
 * \param dev_mask Device mask
 * \param dispatch_mode Dispatch Mode (written on success)
 * \param in_attrs Input storage attributes
 * \param out_attrs Output storage attributes
 * \return true if handled
 */
static bool BackwardUseInStorageType(const nnvm::NodeAttrs& attrs,
                                     int dev_mask,
                                     DispatchMode* dispatch_mode,
                                     std::vector<int>* in_attrs,
                                     std::vector<int>* out_attrs);
/*! \brief Dense elementwise binary compute restricted to (extended) integer types. */
template <typename xpu, typename OP>
static void ComputeInt(const nnvm::NodeAttrs& attrs,
                       const OpContext& ctx,
                       const std::vector<TBlob>& inputs,
                       const std::vector<OpReqType>& req,
                       const std::vector<TBlob>& outputs) {
  using namespace mxnet_op;
  if (req[0] == kNullOp)
    return;
  Stream<xpu>* stream = ctx.get_stream<xpu>();
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1U);
  MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
    MXNET_INT_TYPE_SWITCH_EXT(outputs[0].type_flag_, DType, {
      // round the element count up to a whole number of vector lanes
      const size_t lanes = DataType<DType>::kLanes;
      const size_t count =
          (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) + lanes - 1) / lanes;
      if (count > 0) {
        Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(stream,
                                                            count,
                                                            outputs[0].dptr<DType>(),
                                                            inputs[0].dptr<DType>(),
                                                            inputs[1].dptr<DType>());
      }
    });
  });
}
/*! \brief Dense elementwise binary compute over integer types, bool included. */
template <typename xpu, typename OP>
static void ComputeIntWithBool(const nnvm::NodeAttrs& attrs,
                               const OpContext& ctx,
                               const std::vector<TBlob>& inputs,
                               const std::vector<OpReqType>& req,
                               const std::vector<TBlob>& outputs) {
  using namespace mxnet_op;
  if (req[0] == kNullOp)
    return;
  Stream<xpu>* stream = ctx.get_stream<xpu>();
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1U);
  MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
    MXNET_INT_TYPE_SWITCH_EXT_WITH_BOOL(outputs[0].type_flag_, DType, {
      // round the element count up to a whole number of vector lanes
      const size_t lanes = DataType<DType>::kLanes;
      const size_t count =
          (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) + lanes - 1) / lanes;
      if (count > 0) {
        Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(stream,
                                                            count,
                                                            outputs[0].dptr<DType>(),
                                                            inputs[0].dptr<DType>(),
                                                            inputs[1].dptr<DType>());
      }
    });
  });
}
/*! \brief Dense elementwise binary compute for all non-bool types (bool is rejected). */
template <typename xpu, typename OP>
static void Compute(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  using namespace mxnet_op;
  if (req[0] == kNullOp)
    return;
  mshadow::Stream<xpu>* stream = ctx.get_stream<xpu>();
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1U);
  // bool is handled by ComputeWithBool / ComputeLogic instead
  if (outputs[0].type_flag_ == mshadow::kBool) {
    LOG(FATAL) << "Operator " << attrs.op->name << " does not support boolean type";
  }
  MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
    MSHADOW_TYPE_SWITCH_EXT(outputs[0].type_flag_, DType, {
      // round the element count up to a whole number of vector lanes
      const size_t lanes = DataType<DType>::kLanes;
      const size_t count =
          (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) + lanes - 1) / lanes;
      if (count > 0) {
        Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(stream,
                                                            count,
                                                            outputs[0].dptr<DType>(),
                                                            inputs[0].dptr<DType>(),
                                                            inputs[1].dptr<DType>());
      }
    });
  });
}
/*!
 * \brief Backward kernel using the forward input; real (floating) gradients only —
 *        integer/bool output dtypes are rejected.
 */
template <typename xpu, typename OP>
static void MixedUnaryBackwardUseInCompute(const nnvm::NodeAttrs& attrs,
                                           const OpContext& ctx,
                                           const std::vector<TBlob>& inputs,
                                           const std::vector<OpReqType>& req,
                                           const std::vector<TBlob>& outputs) {
  using namespace mxnet_op;
  if (req[0] == kNullOp)
    return;
  Stream<xpu>* stream = ctx.get_stream<xpu>();
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1U);
  // gradients are only defined for floating-point output types here
  if (mxnet::common::is_int(outputs[0].type_flag_) || outputs[0].type_flag_ == mshadow::kBool) {
    LOG(FATAL) << "gradient computation of operator " << attrs.op->name << " for "
               << mshadow::dtype_string(outputs[0].type_flag_) << " type is not supported";
  }
  MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
    MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      // round the element count up to a whole number of vector lanes
      const size_t lanes = DataType<DType>::kLanes;
      const size_t count =
          (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) + lanes - 1) / lanes;
      if (count > 0) {
        Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(stream,
                                                            count,
                                                            outputs[0].dptr<DType>(),
                                                            inputs[0].dptr<DType>(),
                                                            inputs[1].dptr<DType>());
      }
    });
  });
}
/*!
 * \brief Backward kernel using forward input and output: consumes inputs[0] and
 *        inputs[2] (inputs[1] is unused by the kernel). Real gradients only.
 */
template <typename xpu, typename OP>
static void MixedUnaryBackwardUseInOutCompute(const nnvm::NodeAttrs& attrs,
                                              const OpContext& ctx,
                                              const std::vector<TBlob>& inputs,
                                              const std::vector<OpReqType>& req,
                                              const std::vector<TBlob>& outputs) {
  using namespace mxnet_op;
  if (req[0] == kNullOp)
    return;
  Stream<xpu>* stream = ctx.get_stream<xpu>();
  CHECK_EQ(inputs.size(), 3U);
  CHECK_EQ(outputs.size(), 1U);
  // gradients are only defined for floating-point output types here
  if (mxnet::common::is_int(outputs[0].type_flag_) || outputs[0].type_flag_ == mshadow::kBool) {
    LOG(FATAL) << "gradient computation of operator " << attrs.op->name << " for "
               << mshadow::dtype_string(outputs[0].type_flag_) << " type is not supported";
  }
  MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
    MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      // round the element count up to a whole number of vector lanes
      const size_t lanes = DataType<DType>::kLanes;
      const size_t count =
          (minthree(outputs[0].Size(), inputs[0].Size(), inputs[2].Size()) + lanes - 1) / lanes;
      if (count > 0) {
        Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(stream,
                                                            count,
                                                            outputs[0].dptr<DType>(),
                                                            inputs[0].dptr<DType>(),
                                                            inputs[2].dptr<DType>());
      }
    });
  });
}
/*! \brief Dense elementwise binary compute over every type, bool included. */
template <typename xpu, typename OP>
static void ComputeWithBool(const nnvm::NodeAttrs& attrs,
                            const OpContext& ctx,
                            const std::vector<TBlob>& inputs,
                            const std::vector<OpReqType>& req,
                            const std::vector<TBlob>& outputs) {
  using namespace mxnet_op;
  if (req[0] == kNullOp)
    return;
  Stream<xpu>* stream = ctx.get_stream<xpu>();
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1U);
  MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
    MSHADOW_TYPE_SWITCH_EXT_WITH_BOOL(outputs[0].type_flag_, DType, {
      // round the element count up to a whole number of vector lanes
      const size_t lanes = DataType<DType>::kLanes;
      const size_t count =
          (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) + lanes - 1) / lanes;
      if (count > 0) {
        Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(stream,
                                                            count,
                                                            outputs[0].dptr<DType>(),
                                                            inputs[0].dptr<DType>(),
                                                            inputs[1].dptr<DType>());
      }
    });
  });
}
/*!
 * \brief Elementwise logic/comparison compute: the two inputs may have different
 *        dtypes (DType/EType) and the output is always written as bool.
 */
template <typename xpu, typename OP>
static void ComputeLogic(const nnvm::NodeAttrs& attrs,
                         const OpContext& ctx,
                         const std::vector<TBlob>& inputs,
                         const std::vector<OpReqType>& req,
                         const std::vector<TBlob>& outputs) {
  using namespace mxnet_op;
  if (req[0] == kNullOp)
    return;
  Stream<xpu>* stream = ctx.get_stream<xpu>();
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1U);
  MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
    MSHADOW_TYPE_SWITCH_EXT_WITH_BOOL(inputs[0].type_flag_, DType, {
      MSHADOW_TYPE_SWITCH_EXT_WITH_BOOL(inputs[1].type_flag_, EType, {
        // lane count follows the lhs input's dtype
        const size_t lanes = DataType<DType>::kLanes;
        const size_t count =
            (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) + lanes - 1) / lanes;
        if (count > 0) {
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(stream,
                                                              count,
                                                              outputs[0].dptr<bool>(),
                                                              inputs[0].dptr<DType>(),
                                                              inputs[1].dptr<EType>());
        }
      });
    });
  });
}
/*!
 * \brief FComputeEx entry point: dispatch a sparse/dense binary op to the
 *        appropriate storage-specific helper based on the lhs/rhs/output
 *        storage types. Unsupported combinations are reported via
 *        LogUnimplementedOp.
 */
template <typename xpu, typename OP>
static void ComputeEx(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<NDArray>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<NDArray>& outputs) {
  using namespace common;
  CHECK_EQ(inputs.size(), 2);
  CHECK_EQ(outputs.size(), 1);
  if (req[0] == kNullOp)
    return;
  const auto lhs_stype = inputs[0].storage_type();
  const auto rhs_stype = inputs[1].storage_type();
  const auto out_stype = outputs[0].storage_type();
  mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
  if ((ContainsOnlyStorage(inputs, kRowSparseStorage)) &&
      (out_stype == kRowSparseStorage || out_stype == kDefaultStorage)) {
    // rsp, rsp -> rsp
    // rsp, rsp -> dns
    RspRspOp<OP>(
        s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], false, false, false, false);
  } else if (ContainsOnlyStorage(inputs, kCSRStorage) && out_stype == kCSRStorage) {
    // csr, csr -> csr
    CsrCsrOp<OP>(s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0]);
  } else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
              (lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) &&
             out_stype == kDefaultStorage) {
    // csr, dns -> dns / dns, csr -> dns: pick out which operand is which;
    // "reverse" records that the sparse array was originally the lhs
    const NDArray& dns = (lhs_stype == kDefaultStorage) ? inputs[0] : inputs[1];
    const NDArray& csr = (lhs_stype == kCSRStorage) ? inputs[0] : inputs[1];
    const bool reverse = (lhs_stype == kCSRStorage);
    DnsCsrDnsOp<OP>(s, attrs, ctx, dns, csr, req[0], outputs[0], reverse);
  } else if (((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
              (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) &&
             out_stype == kDefaultStorage) {
    // rsp, dns -> dns / dns, rsp -> dns
    const NDArray& dns = (lhs_stype == kDefaultStorage) ? inputs[0] : inputs[1];
    const bool reverse = (lhs_stype == kRowSparseStorage);
    const NDArray& rsp = (reverse) ? inputs[0] : inputs[1];
    DnsRspDnsOp<xpu, OP>(s, attrs, ctx, dns, rsp, req[0], outputs[0], reverse);
  } else {
    LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
  }
}
/*!
 * \brief ComputeEx allowing dense lvalue and/or rvalue, controlled by the
 *        lhs_may_be_dense / rhs_may_be_dense template flags.
 */
template <typename xpu, typename OP, bool lhs_may_be_dense, bool rhs_may_be_dense>
static void ComputeDnsLRValueEx(const nnvm::NodeAttrs& attrs,
                                const OpContext& ctx,
                                const std::vector<NDArray>& inputs,
                                const std::vector<OpReqType>& req,
                                const std::vector<NDArray>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  CHECK_EQ(inputs.size(), 2);
  CHECK_EQ(outputs.size(), 1);
  if (req[0] == kNullOp)
    return;
  const auto lhs_stype = inputs[0].storage_type();
  const auto rhs_stype = inputs[1].storage_type();
  const auto out_stype = outputs[0].storage_type();
  if ((out_stype == kRowSparseStorage || out_stype == kDefaultStorage) &&
      ((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) ||
       (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
       (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) &&
      lhs_may_be_dense && rhs_may_be_dense) {
    // rsp, rsp -> rsp
    // rsp, rsp -> dns
    // rsp, dns -> rsp
    // dns, rsp -> rsp
    // More than once dense not allowed (this will be checked in RspRspOp):
    // rsp, dns -> dns <-- NOT ALLOWED
    // dns, rsp -> dns <-- NOT ALLOWED
    mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
    RspRspOp<OP>(s,
                 attrs,
                 ctx,
                 inputs[0],
                 inputs[1],
                 req[0],
                 outputs[0],
                 lhs_may_be_dense,
                 rhs_may_be_dense,
                 false,
                 false);
  } else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) {
    // csr, csr: delegate to the generic FComputeEx dispatcher
    ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs);
  } else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
              (lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) &&
             out_stype == kCSRStorage) {
    // csr, dns -> csr / dns, csr -> csr; "reverse" records that the sparse
    // array was originally the lhs
    const NDArray& dns = (lhs_stype == kDefaultStorage) ? inputs[0] : inputs[1];
    const NDArray& csr = (lhs_stype == kCSRStorage) ? inputs[0] : inputs[1];
    const bool reverse = (lhs_stype == kCSRStorage);
    DnsCsrCsrOp<xpu, OP>(attrs, ctx, dns, csr, req[0], outputs[0], reverse);
  } else {
    LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
  }
}
/*! \brief Dense backward for ops whose gradient needs neither forward input. */
template <typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNone(const nnvm::NodeAttrs& attrs,
                                   const OpContext& ctx,
                                   const std::vector<TBlob>& inputs,
                                   const std::vector<OpReqType>& req,
                                   const std::vector<TBlob>& outputs) {
  // Thin adapter: obtain the device stream and delegate to the stream-based impl.
  BackwardUseNone_<LOP, ROP>(attrs, ctx.get_stream<xpu>(), inputs, req, outputs);
}
/*!
 * \brief Sparse (FComputeEx) backward for ops whose gradient needs neither
 *        forward input: ograd -> (lhs grad, rhs grad). Only same-storage
 *        rsp->rsp / csr->csr paths are handled; LOP/ROP must map 0 to 0 so
 *        that sparsity is preserved (checked via DCHECK below).
 */
template <typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNoneEx(const nnvm::NodeAttrs& attrs,
                                     const OpContext& ctx,
                                     const std::vector<NDArray>& inputs,
                                     const std::vector<OpReqType>& req,
                                     const std::vector<NDArray>& outputs) {
  CHECK_EQ(inputs.size(), 1U);   // output grad
  CHECK_EQ(outputs.size(), 2U);  // lhs input grad, rhs input grad
  const auto in_stype  = inputs[0].storage_type();
  const auto lhs_stype = outputs[0].storage_type();
  const auto rhs_stype = outputs[1].storage_type();
  // lhs grad
  if (req[0] != kNullOp) {
    if (in_stype == lhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
      CHECK_EQ(outputs[0].storage_type(), in_stype);
      // rsp -> rsp, _. op requires 0-input returns 0-output
      DCHECK_LT(std::fabs(static_cast<float>(LOP::Map(0))), 1e-5f);
      UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]});
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }
  // rhs grad
  if (req[1] != kNullOp) {
    if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
      // Fix: verify the rhs gradient's storage type (previously checked
      // outputs[0] again — a copy-paste slip from the lhs branch).
      CHECK_EQ(outputs[1].storage_type(), in_stype);
      // rsp -> _, rsp. op requires 0-input returns 0-output
      DCHECK_LT(std::fabs(static_cast<float>(ROP::Map(0))), 1e-5f);
      UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]});
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }
}
/*! \brief Dense backward for ops whose gradient needs the forward inputs. */
template <typename xpu, typename LOP, typename ROP>
static inline void BackwardUseIn(const nnvm::NodeAttrs& attrs,
                                 const OpContext& ctx,
                                 const std::vector<TBlob>& inputs,
                                 const std::vector<OpReqType>& req,
                                 const std::vector<TBlob>& outputs) {
  // Thin adapter: obtain the device stream and delegate to the stream-based impl.
  BackwardUseIn_<LOP, ROP>(attrs, ctx.get_stream<xpu>(), inputs, req, outputs);
}
/*!
 * \brief Sparse (FComputeEx) backward for ops whose gradient needs the forward
 *        inputs. Only the all-rsp input case is implemented; the gradients may
 *        come out dense or rsp. Everything else aborts.
 */
template <typename xpu, typename LOP, typename ROP>
static inline void BackwardUseInEx(const nnvm::NodeAttrs& attrs,
                                   const OpContext& ctx,
                                   const std::vector<NDArray>& inputs,
                                   const std::vector<OpReqType>& req,
                                   const std::vector<NDArray>& outputs) {
  using namespace common;
  CHECK_EQ(inputs.size(), 3U);
  CHECK_EQ(outputs.size(), 2U);  // lhs input grad, rhs input grad
  const auto lhs_grad_stype = outputs[0].storage_type();
  const auto rhs_grad_stype = outputs[1].storage_type();
  if (ContainsOnlyStorage(inputs, kRowSparseStorage) &&
      (lhs_grad_stype == kDefaultStorage || lhs_grad_stype == kRowSparseStorage) &&
      (rhs_grad_stype == kDefaultStorage || rhs_grad_stype == kRowSparseStorage)) {
    // rsp, rsp, rsp -> [dns, rsp], [dns, rsp]
    // the dense fallback path is the TBlob-based BackwardUseIn
    RspRspOpBackward<xpu, LOP, ROP, false, false, false>(
        attrs, ctx, inputs, req, outputs, BackwardUseIn<xpu, LOP, ROP>);
  } else {
    LOG(FATAL) << "Not Implemented";
  }
}
}; // class ElemwiseBinaryOp
/*! \brief Register a generic elementwise binary operator: two inputs named
 *  "lhs"/"rhs", one output, elementwise shape/type inference, and in-place
 *  output allowed on either input. */
#define MXNET_OPERATOR_REGISTER_BINARY(name)                                                      \
  NNVM_REGISTER_OP(name)                                                                          \
      .set_num_inputs(2)                                                                          \
      .set_num_outputs(1)                                                                         \
      .set_attr<nnvm::FListInputNames>("FListInputNames",                                         \
                                       [](const NodeAttrs& attrs) {                               \
                                         return std::vector<std::string>{"lhs", "rhs"};           \
                                       })                                                         \
      .set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<2, 1>)                           \
      .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>)                               \
      .set_attr<nnvm::FInplaceOption>("FInplaceOption",                                           \
                                      [](const NodeAttrs& attrs) {                                \
                                        return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}}; \
                                      })                                                          \
      .add_argument("lhs", "NDArray-or-Symbol", "first input")                                    \
      .add_argument("rhs", "NDArray-or-Symbol", "second input")
/*! \brief Binary launch, with FComputeEx for csr and rsp available (CPU);
 *  storage inference via ElemwiseStorageType, temp space requested for CSR. */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(__name$, __kernel$)                  \
  MXNET_OPERATOR_REGISTER_BINARY(__name$)                                                   \
      .set_attr<FInferStorageType>("FInferStorageType",                                     \
                                   ElemwiseStorageType<2, 1, true, true, true>)             \
      .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>)       \
      .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
      .set_attr<FResourceRequest>(                                                          \
          "FResourceRequest", /* For Sparse CSR */                                          \
          [](const NodeAttrs& attrs) {                                                      \
            return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};               \
          })
/*! \brief Binary launch, with FComputeEx for csr and rsp available.
 *  When inputs contain both sparse and dense, a sparse output is preferred
 *  (storage inference via ElemwiseBinaryOp::PreferSparseStorageType). */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PS(__name$, __kernel$)                   \
  MXNET_OPERATOR_REGISTER_BINARY(__name$)                                                       \
      .set_attr<FInferStorageType>("FInferStorageType", ElemwiseBinaryOp::PreferSparseStorageType) \
      .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>)           \
      .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>)     \
      .set_attr<FResourceRequest>(                                                              \
          "FResourceRequest", /* For Sparse CSR */                                              \
          [](const NodeAttrs& attrs) {                                                          \
            return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};                   \
          })
/*! \brief Binary launch, dense result
 * Storage inference via ElemwiseBinaryOp::SparseSparseWithDenseResult:
 * sparse, sparse -> dense.
 */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(__name$, __kernel$)         \
  MXNET_OPERATOR_REGISTER_BINARY(__name$)                                             \
      .set_attr<FInferStorageType>("FInferStorageType",                               \
                                   ElemwiseBinaryOp::SparseSparseWithDenseResult)     \
      .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
      .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>)
/*! \brief Binary launch, with FComputeEx, preferring a dense output when any
 *  input is dense (storage inference via PreferDenseStorageType<cpu_only=true,
 *  rsp=true, csr=true>); temp space requested for CSR. */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PD(__name$, __kernel$)               \
  MXNET_OPERATOR_REGISTER_BINARY(__name$)                                                   \
      .set_attr<FInferStorageType>("FInferStorageType",                                     \
                                   ElemwiseBinaryOp::PreferDenseStorageType<true, true, true>) \
      .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>)       \
      .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
      .set_attr<FResourceRequest>(                                                          \
          "FResourceRequest", /* For Sparse CSR */                                          \
          [](const NodeAttrs& attrs) {                                                      \
            return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};               \
          })
#if MXNET_USE_CUDA
/*! \brief Forward launcher for an elementwise binary op on GPU; OP names the
 *  operator by string (RTC presumably = runtime compilation — confirm in the
 *  .cu definition). operator() is defined elsewhere. */
struct ElemwiseBinaryRTCCompute {
  std::string OP;  // operator name used to select/generate the kernel
  void operator()(const nnvm::NodeAttrs& attrs,
                  const OpContext& ctx,
                  const std::vector<TBlob>& inputs,
                  const std::vector<OpReqType>& req,
                  const std::vector<TBlob>& outputs);
};
/*! \brief GPU backward launcher (gradient needs neither forward input);
 *  LOP/ROP name the lhs/rhs gradient operators by string. Defined elsewhere. */
struct ElemwiseBinaryRTCBwdUseNone {
  std::string LOP;  // lhs-gradient operator name
  std::string ROP;  // rhs-gradient operator name
  void operator()(const nnvm::NodeAttrs& attrs,
                  const OpContext& ctx,
                  const std::vector<TBlob>& inputs,
                  const std::vector<OpReqType>& req,
                  const std::vector<TBlob>& outputs);
};
/*! \brief GPU backward launcher (gradient uses the forward inputs);
 *  LOP/ROP name the lhs/rhs gradient operators by string. Defined elsewhere. */
struct ElemwiseBinaryRTCBwdUseIn {
  std::string LOP;  // lhs-gradient operator name
  std::string ROP;  // rhs-gradient operator name
  void operator()(const nnvm::NodeAttrs& attrs,
                  const OpContext& ctx,
                  const std::vector<TBlob>& inputs,
                  const std::vector<OpReqType>& req,
                  const std::vector<TBlob>& outputs);
};
#endif
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
|
prefixspan.h | //########################################################################
//## Copyright 2019 Da Yan http://www.cs.uab.edu/yanda
//##
//## Licensed under the Apache License, Version 2.0 (the "License");
//## you may not use this file except in compliance with the License.
//## You may obtain a copy of the License at
//##
//## //http://www.apache.org/licenses/LICENSE-2.0
//##
//## Unless required by applicable law or agreed to in writing, software
//## distributed under the License is distributed on an "AS IS" BASIS,
//## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//## See the License for the specific language governing permissions and
//## limitations under the License.
//########################################################################
//########################################################################
//## Contributors
//## * Wenwen Qu
//## * Da Yan
//########################################################################
#ifndef PREFIXSPAN_PREFIXSPAN_H_
#define PREFIXSPAN_PREFIXSPAN_H_
#include <vector>
#include <map>
#include <set>
#include <fstream>
#include <strstream>
#include "Trans.h"
#include "Pattern.h"
#include "Task.h"
#include "Worker.h"
#include "omp.h"
using namespace std;
typedef int Element;
typedef vector<int> sequence;
//---------------------------
// A sequence's entry in a projected database: the transaction id (inherited
// from ProjTrans) plus the position of the last matched item in that sequence.
class ProjSeq: public ProjTrans{
public:
    int pos;  // index into the sequence; scanning resumes at pos + 1

    ProjSeq() {}
    ProjSeq(int _id, int _pos): ProjTrans(_id), pos(_pos) {}
};
//---------------------------
// One input transaction: a transaction id (inherited from Trans) plus the
// ordered list of item labels.
class Seq: public Trans{
public:
    sequence seq;  // ordered items of this transaction

    Seq() {}
    Seq(int _tid): Trans(_tid) {}
};
//---------------------------
typedef vector<ProjSeq> seqPDB;
// A mined sequential pattern: the prefix items plus the (owned) projected DB.
// NOTE: owns projDB; copying this object would double-delete — patterns are
// set up field-by-field (see PrefixTask::project), not copied wholesale.
class PrefixPattern: public Pattern {
public:
    sequence items; // elements of a sequence (in order)
    // Fix: initialize to nullptr. Previously projDB was left indeterminate, so
    // destroying a default-constructed pattern before assignment deleted an
    // uninitialized pointer (undefined behavior). delete on nullptr is a no-op.
    vector<ProjSeq>* projDB = nullptr; // owned; usually passed in from children_map
    ~PrefixPattern(){ delete projDB; }
    virtual void print(ostream& fout){
        for(vector<Element>::iterator it = items.begin(); it!=items.end(); it++) fout << *it << " ";
        fout << " |PDB| = " << projDB->size() << endl;
    }
};
//---------------------------
typedef map<Element, seqPDB*> ChildrenT;
// A PrefixSpan mining task: one prefix pattern plus the machinery to grow it
// by one item (setChildren) and to enumerate the grown tasks (get_next_child).
class PrefixTask: public Task<PrefixPattern, ChildrenT, Seq>{
public:
    ChildrenT::iterator it;  // cursor over `children`, advanced by get_next_child()

    // Split the task (hand children to other workers) when the projected DB is
    // too large for a single thread.
    virtual bool needSplit(){
        if(pat.projDB->size() > tauDB_singlethread) return true;
        return false;
    };

    // Build children: for every item that can extend the current prefix,
    // collect its projected DB. Runs sequentially for small PDBs and with
    // OpenMP (per-thread maps merged afterwards) above tauDB_omp.
    virtual void setChildren(ChildrenT& children) {
        //get current projected DB (to set children's projected DBs)
        vector<ProjSeq>& oldPDB = *(pat.projDB);
        if(oldPDB.size() <= tauDB_omp)
        {
            for (int i=0; i<oldPDB.size(); i++) {
                int pos = oldPDB[i].pos;
                unsigned int id = oldPDB[i].tid;
                sequence& seq = TransDB()[id].seq;
                unsigned int size = seq.size();
                set<int> tmp; //to filter out labels that already occur in a previous position in the current seq
                //continue scanning elements from "pos"
                for (unsigned int j = pos + 1; j < size; j++) {
                    //avoid checking repeated label
                    int item = seq[j];
                    if(item == -1) continue;  // -1 marks items pruned as infrequent
                    if (tmp.find(item) != tmp.end()) continue;
                    tmp.insert(item);
                    //add current seq to children's PDB
                    ChildrenT::iterator it = children.find(item);
                    if(it == children.end()){
                        vector<ProjSeq>* pdb = new vector<ProjSeq>;
                        pdb->emplace_back(id, j);
                        children[item] = pdb;
                    }
                    else{
                        vector<ProjSeq>* pdb = it->second;
                        pdb->emplace_back(id, j);
                    }
                }
            }
        }
        else
        {
            vector<ChildrenT> childrenOfThread(THREADS);
            // parallel-for, set the array of childrenOfThread
            #pragma omp parallel for schedule(dynamic, CHUNK) num_threads(THREADS)
            for (int i=0; i<oldPDB.size(); i++) {
                int thread_id = omp_get_thread_num();
                ChildrenT& child_list = childrenOfThread[thread_id];
                //------
                int pos = oldPDB[i].pos;
                unsigned int id = oldPDB[i].tid;
                sequence& seq = TransDB()[id].seq;
                unsigned int size = seq.size();
                set<int> tmp; //to filter out labels that already occur in a previous position in the current seq
                //continue scanning elements from "pos"
                for (unsigned int j = pos + 1; j < size; j++) {
                    //avoid checking repeated label
                    int item = seq[j];
                    if(item == -1) continue;
                    if (tmp.find(item) != tmp.end()) continue;
                    tmp.insert(item);
                    //add current seq to children's PDB
                    ChildrenT::iterator it = child_list.find(item);
                    if(it == child_list.end()){
                        vector<ProjSeq>* pdb = new vector<ProjSeq>;
                        pdb->emplace_back(id, j);
                        child_list[item] = pdb;
                    }
                    else{
                        vector<ProjSeq>* pdb = it->second;
                        pdb->emplace_back(id, j);
                    }
                }
            }
            // merge childrenOfThread elements into children
            for(int i = 0; i < THREADS; i++){
                ChildrenT& children_i = childrenOfThread[i];
                for(ChildrenT::iterator it = children_i.begin(); it!= children_i.end(); it++){
                    Element label = it->first;
                    seqPDB* pdb_i = it->second;
                    ChildrenT::iterator it2 = children.find(label);
                    if(it2 == children.end()){
                        // Fix (leak): transfer ownership of the per-thread vector
                        // directly instead of allocating a fresh vector, swapping,
                        // and abandoning pdb_i on the heap.
                        children[label] = pdb_i;
                    }
                    else
                    {
                        vector<ProjSeq>* pdb = it2->second;
                        pdb->insert(pdb->end(), pdb_i->begin(), pdb_i->end());
                        // Fix (leak): the per-thread vector was only clear()ed
                        // before, leaking the heap object itself.
                        delete pdb_i;
                    }
                }
                children_i.clear(); // drop the (now dangling/merged) pointers
            }
        }
        //delete child patterns with PDBs not big enough
        for (ChildrenT::iterator it = children.begin(); it != children.end();)
        {
            vector<ProjSeq>* pdb = it->second;
            if (pdb->size() < minsup) {
                ChildrenT::iterator tmp = it;
                ++tmp;  // map::erase only invalidates the erased iterator
                delete it->second;
                children.erase(it);
                it = tmp;
            } else {
                ++it;
            }
        }
        // set iterator as the first element
        it = children.begin();
    }

    // Create the child task for prefix + e; takes ownership of projDB.
    Task* project(const Element& e, vector<ProjSeq>* projDB){
        Task* newT = new PrefixTask;
        PrefixPattern& newPat = newT->pat;
        newPat.items.assign(pat.items.begin(), pat.items.end());
        newPat.items.push_back(e);
        newPat.projDB = projDB;
        return newT;
    }

    // Output the current (already frequent) pattern before expanding it.
    virtual bool pre_check(ostream& fout){
        pat.print(fout);
        return true;
    }

    // Hand out the next child task, or 0 when all children were consumed.
    virtual Task* get_next_child(){
        if(it != children.end()){
            Task* newTask = this->project(it->first, it->second);
            it++;
            return newTask;
        }
        return 0;
    }
};
// Worker that loads the transaction DB and seeds the task queue with one task
// per frequent single-item prefix.
class PrefixWorker:public Worker<PrefixTask> {
public:
    int trans_cnt; // used for assigning IDs to transactions when loading data

    PrefixWorker(const char *infile, const char *outfolder = "outputs"): Worker(infile, outfolder), trans_cnt(0)
    {}

    // Read one line of whitespace-separated item labels from `fd` into the
    // global TransDB(); returns 1 on success, 0 at EOF.
    // NOTE(review): the transDB parameter is unused — loading goes through the
    // global TransDB(); signature kept for the Worker interface.
    virtual int getNextTrans(vector<TransT>& transDB){
        string line;
        int item;
        if(getline (fd, line)){
            TransDB().resize(TransDB().size()+1);
            Seq& tmp = TransDB().back();
            tmp.tid = trans_cnt++;
            // NOTE(review): istrstream (<strstream>) is deprecated; consider
            // istringstream once <sstream> is included.
            istrstream istrs ((char *)line.c_str());
            while (istrs >> item) tmp.seq.push_back (item);
            return 1;
        }
        return 0;
    }

    // Scan the whole DB once to build the single-item projected databases,
    // push a task per frequent item, and mark infrequent items with -1 so
    // later scans can skip them.
    virtual void setRoot(stack<PrefixTask*>& task_queue){
        ChildrenT children;
        if(TransDB().size() <= tauDB_omp)
        {
            for (int i=0; i<TransDB().size(); i++) {
                sequence& seq = TransDB()[i].seq;
                set<int> tmp; //to filter out labels that already occur in a previous position in the current seq
                for (unsigned int j = 0; j < seq.size(); j++) {
                    //avoid checking repeated label
                    int item = seq[j];
                    if (tmp.find(item) != tmp.end()) continue;
                    tmp.insert(item);
                    //add current seq to children's PDB
                    ChildrenT::iterator it = children.find(item);
                    if(it == children.end()){
                        vector<ProjSeq>* pdb = new vector<ProjSeq>;
                        pdb->emplace_back(i, j);
                        children[item] = pdb;
                    }
                    else{
                        vector<ProjSeq>* pdb = it->second;
                        pdb->emplace_back(i, j);
                    }
                }
            }
        }
        else
        {
            vector<ChildrenT> childrenOfThread(THREADS);
            // parallel-for, set the array of childrenOfThread
            #pragma omp parallel for schedule(dynamic, CHUNK) num_threads(THREADS)
            for (int i=0; i<TransDB().size(); i++) {
                int thread_id = omp_get_thread_num();
                ChildrenT& child_list = childrenOfThread[thread_id];
                //------
                sequence& seq = TransDB()[i].seq;
                set<int> tmp; //to filter out labels that already occur in a previous position in the current seq
                for (unsigned int j = 0; j < seq.size(); j++) {
                    //avoid checking repeated label
                    int item = seq[j];
                    if (tmp.find(item) != tmp.end()) continue;
                    tmp.insert(item);
                    //add current seq to children's PDB
                    ChildrenT::iterator it = child_list.find(item);
                    if(it == child_list.end()){
                        vector<ProjSeq>* pdb = new vector<ProjSeq>;
                        pdb->emplace_back(i, j);
                        child_list[item] = pdb;
                    }
                    else{
                        vector<ProjSeq>* pdb = it->second;
                        pdb->emplace_back(i, j);
                    }
                }
            }
            // merge childrenOfThread elements into children
            for(int i = 0; i < THREADS; i++){
                ChildrenT& children_i = childrenOfThread[i];
                for(ChildrenT::iterator it = children_i.begin(); it!= children_i.end(); it++){
                    Element label = it->first;
                    seqPDB* pdb_i = it->second;
                    ChildrenT::iterator it2 = children.find(label);
                    if(it2 == children.end()){
                        // Fix (leak): transfer ownership of the per-thread vector
                        // directly instead of new + swap, which abandoned pdb_i.
                        children[label] = pdb_i;
                    }
                    else
                    {
                        vector<ProjSeq>* pdb = it2->second;
                        pdb->insert(pdb->end(), pdb_i->begin(), pdb_i->end());
                        // Fix (leak): the per-thread vector was only clear()ed
                        // before, leaking the heap object itself.
                        delete pdb_i;
                    }
                }
                children_i.clear(); // drop the (now dangling/merged) pointers
            }
        }
        set<int> frequent_labels;
        //delete child patterns with PDBs not big enough
        for (ChildrenT::iterator it = children.begin(); it != children.end();)
        {
            vector<ProjSeq>* pdb = it->second;
            if (pdb->size() < minsup) {
                ChildrenT::iterator tmp = it;
                ++tmp;
                delete it->second;
                children.erase(it);
                it = tmp;
            } else {
                // frequent: hand the PDB's ownership to a new root task
                PrefixTask *t = new PrefixTask;
                t->pat.items.push_back(it->first);
                t->pat.projDB = it->second;
                task_queue.push(t);
                frequent_labels.insert(it->first);
                ++it;
            }
        }
        //make infrequent items invalid
        for(int i = 0; i<TransDB().size(); i++){
            for(sequence::iterator it = TransDB()[i].seq.begin(); it != TransDB()[i].seq.end();){
                if(frequent_labels.find(*it) == frequent_labels.end()){
                    *it = -1;
                }
                it++;
            }
        }
    }
};
#endif /* PREFIXSPAN_PREFIXSPAN_H_ */
|
setfeenv.c | #ifndef CRAY
# ifdef NOUNDERSCORE
# define SETFEENV setfeenv
# else
# ifdef F2CSTYLE
# define SETFEENV setfeenv__
# else
# define SETFEENV setfeenv_
# endif
# endif
#endif
#include <fenv.h>
#include <stdio.h>
/* Propagate the master thread's floating-point environment (rounding mode,
 * exception flags, ...) to every OpenMP thread. No-op when built without
 * OpenMP. Callable from Fortran via the SETFEENV name-mangling macro above. */
void SETFEENV()
{
#ifdef _OPENMP
    /* declared inside the guard so non-OpenMP builds get no unused-variable
     * warnings (previously envp/stat were always declared, and stat was set
     * but never read) */
    fenv_t envp;
    /* capture the current environment; error paths were previously commented
     * out and the return value silently discarded */
    if (fegetenv(&envp) != 0) {
        perror("Error getting fp env");
    }
#pragma omp parallel shared(envp)
    {
        if (fesetenv(&envp) != 0) {
            perror("Error setting fp env");
        }
    }
#endif
}
|
eavlCombinedTopologySparseMapOp.h | // Copyright 2010-2014 UT-Battelle, LLC. See LICENSE.txt for more information.
#ifndef EAVL_COMBINED_TOPOLOGY_SPARSE_MAP_OP_H
#define EAVL_COMBINED_TOPOLOGY_SPARSE_MAP_OP_H
#include "eavlCUDA.h"
#include "eavlCellSet.h"
#include "eavlCellSetExplicit.h"
#include "eavlCellSetAllStructured.h"
#include "eavlDataSet.h"
#include "eavlArray.h"
#include "eavlOpDispatch.h"
#include "eavlOperation.h"
#include "eavlTopology.h"
#include "eavlException.h"
#include <time.h>
#ifdef HAVE_OPENMP
#include <omp.h>
#endif
#ifndef DOXYGEN
// CPU (OpenMP) implementation of the combined-topology sparse map: for each
// dense output index, look up the sparse source index, gather the element's
// local topology ids from the connectivity, and apply the functor.
template <class CONN>
struct eavlCombinedTopologySparseMapOp_CPU
{
    static inline eavlArray::Location location() { return eavlArray::HOST; }
    template <class F, class IN0, class IN1, class OUT, class INDEX>
    static void call(int nitems, CONN &conn,
                     const IN0 s_inputs, const IN1 d_inputs, OUT outputs,
                     INDEX indices, F &functor)
    {
        // sparse-index lookup table (first array of the INDEX pack)
        int *sparseindices = get<0>(indices).array;
        int ids[MAX_LOCAL_TOPOLOGY_IDS]; // these are effectively our src indices
        // each thread gets its own ids[] scratch buffer
        #pragma omp parallel for private(ids)
        for (int denseindex = 0; denseindex < nitems; ++denseindex)
        {
            // map dense iteration index -> sparse element index
            int sparseindex = sparseindices[get<0>(indices).indexer.index(denseindex)];
            int nids;
            int shapeType = conn.GetElementComponents(sparseindex, nids, ids);
            // destination-topology inputs/outputs are gathered at the sparse index
            typename collecttype<IN1>::const_type in_d(collect(sparseindex, d_inputs));
            typename collecttype<OUT>::type out(collect(sparseindex, outputs));
            out = functor(shapeType, nids, ids, s_inputs, in_d);
        }
    }
};
#if defined __CUDACC__
template <class CONN, class F, class IN0, class IN1, class OUT, class INDEX>
__global__ void
eavlCombinedTopologyPackedMapOp_kernel(int nitems, CONN conn,
const IN0 s_inputs, const IN1 d_inputs, OUT outputs,
INDEX indices, F functor)
{
int *sparseindices = get<0>(indices).array;
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
int ids[MAX_LOCAL_TOPOLOGY_IDS];
for (int denseindex = threadID; denseindex < nitems; denseindex += numThreads)
{
int sparseindex = sparseindices[get<0>(indices).indexer.index(denseindex)];
int nids;
int shapeType = conn.GetElementComponents(sparseindex, nids, ids);
collect(sparseindex, outputs) = functor(shapeType, nids, ids, s_inputs,
collect(sparseindex, d_inputs));
}
}
template <class CONN>
struct eavlCombinedTopologyPackedMapOp_GPU
{
static inline eavlArray::Location location() { return eavlArray::DEVICE; }
template <class F, class IN0, class IN1, class OUT, class INDEX>
static void call(int nitems, CONN &conn,
const IN0 s_inputs, const IN1 d_inputs, OUT outputs,
INDEX indices, F &functor)
{
int numThreads = 256;
dim3 threads(numThreads, 1, 1);
dim3 blocks (32, 1, 1);
eavlCombinedTopologyPackedMapOp_kernel<<< blocks, threads >>>(nitems, conn,
s_inputs, d_inputs, outputs,
indices, functor);
CUDA_CHECK_ERROR();
}
};
#endif
#endif
// ****************************************************************************
// Class: eavlCombinedTopologySparseMapOp
//
// Purpose:
/// Map from one topological element in a mesh to another, with input
/// arrays on the source topology (at sparsely indexed locations) and the
/// destination topology, and with outputs on the destination topology.
/// In this sparse version of the operation, the inputs on the destination
/// topology and the outputs are all sparsely indexed by the index array.
//
// Programmer: Jeremy Meredith
// Creation: August 2, 2013
//
// Modifications:
// ****************************************************************************
// Operation object tying together the cell set, topology, argument bundles,
// and functor for the sparse combined-topology map (see the comment block
// above for the indexing semantics).
template <class IS, class ID, class O, class INDEX, class F>
class eavlCombinedTopologySparseMapOp : public eavlOperation
{
  protected:
    eavlCellSet *cells;     // cell set supplying the connectivity
    eavlTopology topology;  // which topological mapping to traverse
    IS s_inputs;            // inputs on the source topology
    ID d_inputs;            // inputs on the destination topology (sparsely indexed)
    O outputs;              // outputs (sparsely indexed)
    INDEX indices;          // sparse index array
    F functor;              // per-element operation
  public:
    eavlCombinedTopologySparseMapOp(eavlCellSet *c, eavlTopology t,
                                    IS is, ID id, O o, INDEX ind, F f)
        : cells(c), topology(t), s_inputs(is), d_inputs(id), outputs(o), indices(ind), functor(f)
    {
    }
    virtual void GoCPU()
    {
        // dispatch on the concrete cell-set type to obtain the right
        // connectivity representation
        eavlCellSetExplicit *elExp = dynamic_cast<eavlCellSetExplicit*>(cells);
        eavlCellSetAllStructured *elStr = dynamic_cast<eavlCellSetAllStructured*>(cells);
        // number of dense items is taken from the first output array
        int n = outputs.first.length();
        if (elExp)
        {
            eavlExplicitConnectivity &conn = elExp->GetConnectivity(topology);
            eavlOpDispatch<eavlCombinedTopologySparseMapOp_CPU<eavlExplicitConnectivity> >(n, conn, s_inputs, d_inputs, outputs, indices, functor);
        }
        else if (elStr)
        {
            eavlRegularConnectivity conn = eavlRegularConnectivity(elStr->GetRegularStructure(),topology);
            eavlOpDispatch<eavlCombinedTopologySparseMapOp_CPU<eavlRegularConnectivity> >(n, conn, s_inputs, d_inputs, outputs, indices, functor);
        }
        // NOTE(review): any other cell-set type silently does nothing here --
        // confirm whether an exception (as in GoGPU's non-CUDA path) was intended.
    }
    virtual void GoGPU()
    {
#ifdef HAVE_CUDA
        eavlCellSetExplicit *elExp = dynamic_cast<eavlCellSetExplicit*>(cells);
        eavlCellSetAllStructured *elStr = dynamic_cast<eavlCellSetAllStructured*>(cells);
        int n = outputs.first.length();
        if (elExp)
        {
            eavlExplicitConnectivity &conn = elExp->GetConnectivity(topology);
            // push the connectivity arrays to the device for the kernel, and
            // bring them back afterwards
            conn.shapetype.NeedOnDevice();
            conn.connectivity.NeedOnDevice();
            conn.mapCellToIndex.NeedOnDevice();
            eavlOpDispatch<eavlCombinedTopologySparseMapOp_GPU<eavlExplicitConnectivity> >(n, conn, s_inputs, d_inputs, outputs, indices, functor);
            conn.shapetype.NeedOnHost();
            conn.connectivity.NeedOnHost();
            conn.mapCellToIndex.NeedOnHost();
        }
        else if (elStr)
        {
            // regular connectivity is computed on the fly; nothing to migrate
            eavlRegularConnectivity conn = eavlRegularConnectivity(elStr->GetRegularStructure(),topology);
            eavlOpDispatch<eavlCombinedTopologySparseMapOp_GPU<eavlRegularConnectivity> >(n, conn, s_inputs, d_inputs, outputs, indices, functor);
        }
#else
        THROW(eavlException,"Executing GPU code without compiling under CUDA compiler.");
#endif
    }
};
// Helper function for type deduction: constructs the operation on the heap so
// callers need not spell out the template arguments. The caller owns the
// returned object and is responsible for deleting it.
template <class IS, class ID, class O, class INDEX, class F>
eavlCombinedTopologySparseMapOp<IS,ID,O,INDEX,F> *new_eavlCombinedTopologySparseMapOp(eavlCellSet *c, eavlTopology t,
                                                         IS is, ID id, O o, INDEX indices, F f)
{
    return new eavlCombinedTopologySparseMapOp<IS,ID,O,INDEX,F>(c,t,is,id,o,indices,f);
}
#endif
|
looptc_c1.c | #include <string.h>
/*
 * Transpose one page of filterbank data from channel-major (with rows padded
 * to padded_size samples) to time-major order, reversing channel order on the
 * way so frequency ordering matches the header.
 *
 * page:        input, ntabs * nchannels rows of padded_size bytes each
 * transposed:  output, written as ntimes rows of nchannels bytes
 *
 * NOTE(review): 'tab' does not appear in the output index, so with ntabs > 1
 * every tab overwrites the previous one's rows -- presumably the intended
 * index was (tab*ntimes + time)*nchannels, or the caller only ever passes
 * ntabs == 1; confirm before changing.
 * NOTE(review): the OpenMP parallel-for is spawned once per (tab, time) row
 * over only nchannels iterations -- fork/join overhead likely dominates;
 * parallelizing the 'time' loop instead would be cheaper. temp is a C99 VLA.
 */
void deinterleave(char *page, char *transposed, const int ntabs, const int nchannels, const int ntimes, const int padded_size) {
    int tab;
    for (tab = 0; tab < ntabs; tab++) {
        int time;
        for (time = 0; time < ntimes; time++) {
            // build temporary array containing a complete channel row
            char temp[nchannels];
            int channel;
#pragma omp parallel for
            for (channel = 0; channel < nchannels; channel++) {
                // reverse freq order to comply with header
                temp[nchannels-channel-1] = page[(tab*nchannels + channel) * padded_size + time];
            }
            // copy full row at once
            memcpy(&transposed[time*nchannels], temp, nchannels);
        }
    }
}
|
hdp.c | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <float.h>
#include <inttypes.h>
#include "hdp.h"
#include "hdp_math_utils.h"
#include "sonLib.h"
#include "ranlib.h"
#define N_IG_NUM_PARAMS 4
#ifndef MINUS_INF
#define MINUS_INF -0.5 * DBL_MAX
#endif
#ifndef M_PI
#define M_PI 3.14159265358979323846264338
#endif
// Role of a node within the factor tree.
typedef enum FactorType {
    BASE,    // root factor; caches normal-inverse-gamma posterior parameters
    MIDDLE,  // interior factor owned by a non-root Dirichlet process
    DATA_PT  // leaf factor wrapping a single observed data point
} FactorType;

// Node in the hierarchical factor tree.
struct Factor {
    FactorType factor_type;
    struct Factor* parent;        // NULL for BASE factors (see new_base_factor)
    stSet* children;              // child Factors; NULL for DATA_PT factors
    // BASE: malloc'd array of N_IG_NUM_PARAMS + 1 doubles (mu, nu, two_alpha,
    // beta, cached log posterior term); DATA_PT: pointer into hdp->data;
    // MIDDLE: NULL
    double* factor_data;
    struct DirichletProcess* dp;  // owning DP; NULL for DATA_PT factors
};

// One Dirichlet process in the hierarchy.
struct DirichletProcess {
    int64_t id;                            // index into hdp->dps
    struct HierarchicalDirichletProcess* hdp;  // back-pointer to the owning HDP
    double* gamma;                         // concentration parameter (pointer, shared per depth level)
    int64_t depth;                         // distance from the base DP
    struct DirichletProcess* parent;       // NULL for the base DP
    stList* children;                      // child DirichletProcess objects
    stSet* factors;                        // Factors owned by this DP
    int64_t num_factor_children;           // total child count across this DP's factors
    double base_factor_wt;
    double* posterior_predictive;          // presumably grid_length samples -- set outside this chunk
    double* spline_slopes;                 // spline interpolation state -- set outside this chunk
    // scratch slots filled by callers of the *_joint_log_likelihood routines
    double cached_factor_mean;
    double cached_factor_sum_sq_dev;
    int64_t cached_factor_size;
    bool observed;                         // whether any data was assigned to this DP
};

typedef struct Factor Factor;
typedef struct DirichletProcess DirichletProcess;
// Top-level state for the hierarchical Dirichlet process sampler.
struct HierarchicalDirichletProcess {
    bool finalized;            // structure (DP tree) locked
    double* data;              // observed data points
    int64_t* data_pt_dp_id;    // per data point: id of the DP it was assigned to
    int64_t data_length;
    struct DirichletProcess* base_dp;   // root of the DP tree
    struct DirichletProcess** dps;      // all DPs, indexed by id
    int64_t num_dps;
    // normal-inverse gamma parameters
    double mu;
    double nu;
    double two_alpha;          // stored as 2*alpha (see new_hier_dir_proc / get_alpha)
    double beta;
    double* sampling_grid;     // grid_length evenly spaced evaluation points
    int64_t grid_length;
    int64_t samples_taken;
    bool splines_finalized;    // posterior-predictive splines computed
    // TODO: replace this with my new offset log gamma memo
    //struct SumOfLogsMemo* log_sum_memo;
    int64_t depth;             // depth of the DP tree; gamma arrays have this length
    bool sample_gamma;         // true when concentration params have a Gamma prior
    double* gamma;             // per-depth concentration parameters
    double* gamma_alpha;       // per-depth Gamma-prior shape (NULL if !sample_gamma)
    double* gamma_beta;        // per-depth Gamma-prior rate (NULL if !sample_gamma)
    double* w_aux_vector;      // auxiliary variables for gamma resampling
    bool* s_aux_vector;        // auxiliary variables for gamma resampling
    stSet* distr_metric_memos; // registered DistributionMetricMemo objects (owned)
};

// Memoized pairwise distribution distances between the HDP's DPs.
struct DistributionMetricMemo {
    int64_t num_distrs;        // == hdp->num_dps at creation
    // packed strict upper triangle, num_dps*(num_dps-1)/2 entries;
    // -1.0 presumably marks "not yet computed" (entries are initialized to it)
    double* memo_matrix;
    HierarchicalDirichletProcess* hdp;
    double (*metric_func) (HierarchicalDirichletProcess*, int64_t, int64_t);
};
// True once the DP tree structure has been finalized.
bool is_structure_finalized(HierarchicalDirichletProcess* hdp) {
    return hdp->finalized;
}

// True when the concentration parameters carry a Gamma prior and are sampled.
bool is_gamma_random(HierarchicalDirichletProcess* hdp) {
    return hdp->sample_gamma;
}

// True once posterior-predictive splines have been finalized.
bool is_sampling_finalized(HierarchicalDirichletProcess* hdp) {
    return hdp->splines_finalized;
}

// Whether the DP with the given index ever had data assigned to it.
// Note: kmerIndex is not bounds-checked.
bool hdp_check_for_observed(HierarchicalDirichletProcess *hdp, int64_t kmerIndex) {
    return hdp->dps[kmerIndex]->observed;
}

// Number of Dirichlet processes in the hierarchy.
int64_t get_num_dir_proc(HierarchicalDirichletProcess* hdp) {
    return hdp->num_dps;
}

// Depth of the DP tree (length of the per-depth gamma arrays).
int64_t get_depth(HierarchicalDirichletProcess* hdp) {
    return hdp->depth;
}

// Number of data points attached to the HDP.
int64_t get_num_data(HierarchicalDirichletProcess* hdp) {
    return hdp->data_length;
}
// Returns a freshly malloc'd copy of the observed data vector
// (length == data_length). Caller owns the returned buffer.
double* get_data_copy(HierarchicalDirichletProcess* hdp) {
    const int64_t n = hdp->data_length;
    double* out = (double*) malloc(sizeof(double) * n);
    int64_t i = 0;
    while (i < n) {
        out[i] = hdp->data[i];
        ++i;
    }
    return out;
}

// Returns a freshly malloc'd copy of the per-data-point DP id assignments
// (length == data_length). Caller owns the returned buffer.
int64_t* get_data_pt_dp_ids_copy(HierarchicalDirichletProcess* hdp) {
    const int64_t n = hdp->data_length;
    int64_t* out = (int64_t*) malloc(sizeof(int64_t) * n);
    int64_t i = 0;
    while (i < n) {
        out[i] = hdp->data_pt_dp_id[i];
        ++i;
    }
    return out;
}

// Returns a freshly malloc'd copy of the per-depth concentration parameters
// (length == depth). Caller owns the returned buffer.
double* get_gamma_params_copy(HierarchicalDirichletProcess* hdp) {
    const int64_t n = hdp->depth;
    double* out = (double*) malloc(sizeof(double) * n);
    int64_t i = 0;
    while (i < n) {
        out[i] = hdp->gamma[i];
        ++i;
    }
    return out;
}
// Normal-inverse-gamma prior mean.
double get_mu(HierarchicalDirichletProcess* hdp) {
    return hdp->mu;
}

// Normal-inverse-gamma prior pseudo-count nu.
double get_nu(HierarchicalDirichletProcess* hdp) {
    return hdp->nu;
}

// Normal-inverse-gamma prior alpha (stored internally doubled as two_alpha).
double get_alpha(HierarchicalDirichletProcess* hdp) {
    return hdp->two_alpha / 2.0;
}

// Normal-inverse-gamma prior beta.
double get_beta(HierarchicalDirichletProcess* hdp) {
    return hdp->beta;
}

// Number of points in the posterior-predictive sampling grid.
int64_t get_grid_length(HierarchicalDirichletProcess* hdp) {
    return hdp->grid_length;
}

// Returns a freshly malloc'd copy of the sampling grid
// (length == grid_length). Caller owns the returned buffer.
double* get_sampling_grid_copy(HierarchicalDirichletProcess* hdp) {
    int64_t grid_length = hdp->grid_length;
    double* sampling_grid = (double*) malloc(sizeof(double) * grid_length);
    for (int64_t i = 0; i < grid_length; i++) {
        sampling_grid[i] = hdp->sampling_grid[i];
    }
    return sampling_grid;
}
// Returns a freshly malloc'd copy of the per-depth Gamma-prior shape
// parameters (length == depth). Caller owns the buffer. Exits if the HDP was
// constructed with fixed concentration parameters (no Gamma prior).
double* get_gamma_alpha_params_copy(HierarchicalDirichletProcess* hdp) {
    if (!hdp->sample_gamma) {
        // fix: message previously lacked the trailing newline every other
        // stderr message in this file carries
        fprintf(stderr, "Hierarchical Dirichlet process is not sampling gamma parameters.\n");
        exit(EXIT_FAILURE);
    }
    int64_t depth = hdp->depth;
    double* gamma_alpha = (double*) malloc(sizeof(double) * depth);
    for (int64_t i = 0; i < depth; i++) {
        gamma_alpha[i] = hdp->gamma_alpha[i];
    }
    return gamma_alpha;
}

// Returns a freshly malloc'd copy of the per-depth Gamma-prior rate
// parameters (length == depth). Caller owns the buffer. Exits if the HDP was
// constructed with fixed concentration parameters (no Gamma prior).
double* get_gamma_beta_params_copy(HierarchicalDirichletProcess* hdp) {
    if (!hdp->sample_gamma) {
        // fix: trailing newline added, matching the file's other messages
        fprintf(stderr, "Hierarchical Dirichlet process is not sampling gamma parameters.\n");
        exit(EXIT_FAILURE);
    }
    int64_t depth = hdp->depth;
    double* gamma_beta = (double*) malloc(sizeof(double) * depth);
    for (int64_t i = 0; i < depth; i++) {
        gamma_beta[i] = hdp->gamma_beta[i];
    }
    return gamma_beta;
}
// Number of factors currently owned by the DP with the given id.
// Exits on an out-of-range id.
int64_t get_dir_proc_num_factors(HierarchicalDirichletProcess* hdp, int64_t dp_id) {
    if (dp_id < 0 || dp_id >= hdp->num_dps) {
        fprintf(stderr, "Hierarchical Dirichlet process has no Dirichlet process with this ID.\n");
        exit(EXIT_FAILURE);
    }
    DirichletProcess* dp = hdp->dps[dp_id];
    return stSet_size(dp->factors);
}

// Id of the parent DP of the DP with the given id, or -1 for the base DP.
// Exits on an out-of-range id.
int64_t get_dir_proc_parent_id(HierarchicalDirichletProcess* hdp, int64_t dp_id) {
    if (dp_id < 0 || dp_id >= hdp->num_dps) {
        fprintf(stderr, "Hierarchical Dirichlet process has no Dirichlet process with this ID.\n");
        exit(EXIT_FAILURE);
    }
    DirichletProcess* dp = hdp->dps[dp_id];
    if (dp->parent == NULL) {
        // base DP has no parent
        return -1;
    }
    else {
        return dp->parent->id;
    }
}
// Allocates a memo table for pairwise distances between the HDP's DP
// distributions under the given metric. The packed triangular matrix is
// initialized to -1.0 (presumably the "not yet computed" sentinel -- entries
// are filled lazily elsewhere). The memo registers itself with the HDP, which
// owns it (see the stSet destructor installed in new_hier_dir_proc).
DistributionMetricMemo* new_distr_metric_memo(HierarchicalDirichletProcess* hdp,
                                              double (*metric_func) (HierarchicalDirichletProcess*, int64_t, int64_t)) {
    DistributionMetricMemo* memo = (DistributionMetricMemo*) malloc(sizeof(DistributionMetricMemo));
    int64_t num_dps = hdp->num_dps;
    memo->num_distrs = num_dps;
    // strict upper triangle of an num_dps x num_dps symmetric matrix
    int64_t num_entries = ((num_dps - 1) * num_dps) / 2;
    double* memo_matrix = (double*) malloc(sizeof(double) * num_entries);
    memo->memo_matrix = memo_matrix;
    for (int64_t i = 0; i < num_entries; i++) {
        memo_matrix[i] = -1.0;
    }
    memo->hdp = hdp;
    memo->metric_func = metric_func;
    stSet_insert(hdp->distr_metric_memos, memo);
    return memo;
}

// stSet destructor callback: frees one memo and its matrix.
void destroy_distr_metric_memo(void* memo) {
    DistributionMetricMemo* metric_memo = (DistributionMetricMemo*) memo;
    free(metric_memo->memo_matrix);
    free(metric_memo);
}
// Stores the current normal-inverse-gamma posterior parameters plus the
// precomputed log posterior normalization term into a BASE factor's
// 5-slot factor_data array (layout: mu, nu, two_alpha, beta, log term).
// Exits if called on a non-BASE factor, which has no such cache.
void cache_base_factor_params(Factor* fctr, double mu, double nu, double two_alpha, double beta, double log_post_term) {
    if (fctr->factor_type != BASE) {
        fprintf(stderr, "Can only cache parameters for base factors.\n");
        exit(EXIT_FAILURE);
    }
    double* param_array = fctr->factor_data;
    param_array[0] = mu;
    param_array[1] = nu;
    param_array[2] = two_alpha;
    param_array[3] = beta;
    param_array[4] = log_post_term;
}
// Creates a BASE factor seeded with the HDP's prior NIG parameters and
// registers it with the base DP. The 5th factor_data slot (log posterior
// term) is initialized to 1.0 here and recomputed on the first update.
Factor* new_base_factor(HierarchicalDirichletProcess* hdp) {
    Factor* fctr = (Factor*) malloc(sizeof(Factor));
    fctr->factor_type = BASE;
    // N_IG_NUM_PARAMS NIG parameters + 1 cached log posterior term
    fctr->factor_data = (double*) malloc(sizeof(double) * (N_IG_NUM_PARAMS + 1));
    cache_base_factor_params(fctr, hdp->mu, hdp->nu, hdp->two_alpha, hdp->beta, 1.0);
    fctr->parent = NULL;
    fctr->children = stSet_construct();
    DirichletProcess* base_dp = hdp->base_dp;
    fctr->dp = base_dp;
    stSet_insert(base_dp->factors, (void*) fctr);
    return fctr;
}

// Creates a MIDDLE factor owned by the given (non-root) DP. The caller is
// responsible for linking it to a parent factor. Exits for the root DP,
// whose factors must be BASE factors.
Factor* new_middle_factor(DirichletProcess* dp) {
    if (dp->parent == NULL) {
        fprintf(stderr, "Attempted to create middle factor in root Dirichlet process.\n");
        exit(EXIT_FAILURE);
    }
    Factor* fctr = (Factor*) malloc(sizeof(Factor));
    fctr->factor_type = MIDDLE;
    fctr->factor_data = NULL;
    // note: assigning to parent handled externally
    fctr->parent = NULL;
    fctr->children = stSet_construct();
    fctr->dp = dp;
    stSet_insert(dp->factors, (void*) fctr);
    return fctr;
}

// Creates a DATA_PT leaf factor whose factor_data points directly into the
// HDP's data array (not owned). The caller links it to a parent factor.
Factor* new_data_pt_factor(HierarchicalDirichletProcess* hdp, int64_t data_pt_idx) {
    Factor* fctr = (Factor*) malloc(sizeof(Factor));
    fctr->factor_type = DATA_PT;
    fctr->factor_data = &(hdp->data[data_pt_idx]);
    // note: assigning to parent handled externally
    fctr->parent = NULL;
    fctr->children = NULL;
    fctr->dp = NULL;
    return fctr;
}
// Destroys a childless factor, detaching it from its parent and owning DP.
// If removal empties the parent's child set, the parent is destroyed too, so
// destruction cascades up the tree. Exits if the factor still has children.
// A DATA_PT's factor_data points into hdp->data and is not freed; only BASE
// factors own their factor_data.
void destroy_factor(Factor* fctr) {
    stSet* children = fctr->children;
    if (children != NULL) {
        if (stSet_size(children) > 0) {
            fprintf(stderr, "Attempted to destroy factor that still has children.\n");
            exit(EXIT_FAILURE);
        }
        stSet_destruct(children);
    }
    Factor* parent = fctr->parent;
    if (parent != NULL) {
        stSet_remove(parent->children, (void*) fctr);
        // keep the parent DP's aggregate child count in sync
        (parent->dp->num_factor_children)--;
        if (stSet_size(parent->children) == 0) {
            // cascade: parent became childless, so remove it as well
            destroy_factor(parent);
        }
    }
    if (fctr->factor_type == BASE) {
        free(fctr->factor_data);
    }
    DirichletProcess* dp = fctr->dp;
    if (dp != NULL) {
        stSet_remove(dp->factors, (void*) fctr);
    }
    free(fctr);
}

// Walks parent links up to the BASE factor. Returns NULL if the chain is
// broken (a factor with no parent that is not itself BASE).
Factor* get_base_factor(Factor* fctr) {
    while (fctr->factor_type != BASE) {
        fctr = fctr->parent;
        if (fctr == NULL) {
            break;
        }
    }
    return fctr;
}

// The observed value wrapped by a DATA_PT leaf factor. Exits for any other
// factor type (their factor_data has a different meaning or is NULL).
double get_factor_data_pt(Factor* fctr) {
    if (fctr->factor_type != DATA_PT) {
        fprintf(stderr, "Attempted to access data point from non-leaf factor.\n");
        exit(EXIT_FAILURE);
    }
    return *(fctr->factor_data);
}
// Recursively accumulates the sum of all data points under a factor and their
// count into *sum / *num_data (callers pre-zero both).
void get_factor_sum_internal(Factor* fctr, double* sum, int64_t* num_data) {
    if (fctr->factor_type == DATA_PT) {
        *sum += get_factor_data_pt(fctr);
        // TODO: there should be a way to use the parent's counters instead of recounting the data pts
        (*num_data)++;
    }
    else {
        stSetIterator* child_iter = stSet_getIterator(fctr->children);
        Factor* child_fctr = (Factor*) stSet_getNext(child_iter);
        while (child_fctr != NULL) {
            get_factor_sum_internal(child_fctr, sum, num_data);
            child_fctr = (Factor*) stSet_getNext(child_iter);
        }
        stSet_destructIterator(child_iter);
    }
}

// Recursively accumulates the sum of squared deviations from 'center' over
// all data points under a factor into *sum_sq_dev (caller pre-zeroes it).
void get_factor_ssd_internal(Factor* fctr, double center, double* sum_sq_dev) {
    if (fctr->factor_type == DATA_PT) {
        double dev = get_factor_data_pt(fctr) - center;
        *sum_sq_dev += dev * dev;
    }
    else {
        stSetIterator* child_iter = stSet_getIterator(fctr->children);
        Factor* child_fctr = (Factor*) stSet_getNext(child_iter);
        while (child_fctr != NULL) {
            get_factor_ssd_internal(child_fctr, center, sum_sq_dev);
            child_fctr = (Factor*) stSet_getNext(child_iter);
        }
        stSet_destructIterator(child_iter);
    }
}

// Two-pass mean and sum-of-squared-deviations over all data points under a
// factor. NOTE(review): if the subtree holds no data points the mean divides
// by zero (NaN) -- presumably callers only pass factors with data; confirm.
void get_factor_stats(Factor* fctr, double* mean_out, double* sum_sq_dev_out, int64_t* num_data_out) {
    *mean_out = 0.0;
    *sum_sq_dev_out = 0.0;
    *num_data_out = 0;
    get_factor_sum_internal(fctr, mean_out, num_data_out);
    *mean_out /= (double) *num_data_out;
    get_factor_ssd_internal(fctr, *mean_out, sum_sq_dev_out);
}
// Conjugate normal-inverse-gamma update: folds a batch of data (summarized by
// its mean, sum of squared deviations, and count) into a BASE factor's cached
// posterior parameters, and refreshes the cached log posterior term.
void add_update_base_factor_params(Factor* fctr, double mean, double sum_sq_devs, double num_data) {
    double* param_array = fctr->factor_data;
    double mu_prev = param_array[0];
    double nu_prev = param_array[1];
    double two_alpha_prev = param_array[2];
    double beta_prev = param_array[3];
    double nu_post = nu_prev + num_data;
    double mu_post = (mu_prev * nu_prev + mean * num_data) / nu_post;
    double two_alpha_post = two_alpha_prev + num_data;
    double mean_dev = mean - mu_prev;
    // cross term between the prior mean and the batch mean
    double sq_mean_dev = nu_prev * num_data * mean_dev * mean_dev / nu_post;
    double beta_post = beta_prev + .5 * (sum_sq_devs + sq_mean_dev);
    double log_post_term = log_posterior_conditional_term(nu_post, two_alpha_post, beta_post);//,
    //fctr->dp->hdp->log_sum_memo);
    cache_base_factor_params(fctr, mu_post, nu_post, two_alpha_post, beta_post, log_post_term);
}

// Exact inverse of add_update_base_factor_params: removes a batch of data
// from a BASE factor's cached posterior parameters. Note the downdate first
// recovers the previous mean/nu, then subtracts the same cross term that the
// add step added (denominator is nu_post, mirroring the update).
void remove_update_base_factor_params(Factor* fctr, double mean, double sum_sq_devs, double num_data) {
    double* param_array = fctr->factor_data;
    double mu_post = param_array[0];
    double nu_post = param_array[1];
    double two_alpha_post = param_array[2];
    double beta_post = param_array[3];
    double nu_prev = nu_post - num_data;
    double mu_prev = (mu_post * nu_post - mean * num_data) / nu_prev;
    double two_alpha_prev = two_alpha_post - num_data;
    double mean_dev = mean - mu_prev;
    double sq_mean_dev = nu_prev * num_data * mean_dev * mean_dev / nu_post;
    double beta_prev = beta_post - 0.5 * (sum_sq_devs + sq_mean_dev);
    double log_post_term = log_posterior_conditional_term(nu_prev, two_alpha_prev, beta_prev);//,
    //fctr->dp->hdp->log_sum_memo);
    cache_base_factor_params(fctr, mu_prev, nu_prev, two_alpha_prev, beta_prev, log_post_term);
}
// Joint log likelihood of reassigning the batch of data cached in fctr->dp
// (cached_factor_size / _mean / _sum_sq_dev, filled by the caller outside
// this chunk) to the base factor above 'parent'. Computed as the ratio of
// posterior normalization terms with and without the batch.
double factor_parent_joint_log_likelihood(Factor* fctr, Factor* parent) {
    Factor* base_fctr = get_base_factor(parent);
    DirichletProcess* dp = fctr->dp;
    double num_reassign = (double) dp->cached_factor_size;
    double mean_reassign = dp->cached_factor_mean;
    double sum_sq_devs = dp->cached_factor_sum_sq_dev;
    double* param_array = base_fctr->factor_data;
    double mu_denom = param_array[0];
    double nu_denom = param_array[1];
    double two_alpha_denom = param_array[2];
    double beta_denom = param_array[3];
    // posterior parameters as if the batch were added (same algebra as
    // add_update_base_factor_params)
    double nu_numer = nu_denom + num_reassign;
    double two_alpha_numer = two_alpha_denom + num_reassign;
    double mean_dev = mean_reassign - mu_denom;
    double sq_mean_dev = nu_denom * num_reassign * mean_dev * mean_dev / nu_numer;
    double beta_numer = beta_denom + 0.5 * (sum_sq_devs + sq_mean_dev);
    double log_denom = param_array[4];   // cached log posterior term
    double log_numer = log_posterior_conditional_term(nu_numer, two_alpha_numer, beta_numer);//,
    //dp->hdp->log_sum_memo);
    return -0.5 * num_reassign * log(2.0 * M_PI) + log_numer - log_denom;
}

// Predictive likelihood (not log) of a single data point under the base
// factor above 'parent': the one-point specialization of the ratio above.
// Exits if data_pt_fctr is not a DATA_PT leaf.
double data_pt_factor_parent_likelihood(Factor* data_pt_fctr, Factor* parent) {
    if (data_pt_fctr->factor_type != DATA_PT) {
        fprintf(stderr, "Can only access data point likelihood for data point factors.\n");
        exit(EXIT_FAILURE);
    }
    double data_pt = get_factor_data_pt(data_pt_fctr);
    Factor* base_fctr = get_base_factor(parent);
    double* param_array = base_fctr->factor_data;
    double mu_denom = param_array[0];
    double nu_denom = param_array[1];
    double two_alpha_denom = param_array[2];
    double beta_denom = param_array[3];
    double nu_numer = nu_denom + 1.0;
    double mean_dev = data_pt - mu_denom;
    double sq_mean_dev = nu_denom * mean_dev * mean_dev / nu_numer;
    double two_alpha_numer = two_alpha_denom + 1.0;
    double beta_numer = beta_denom + 0.5 * sq_mean_dev;
    double log_denom = param_array[4];
    double log_numer = log_posterior_conditional_term(nu_numer, two_alpha_numer, beta_numer);//,
    //base_fctr->dp->hdp->log_sum_memo);
    return (1.0 / sqrt(2.0 * M_PI)) * exp(log_numer - log_denom);
}
// Evaluates the posterior predictive density of a BASE factor at each of the
// 'length' points in x, writing results into pdf_out. Same ratio-of-
// normalizers construction as data_pt_factor_parent_likelihood, with the
// grid-independent pieces hoisted out of the loop. Exits for non-BASE factors.
void evaluate_posterior_predictive(Factor* base_fctr, double* x, double* pdf_out, int64_t length) {//,
//SumOfLogsMemo* log_sum_memo) {
    if (base_fctr->factor_type != BASE) {
        fprintf(stderr, "Can only evaluate posterior predictive of base factors.\n");
        exit(EXIT_FAILURE);
    }
    double* param_array = base_fctr->factor_data;
    double mu_denom = param_array[0];
    double nu_denom = param_array[1];
    double two_alpha_denom = param_array[2];
    double beta_denom = param_array[3];
    double log_denom = param_array[4];
    double nu_numer = nu_denom + 1.0;
    double two_alpha_numer = two_alpha_denom + 1.0;
    double nu_ratio = nu_denom / nu_numer;
    double pi_factor = 1.0 / sqrt(2.0 * M_PI);
    double mean_dev;
    double sq_mean_dev;
    double beta_numer;
    double log_numer;
    for (int64_t i = 0; i < length; i++) {
        mean_dev = x[i] - mu_denom;
        sq_mean_dev = nu_ratio * mean_dev * mean_dev;
        beta_numer = beta_denom + 0.5 * sq_mean_dev;
        log_numer = log_posterior_conditional_term(nu_numer, two_alpha_numer, beta_numer);//,
        //log_sum_memo);
        pdf_out[i] = pi_factor * exp(log_numer - log_denom);
    }
}

// Evaluates the prior predictive density (NIG-marginal, closed form) at each
// of the 'length' points in x, writing results into pdf_out.
void evaluate_prior_predictive(HierarchicalDirichletProcess* hdp,
                               double* x, double* pdf_out, int64_t length) {
    //TODO: this could be made more efficient with some precomputed variables stashed in HDP
    double mu = hdp->mu;
    double nu = hdp->nu;
    double two_alpha = hdp->two_alpha;
    double beta = hdp->beta;
    double nu_factor = nu / (2.0 * (nu + 1.0) * beta);
    //double alpha_term = exp(log_gamma_half(two_alpha + 1, hdp->log_sum_memo)
    //                        - log_gamma_half(two_alpha, hdp->log_sum_memo));
    double alpha_term = exp(lgamma(.5 * (two_alpha + 1.0)) - lgamma(.5 * two_alpha));
    double beta_term = sqrt(nu_factor / M_PI);
    double constant_term = alpha_term * beta_term;
    double alpha_power = -0.5 * (two_alpha + 1.0);
    for (int64_t i = 0; i < length; i++) {
        double dev = x[i] - mu;
        double var_term = pow(1.0 + nu_factor * dev * dev, alpha_power);
        pdf_out[i] = constant_term * var_term;
    }
}
// Prior predictive likelihood (closed form, same density as
// evaluate_prior_predictive) of the single data point wrapped by a DATA_PT
// factor. Exits if given any other factor type.
double prior_likelihood(HierarchicalDirichletProcess* hdp, Factor* fctr) {
    if (fctr->factor_type != DATA_PT) {
        fprintf(stderr, "Cannot calculate point prior likelihood from non-data point factor.\n");
        // fix: previously fell through after reporting the error; every other
        // guard in this file exits here
        exit(EXIT_FAILURE);
    }
    //TODO: this could be made more efficient with some precomputed variables stashed in HDP
    double mu = hdp->mu;
    double nu = hdp->nu;
    double dbl_two_alpha = hdp->two_alpha;
    //int64_t two_alpha = (int64_t) dbl_two_alpha;
    double beta = hdp->beta;
    double data_pt = get_factor_data_pt(fctr);
    double dev = data_pt - mu;
    //double alpha_term = exp(log_gamma_half(two_alpha + 1, hdp->log_sum_memo)
    //                        - log_gamma_half(two_alpha, hdp->log_sum_memo));
    double alpha_term = exp(lgamma(.5 * (dbl_two_alpha + 1.0)) - lgamma(.5 * dbl_two_alpha));
    double nu_term = nu / (2.0 * (nu + 1.0) * beta);
    double beta_term = pow(1.0 + nu_term * dev * dev, -0.5 * (dbl_two_alpha + 1.0));
    return alpha_term * sqrt(nu_term / M_PI) * beta_term;
}

// Joint log prior likelihood of the batch of data cached in fctr->dp
// (cached_factor_size / _mean / _sum_sq_dev, filled by the caller). Exits if
// given a non-MIDDLE factor.
double prior_joint_log_likelihood(HierarchicalDirichletProcess* hdp, Factor* fctr) {
    if (fctr->factor_type != MIDDLE) {
        fprintf(stderr, "Cannot calculate joint prior likelihood from non-middle factor.\n");
        // fix: previously fell through and computed a result from a factor of
        // the wrong type; exit like every other guard in this file
        exit(EXIT_FAILURE);
    }
    double mu = hdp->mu;
    double nu = hdp->nu;
    double dbl_two_alpha = hdp->two_alpha;
    //int64_t two_alpha = (int64_t) dbl_two_alpha;
    double beta = hdp->beta;
    DirichletProcess* dp = fctr->dp;
    int64_t num_reassign = dp->cached_factor_size;
    double dbl_reassign = (double) num_reassign;
    double mean_reassign = dp->cached_factor_mean;
    double sum_sq_devs = dp->cached_factor_sum_sq_dev;
    double mean_dev = mean_reassign - mu;
    double sq_mean_dev = nu * dbl_reassign * mean_dev * mean_dev / (nu + dbl_reassign);
    //double log_alpha_term = log_gamma_half(two_alpha + num_reassign, hdp->log_sum_memo)
    //                        - log_gamma_half(two_alpha, hdp->log_sum_memo);
    double log_alpha_term = lgamma(.5 * (dbl_two_alpha + dbl_reassign)) - lgamma(.5 * dbl_two_alpha);
    double log_nu_term = 0.5 * (log(nu) - log(nu + dbl_reassign));
    double log_pi_term = 0.5 * dbl_reassign * log(2.0 * M_PI);
    double log_beta_term_1 = dbl_two_alpha * log(beta);
    double log_beta_term_2 = (dbl_two_alpha + dbl_reassign)
                             * log(beta + 0.5 * (sum_sq_devs + sq_mean_dev));
    return log_alpha_term + log_nu_term - log_pi_term + 0.5 * (log_beta_term_1 - log_beta_term_2);
}
// TODO: figure out how to break into chunks and spin up threads to reduce the sum behind the iterator
// Predictive likelihood of a (data point) factor under a DP to which it has
// not been assigned: the CRP mixture over the parent's existing factors plus
// the recursive "new table" term, normalized by (gamma + child count).
// Parallel structure (OpenMP): one thread takes the recursive call ('single
// nowait'), another snapshots the parent's factor set into an array ('single'
// -- its implicit barrier guarantees the array is filled before the worksharing
// loop runs); each thread accumulates a private partial sum over its slice of
// factors and folds it in under 'critical'. next_height_unobs_likelihood is
// only read after the parallel region's closing barrier, so the nowait is safe.
double unobserved_factor_likelihood(Factor* fctr, DirichletProcess* dp) {
    DirichletProcess* parent_dp = dp->parent;
    if (parent_dp == NULL) {
        // base of the recursion: the prior predictive
        return prior_likelihood(dp->hdp, fctr);
    }
    else {
        double parent_gamma = *(parent_dp->gamma);
        double likelihood = 0.0;
        double next_height_unobs_likelihood;
        int64_t num_parent_fctrs = stSet_size(parent_dp->factors);
        Factor** parent_fctrs = (Factor**) malloc(sizeof(Factor*) * num_parent_fctrs);
#pragma omp parallel shared(likelihood,next_height_unobs_likelihood,parent_dp,num_parent_fctrs,parent_fctrs)
        {
#pragma omp single nowait
            next_height_unobs_likelihood = unobserved_factor_likelihood(fctr, parent_dp);
#pragma omp single
            {
                stSetIterator* parent_fctr_iter = stSet_getIterator(parent_dp->factors);
                for (int64_t i = 0; i < num_parent_fctrs; i++) {
                    parent_fctrs[i] = (Factor*) stSet_getNext(parent_fctr_iter);
                }
                stSet_destructIterator(parent_fctr_iter);
            }
            double local_likelihood = 0.0;
            Factor* parent_fctr;
#pragma omp for nowait
            for (int64_t i = 0; i < num_parent_fctrs; i++) {
                parent_fctr = parent_fctrs[i];
                // weight each existing factor by its number of children (CRP)
                local_likelihood += stSet_size(parent_fctr->children) * data_pt_factor_parent_likelihood(fctr, parent_fctr);
            }
#pragma omp critical
            likelihood += local_likelihood;
        }
        free(parent_fctrs);
        likelihood += parent_gamma * next_height_unobs_likelihood;
        likelihood /= (parent_gamma + (double) parent_dp->num_factor_children);
        return likelihood;
    }
}
//double unobserved_factor_likelihood(Factor* fctr, DirichletProcess* dp) {
// DirichletProcess* parent_dp = dp->parent;
// if (parent_dp == NULL) {
// return prior_likelihood(dp->hdp, fctr);
// }
// else {
// double parent_gamma = *(parent_dp->gamma);
// double likelihood = 0.0;
//
// stSetIterator* parent_fctr_iter = stSet_getIterator(parent_dp->factors);
//
// Factor* parent_fctr = (Factor*) stSet_getNext(parent_fctr_iter);
// double fctr_size;
// while (parent_fctr != NULL) {
// fctr_size = (double) stSet_size(parent_fctr->children);
// likelihood += fctr_size * data_pt_factor_parent_likelihood(fctr, parent_fctr);
// parent_fctr = (Factor*) stSet_getNext(parent_fctr_iter);
// }
// stSet_destructIterator(parent_fctr_iter);
//
// likelihood += parent_gamma * unobserved_factor_likelihood(fctr, parent_dp);
//
// likelihood /= (parent_gamma + (double) parent_dp->num_factor_children);
//
// return likelihood;
// }
//}
// Log-space analogue of unobserved_factor_likelihood for a batch of data
// (summarized in fctr->dp's cached_factor_* scratch): sums are replaced by
// add_logs with MINUS_INF as the additive identity, and the CRP weights enter
// as log(child count). Same OpenMP choreography as the linear-space version:
// 'single nowait' for the recursion, 'single' (with barrier) to snapshot the
// factor set, per-thread partials folded in under 'critical', and the
// recursive result read only after the region's closing barrier.
double unobserved_factor_joint_log_likelihood(Factor* fctr, DirichletProcess* dp) {
    DirichletProcess* parent_dp = dp->parent;
    if (parent_dp == NULL) {
        // base of the recursion: the joint prior
        return prior_joint_log_likelihood(dp->hdp, fctr);
    }
    else {
        double parent_gamma = *(parent_dp->gamma);
        double log_likelihood = MINUS_INF;
        int64_t num_parent_fctrs = stSet_size(parent_dp->factors);
        Factor** parent_fctrs = (Factor**) malloc(sizeof(Factor*) * num_parent_fctrs);
        double next_height_unobs_log_likelihood;
#pragma omp parallel shared(log_likelihood,next_height_unobs_log_likelihood,parent_dp,num_parent_fctrs,parent_fctrs)
        {
#pragma omp single nowait
            next_height_unobs_log_likelihood = unobserved_factor_joint_log_likelihood(fctr, parent_dp);
#pragma omp single
            {
                stSetIterator* parent_fctr_iter = stSet_getIterator(parent_dp->factors);
                for (int64_t i = 0; i < num_parent_fctrs; i++) {
                    parent_fctrs[i] = (Factor*) stSet_getNext(parent_fctr_iter);
                }
                stSet_destructIterator(parent_fctr_iter);
            }
            double local_log_likelihood = MINUS_INF;
            double log_fctr_size;
            Factor* parent_fctr;
#pragma omp for nowait
            for (int64_t i = 0; i < num_parent_fctrs; i++) {
                parent_fctr = parent_fctrs[i];
                log_fctr_size = log(stSet_size(parent_fctr->children));
                local_log_likelihood = add_logs(local_log_likelihood,
                                                log_fctr_size + factor_parent_joint_log_likelihood(fctr, parent_fctr));
            }
#pragma omp critical
            log_likelihood = add_logs(log_likelihood, local_log_likelihood);
        }
        free(parent_fctrs);
        log_likelihood = add_logs(log_likelihood,
                                  log(parent_gamma) + next_height_unobs_log_likelihood);
        log_likelihood -= log(parent_gamma + parent_dp->num_factor_children);
        return log_likelihood;
    }
}
//double unobserved_factor_joint_log_likelihood(Factor* fctr, DirichletProcess* dp) {
// DirichletProcess* parent_dp = dp->parent;
// if (parent_dp == NULL) {
// return prior_joint_log_likelihood(dp->hdp, fctr);
// }
// else {
// double parent_gamma = *(parent_dp->gamma);
//
// double log_likelihood = MINUS_INF;
// stSetIterator* parent_fctr_iter = stSet_getIterator(parent_dp->factors);
// Factor* parent_fctr = (Factor*) stSet_getNext(parent_fctr_iter);
// double log_fctr_size;
// while (parent_fctr != NULL) {
// log_fctr_size = log((double) stSet_size(parent_fctr->children));
// log_likelihood = add_logs(log_likelihood,
// log_fctr_size + factor_parent_joint_log_likelihood(fctr, parent_fctr));
// parent_fctr = (Factor*) stSet_getNext(parent_fctr_iter);
// }
// stSet_destructIterator(parent_fctr_iter);
//
// log_likelihood = add_logs(log_likelihood,
// log(parent_gamma) + unobserved_factor_joint_log_likelihood(fctr, parent_dp));
//
// log_likelihood -= log(parent_gamma + (double) parent_dp->num_factor_children);
//
// return log_likelihood;
// }
//}
// Allocates a fresh, unattached DirichletProcess with empty child/factor
// collections and zeroed scratch state. The caller is responsible for setting
// id, hdp, gamma, depth, and parent (see new_hier_dir_proc).
DirichletProcess* new_dir_proc() {
    DirichletProcess* dp = (DirichletProcess*) malloc(sizeof(DirichletProcess));
    dp->gamma = NULL;
    dp->depth = 0;
    dp->parent = NULL;
    dp->children = stList_construct();
    dp->factors = stSet_construct();
    dp->num_factor_children = 0;
    dp->cached_factor_mean = 0.0;
    dp->cached_factor_sum_sq_dev = 0.0;
    dp->cached_factor_size = 0;
    dp->base_factor_wt = 0.0;
    dp->posterior_predictive = NULL;
    dp->spline_slopes = NULL;
    dp->observed = false;
    return dp;
}
// Recursively destroys every factor beneath 'fctr'. Only leaves (children ==
// NULL, i.e. DATA_PT factors) are destroyed directly; destroy_factor's
// upward cascade then removes emptied ancestors automatically.
// NOTE(review): destroy_factor mutates the very child sets the recursion is
// iterating (stSet_remove during stSet_getNext), and the cascade can destroy
// an ancestor whose iterator is still live on the stack -- this relies on
// sonLib's stSet tolerating removal during iteration; confirm.
void clear_factor_tree(Factor* fctr) {
    if (fctr->children != NULL) {
        stSetIterator* child_fctr_iter = stSet_getIterator(fctr->children);
        Factor* child_fctr = (Factor*) stSet_getNext(child_fctr_iter);
        while (child_fctr != NULL) {
            clear_factor_tree(child_fctr);
            child_fctr = (Factor*) stSet_getNext(child_fctr_iter);
        }
        stSet_destructIterator(child_fctr_iter);
    }
    else {
        // note: this will trigger automatic destruction of parent factors
        destroy_factor(fctr);
    }
}

// Destroys every factor tree rooted at one of this DP's factors (no-op when
// the DP owns no factors). Subject to the same iteration-while-mutating
// caveat as clear_factor_tree, since destruction removes entries from
// dp->factors while it is being iterated.
void destroy_dir_proc_factor_tree(DirichletProcess* dp) {
    if (stSet_size(dp->factors) == 0) {
        return;
    }
    stSetIterator* fctr_iter = stSet_getIterator(dp->factors);
    Factor* fctr = (Factor*) stSet_getNext(fctr_iter);
    while (fctr != NULL) {
        clear_factor_tree(fctr);
        fctr = (Factor*) stSet_getNext(fctr_iter);
    }
    stSet_destructIterator(fctr_iter);
}
// Destroys a DP subtree: its factor trees, all child DPs recursively, and the
// DP itself, detaching it from its parent's child list.
// NOTE(review): each recursive destroy_dir_proc(child) calls
// stList_removeItem on the same dp->children list being iterated here --
// relies on sonLib's stList iterator surviving removal; confirm.
void destroy_dir_proc(DirichletProcess* dp) {
    destroy_dir_proc_factor_tree(dp);
    stSet_destruct(dp->factors);
    if (dp->children != NULL) {
        stListIterator* st_iterator = stList_getIterator(dp->children);
        DirichletProcess* dp_child = (DirichletProcess*) stList_getNext(st_iterator);
        while (dp_child != NULL) {
            destroy_dir_proc(dp_child);
            dp_child = (DirichletProcess*) stList_getNext(st_iterator);
        }
        stList_destructIterator(st_iterator);
        stList_destruct(dp->children);
    }
    if (dp->parent != NULL) {
        // unhook from the parent's child list so the parent's own teardown
        // does not visit this DP again
        stList_removeItem(dp->parent->children, (void*) dp);
    }
    free(dp->posterior_predictive);
    free(dp->spline_slopes);
    free(dp);
}
// fixed concentration parameters
// Constructs an HDP with fixed per-depth concentration parameters (gamma may
// be NULL when the caller will install a Gamma prior instead -- see
// new_hier_dir_proc_2). Validates the normal-inverse-gamma hyperparameters,
// allocates num_dps unlinked DirichletProcess objects, and builds the
// evenly spaced sampling grid. Exits on invalid arguments.
HierarchicalDirichletProcess* new_hier_dir_proc(int64_t num_dps, int64_t depth, double* gamma, double sampling_grid_start,
                                                double sampling_grid_stop, int64_t sampling_grid_length, double mu,
                                                double nu, double alpha, double beta) {
    if (nu <= 0.0) {
        fprintf(stderr, "nu parameter of Normal-Inverse Gamma distribution must be positive.\n");
        exit(EXIT_FAILURE);
    }
    //if (alpha <= 0.0) {
    //    fprintf(stderr, "alpha parameter of Normal-Inverse Gamma distribution must be positive.\n");
    //    exit(EXIT_FAILURE);
    //}
    if (beta <= 0.0) {
        fprintf(stderr, "beta parameter of Normal-Inverse Gamma distribution must be positive.\n");
        exit(EXIT_FAILURE);
    }
    if (alpha <= 1.0){
        // clamp rather than reject small alpha
        alpha = 1.00001;
    }
    // NOTE(review): this validation is dead code -- the cast binds to the
    // literal 2 ((int64_t) 2 * alpha == 2 * alpha as a double), so the
    // condition can never be true; the intended check was presumably
    // 2 * alpha != (int64_t)(2 * alpha). Fixing it would conflict with the
    // clamp above (which produces the non-half-integer 1.00001), so it is
    // left as-is. TODO: reconcile the clamp with the half-integer requirement.
    if (2 * alpha != (int64_t) 2 * alpha || alpha <= 1.0) {
        fprintf(stderr, "Normal-Inverse Gamma parameter 'alpha' must be integer or half-integer > 1.0.\n");
        exit(EXIT_FAILURE);
    }
    if (gamma != NULL) {
        for (int64_t i = 0; i < depth; i++) {
            if (gamma[i] <= 0) {
                // fix: corrected "postive" typo in the error message
                fprintf(stderr, "Concentration parameter gamma must be positive.\n");
                exit(EXIT_FAILURE);
            }
        }
    }
    if (num_dps < 2) {
        fprintf(stderr, "Hierarchical Dirichlet process formalism requires >= 2 Dirichlet Processes.\n");
        exit(EXIT_FAILURE);
    }
    double* grid = linspace(sampling_grid_start, sampling_grid_stop, sampling_grid_length);
    HierarchicalDirichletProcess* hdp = (HierarchicalDirichletProcess*) malloc(sizeof(HierarchicalDirichletProcess));
    // normal-inverse gamma parameters (alpha stored doubled)
    hdp->mu = mu;
    hdp->nu = nu;
    hdp->two_alpha = 2.0 * alpha;
    hdp->beta = beta;
    hdp->gamma = gamma;
    hdp->depth = depth;
    hdp->finalized = false;
    hdp->num_dps = num_dps;
    // allocate the DPs; they are linked into a tree later, when the
    // structure is finalized
    DirichletProcess** dps = (DirichletProcess**) malloc(sizeof(DirichletProcess*) * num_dps);
    for (int64_t i = 0; i < num_dps; i++) {
        DirichletProcess* dp = new_dir_proc();
        dp->id = i;
        dp->hdp = hdp;
        dps[i] = dp;
    }
    hdp->dps = dps;
    hdp->base_dp = NULL;
    hdp->sampling_grid = grid;
    hdp->grid_length = sampling_grid_length;
    hdp->samples_taken = 0;
    hdp->splines_finalized = false;
    hdp->data = NULL;
    hdp->data_pt_dp_id = NULL;
    hdp->data_length = 0;
    //hdp->log_sum_memo = new_log_sum_memo();
    // gamma-sampling state is only populated by new_hier_dir_proc_2
    hdp->sample_gamma = false;
    hdp->gamma_alpha = NULL;
    hdp->gamma_beta = NULL;
    hdp->s_aux_vector = NULL;
    hdp->w_aux_vector = NULL;
    hdp->distr_metric_memos = stSet_construct2(&destroy_distr_metric_memo);
    return hdp;
}
// Gamma prior on concentration parameters
// Constructor variant that places independent Gamma(alpha, beta) priors on
// the per-depth concentration parameters instead of fixing them. The
// concentrations are initialized to their prior means, and the auxiliary
// vectors (w, s) used when resampling them are allocated here.
HierarchicalDirichletProcess* new_hier_dir_proc_2(int64_t num_dps, int64_t depth, double* gamma_alpha, double* gamma_beta,
                                                  double sampling_grid_start, double sampling_grid_stop,
                                                  int64_t sampling_grid_length, double mu, double nu, double alpha,
                                                  double beta) {
    // validate the Gamma hyperparameters for every level
    for (int64_t level = 0; level < depth; level++) {
        if (gamma_alpha[level] <= 0.0) {
            fprintf(stderr, "alpha parameter of Gamma distribution must be positive.\n");
            exit(EXIT_FAILURE);
        }
        if (gamma_beta[level] <= 0.0) {
            fprintf(stderr, "beta parameter of Gamma distribution must be positive.\n");
            exit(EXIT_FAILURE);
        }
    }
    HierarchicalDirichletProcess* hdp = new_hier_dir_proc(num_dps, depth, NULL, sampling_grid_start, sampling_grid_stop,
                                                          sampling_grid_length, mu, nu, alpha, beta);
    hdp->sample_gamma = true;
    hdp->gamma_alpha = gamma_alpha;
    hdp->gamma_beta = gamma_beta;
    double* w_aux = (double*) malloc(sizeof(double) * num_dps);
    bool* s_aux = (bool*) malloc(sizeof(bool) * num_dps);
    for (int64_t i = 0; i < num_dps; i++) {
        w_aux[i] = 1.0;
        s_aux[i] = false;
    }
    hdp->w_aux_vector = w_aux;
    hdp->s_aux_vector = s_aux;
    // start each concentration at the prior's expected value alpha / beta
    double* init_gamma = (double*) malloc(sizeof(double) * depth);
    for (int64_t level = 0; level < depth; level++) {
        init_gamma[level] = gamma_alpha[level] / gamma_beta[level];
    }
    hdp->gamma = init_gamma;
    return hdp;
}
// Free the entire HDP: the DP/factor tree, every owned array, and the
// struct itself. Assumes finalize_hdp_structure has set hdp->base_dp.
// Note: hdp->gamma is freed even when it was supplied by the caller of
// new_hier_dir_proc, so callers must pass heap memory they relinquish.
void destroy_hier_dir_proc(HierarchicalDirichletProcess* hdp) {
    // destroys all DPs recursively from the root
    destroy_dir_proc(hdp->base_dp);
    free(hdp->gamma);
    free(hdp->data);
    free(hdp->data_pt_dp_id);
    free(hdp->dps);
    free(hdp->sampling_grid);
    //destroy_log_sum_memo(hdp->log_sum_memo);
    // these are NULL (free is a no-op) unless new_hier_dir_proc_2 was used
    free(hdp->gamma_alpha);
    free(hdp->gamma_beta);
    free(hdp->w_aux_vector);
    free(hdp->s_aux_vector);
    stSet_destruct(hdp->distr_metric_memos);
    free(hdp);
}
// Locate the unique parentless DP and record it as the base of the tree.
// A second parentless DP means the structure is disconnected; none at all
// means there is no root. Both are fatal.
void establish_base_dp(HierarchicalDirichletProcess* hdp) {
    int64_t num_dps = hdp->num_dps;
    for (int64_t i = 0; i < num_dps; i++) {
        DirichletProcess* candidate = hdp->dps[i];
        if (candidate->parent != NULL) {
            continue;
        }
        if (hdp->base_dp != NULL) {
            fprintf(stderr, "Hierarchical Dirichlet process contains orphaned Dirichlet process.\n");
            exit(EXIT_FAILURE);
        }
        hdp->base_dp = candidate;
    }
    if (hdp->base_dp == NULL) {
        fprintf(stderr, "Hierarchical Dirichlet process does not contain base Dirichlet process.\n");
        exit(EXIT_FAILURE);
    }
}
// DFS to verify that Dirichlet processes follow tree structure
// Iterative DFS from the base DP; encountering any DP twice means the
// parent/child links contain a cycle or a shared child, so the graph is
// not a tree. Fatal on violation.
void verify_dp_tree(HierarchicalDirichletProcess* hdp) {
    bool* seen = (bool*) malloc(sizeof(bool) * hdp->num_dps);
    for (int64_t i = 0; i < hdp->num_dps; i++) {
        seen[i] = false;
    }
    stList* frontier = stList_construct();
    stList_append(frontier, (void*) hdp->base_dp);
    while (stList_length(frontier) > 0) {
        DirichletProcess* current = (DirichletProcess*) stList_pop(frontier);
        if (seen[current->id]) {
            fprintf(stderr, "Hierarchical Dirichlet process does not have tree structure.\n");
            exit(EXIT_FAILURE);
        }
        seen[current->id] = true;
        stListIterator* iter = stList_getIterator(current->children);
        DirichletProcess* child;
        while ((child = (DirichletProcess*) stList_getNext(iter)) != NULL) {
            stList_append(frontier, (void*) child);
        }
        stList_destructIterator(iter);
    }
    stList_destruct(frontier);
    free(seen);
}
// Recursive pass that records each DP's depth, points it at the
// concentration parameter for its level, and requires every leaf to sit at
// exactly leaf_depth. Fatal if a leaf is found at the wrong level.
void verify_tree_depth(HierarchicalDirichletProcess* hdp, DirichletProcess* dp, int64_t current_depth,
                       int64_t leaf_depth) {
    dp->gamma = &(hdp->gamma[current_depth]);
    dp->depth = current_depth;
    if (stList_length(dp->children) == 0) {
        if (current_depth != leaf_depth) {
            fprintf(stderr, "Hierarchical Dirichlet process has leaf Dirichlet process at incorrect depth.\n");
            exit(EXIT_FAILURE);
        }
        return;
    }
    stListIterator* iter = stList_getIterator(dp->children);
    DirichletProcess* child;
    while ((child = (DirichletProcess*) stList_getNext(iter)) != NULL) {
        verify_tree_depth(hdp, child, current_depth + 1, leaf_depth);
    }
    stList_destructIterator(iter);
}
// Check every data point maps to an existing DP, and that the DP is a leaf
// of the tree (internal DPs never hold data directly). Fatal on violation.
void verify_valid_dp_assignments(HierarchicalDirichletProcess* hdp) {
    int64_t* dp_ids = hdp->data_pt_dp_id;
    for (int64_t i = 0; i < hdp->data_length; i++) {
        int64_t id = dp_ids[i];
        if (id < 0 || id >= hdp->num_dps) {
            fprintf(stderr, "Data point is assigned to non-existent Dirichlet process.\n");
            exit(EXIT_FAILURE);
        }
        if (stList_length(hdp->dps[id]->children) > 0) {
            fprintf(stderr, "Data point cannot be assigned to non-leaf Dirichlet process.\n");
            exit(EXIT_FAILURE);
        }
    }
}
// Walk from each data point's leaf DP up to the root, flagging every DP on
// the path as observed and giving it a zeroed posterior-predictive
// accumulator. The climb stops at the first already-observed ancestor,
// since everything above it was handled by an earlier data point.
void mark_observed_dps(HierarchicalDirichletProcess* hdp) {
    int64_t grid_length = hdp->grid_length;
    for (int64_t i = 0; i < hdp->data_length; i++) {
        DirichletProcess* dp = hdp->dps[hdp->data_pt_dp_id[i]];
        while (dp != NULL && !dp->observed) {
            dp->observed = true;
            double* pdf = (double*) malloc(sizeof(double) * grid_length);
            for (int64_t j = 0; j < grid_length; j++) {
                pdf[j] = 0.0;
            }
            dp->posterior_predictive = pdf;
            dp = dp->parent;
        }
    }
}
// Lloyd's algorithm for 1-D k-means with random restarts.
//
// The restart with the lowest total L1 distance to assigned centroids wins.
// Outputs are heap-allocated and owned by the caller: *assignments_out
// (length `length`, cluster index per data point) and *centroids_out
// (length k). Initial centroids are drawn uniformly at random from the data
// (with replacement, via rand()); a cluster that loses all of its members
// is re-seeded from a random data point. Convergence is declared when an
// assignment pass changes nothing.
void k_means(int64_t k, double* data, int64_t length, int64_t max_iters, int64_t num_restarts,
             int64_t** assignments_out, double** centroids_out) {
    if (k > length) {
        fprintf(stderr, "Must have at least as many data points as clusters.\n");
        exit(EXIT_FAILURE);
    }
    if (k <= 0) {
        fprintf(stderr, "Must have at least one cluster.\n");
        exit(EXIT_FAILURE);
    }
    // Bug fix: the original tolerated max_iters <= 0 (assignments stayed at
    // the -1 sentinel and indexed centroids[-1] below) and num_restarts <= 0
    // (silently returned NULL outputs). Clamp both so at least one full
    // assignment pass always runs.
    if (max_iters < 1) {
        max_iters = 1;
    }
    if (num_restarts < 1) {
        num_restarts = 1;
    }
    int64_t* best_assignments = NULL;
    double* best_centroids = NULL;
    double best_sum_dist = DBL_MAX;
    int64_t* centroid_counts = (int64_t*) malloc(sizeof(int64_t) * k);
    for (int64_t restart = 0; restart < num_restarts; restart++) {
        double* centroids = (double*) malloc(sizeof(double) * k);
        int64_t* assignments = (int64_t*) malloc(sizeof(int64_t) * length);
        for (int64_t i = 0; i < length; i++) {
            assignments[i] = -1;  // sentinel: guarantees the first pass "changes" every assignment
        }
        for (int64_t i = 0; i < k; i++) {
            centroids[i] = data[rand() % length];
        }
        for (int64_t iter = 0; iter < max_iters; iter++) {
            // assignment step: nearest centroid by absolute distance
            bool converged = true;
            for (int64_t i = 0; i < length; i++) {
                double data_pt = data[i];
                double closest_dist = fabs(data_pt - centroids[0]);
                int64_t closest_centroid = 0;
                for (int64_t j = 1; j < k; j++) {
                    double dist = fabs(data_pt - centroids[j]);
                    if (dist < closest_dist) {
                        closest_centroid = j;
                        closest_dist = dist;
                    }
                }
                if (assignments[i] != closest_centroid) {
                    converged = false;
                }
                assignments[i] = closest_centroid;
            }
            if (converged) {
                break;
            }
            // update step: recompute each centroid as its cluster's mean
            for (int64_t i = 0; i < k; i++) {
                centroids[i] = 0.0;
                centroid_counts[i] = 0;
            }
            for (int64_t i = 0; i < length; i++) {
                int64_t assignment = assignments[i];
                centroids[assignment] += data[i];
                centroid_counts[assignment]++;
            }
            for (int64_t i = 0; i < k; i++) {
                if (centroid_counts[i] > 0) {
                    centroids[i] /= centroid_counts[i];
                }
                else {
                    // empty cluster: re-seed from a random data point
                    centroids[i] = data[rand() % length];
                }
            }
        }
        // score this restart by total L1 distance to the chosen centroids
        double sum_dist = 0.0;
        for (int64_t i = 0; i < length; i++) {
            sum_dist += fabs(data[i] - centroids[assignments[i]]);
        }
        if (sum_dist < best_sum_dist) {
            free(best_centroids);
            free(best_assignments);
            best_centroids = centroids;
            best_assignments = assignments;
            best_sum_dist = sum_dist;
        }
        else {
            free(centroids);
            free(assignments);
        }
    }
    free(centroid_counts);
    *centroids_out = best_centroids;
    *assignments_out = best_assignments;
}
//void fill_k_means_factor_bank(Factor*** fctr_bank, int64_t* dp_depths, DirichletProcess* dp,
// int64_t depth, int64_t* expected_num_factors) {
// int64_t dp_id = dp->id;
// dp_depths[dp_id] = depth;
//
// int64_t expected_num = expected_num_factors[depth];
//
// Factor** dp_fctr_bank = (Factor**) malloc(sizeof(Factor*) * expected_num);
// fctr_bank[dp_id] = dp_fctr_bank;
// for (int64_t i = 0; i < expected_num; i++) {
// dp_fctr_bank[i] = NULL;
// }
//
//
//}
// Recursive helper: record this DP's depth, then descend into each child
// one level further down.
void get_dp_depths_internal(int64_t* dp_depths, DirichletProcess* dp, int64_t depth) {
    dp_depths[dp->id] = depth;
    stListIterator* iter = stList_getIterator(dp->children);
    for (DirichletProcess* child = (DirichletProcess*) stList_getNext(iter);
         child != NULL;
         child = (DirichletProcess*) stList_getNext(iter)) {
        get_dp_depths_internal(dp_depths, child, depth + 1);
    }
    stList_destructIterator(iter);
}
// Returns a heap array (length num_dps) mapping DP id -> depth in the tree,
// with the base DP at depth 0. Caller owns and frees the array.
int64_t* get_dp_depths(HierarchicalDirichletProcess* hdp) {
    int64_t* depths = (int64_t*) malloc(sizeof(int64_t) * hdp->num_dps);
    get_dp_depths_internal(depths, hdp->base_dp, 0);
    return depths;
}
// (Currently unused alternative to init_factors -- see finalize_data.)
// Initialize the factor tree by clustering: k-means over the raw data seeds
// the leaf-level factors, then k-means over the resulting centers seeds
// each successive level up to the base factors. The expected factor count
// per level is derived from gamma * log(1 + n / gamma), presumably the
// asymptotic expected table count of a CRP -- TODO confirm.
void k_means_init_factors(HierarchicalDirichletProcess* hdp, int64_t max_iters, int64_t num_restarts) {
    int64_t tree_depth = hdp->depth;
    double* gamma_params = hdp->gamma;
    int64_t num_data = hdp->data_length;
    int64_t* data_pt_dp_id = hdp->data_pt_dp_id;
    int64_t num_dps = hdp->num_dps;
    int64_t* dp_depths = get_dp_depths(hdp);
    // count how many DPs sit at each level of the tree
    int64_t* depth_dp_counts = (int64_t*) malloc(sizeof(int64_t) * tree_depth);
    for (int64_t i = 0; i < tree_depth; i++) {
        depth_dp_counts[i] = 0;
    }
    for (int64_t i = 0; i < num_dps; i++) {
        depth_dp_counts[dp_depths[i]]++;
    }
    // expected_num_factors[i] = estimated factors per DP at level i counted
    // upward from the leaves (index 0 = leaf level).
    // NOTE(review): gamma_params[0] is paired here with the leaf level,
    // while verify_tree_depth assigns hdp->gamma[depth] top-down from the
    // base DP -- confirm the intended orientation of the gamma index.
    int64_t* expected_num_factors = (int64_t*) malloc(sizeof(int64_t) * tree_depth);
    double stat_expect = gamma_params[0] * log(1.0 + num_data / gamma_params[0]);
    expected_num_factors[0] = ((int64_t) stat_expect / depth_dp_counts[tree_depth - 1]) + 1;
    for (int64_t i = 1; i < tree_depth; i++) {
        int64_t num_lower_factors = expected_num_factors[i - 1];
        stat_expect = gamma_params[i] * log(1.0 + num_lower_factors / gamma_params[i]);
        expected_num_factors[i] = ((int64_t) stat_expect / depth_dp_counts[tree_depth - i - 1]) + 1;
        // a level can never have more factors than the level feeding it
        if (expected_num_factors[i] > num_lower_factors) {
            expected_num_factors[i] = num_lower_factors;
        }
    }
    // cluster the data, then cluster each level's centers for the next level
    int64_t** cluster_assignments = (int64_t**) malloc(sizeof(int64_t*) * tree_depth);
    double** factor_centers = (double**) malloc(sizeof(double*) * tree_depth);
    k_means(expected_num_factors[0], hdp->data, num_data, max_iters, num_restarts,
            &cluster_assignments[0], &factor_centers[0]);
    for (int64_t i = 1; i < tree_depth; i++) {
        k_means(expected_num_factors[i], factor_centers[i - 1], expected_num_factors[i - 1], max_iters, num_restarts,
                &cluster_assignments[i], &factor_centers[i]);
    }
    // fctr_bank[dp_id][j] lazily holds the factor created for cluster j in
    // that DP; entries stay NULL until some child lands in the cluster
    Factor*** fctr_bank = (Factor***) malloc(sizeof(Factor**) * num_dps);
    int64_t num_potential_factors;
    int64_t depth;
    Factor** dp_fctr_bank;
    for (int64_t i = 0; i < num_dps; i++) {
        depth = dp_depths[i];
        num_potential_factors = expected_num_factors[tree_depth - depth - 1];
        dp_fctr_bank = (Factor**) malloc(sizeof(Factor*) * num_potential_factors);
        for (int64_t j = 0; j < num_potential_factors; j++) {
            dp_fctr_bank[j] = NULL;
        }
        fctr_bank[i] = dp_fctr_bank;
    }
    // attach each data point to the leaf-level factor of its cluster,
    // creating the factor on first use
    DirichletProcess** dps = hdp->dps;
    DirichletProcess* dp;
    int64_t dp_id;
    Factor* data_pt_fctr;
    Factor* parent_fctr;
    int64_t parent_fctr_num;
    for (int64_t i = 0; i < num_data; i++) {
        data_pt_fctr = new_data_pt_factor(hdp, i);
        dp_id = data_pt_dp_id[i];
        dp = dps[dp_id];
        parent_fctr_num = cluster_assignments[0][i];
        parent_fctr = fctr_bank[dp_id][parent_fctr_num];
        if (parent_fctr == NULL) {
            parent_fctr = new_middle_factor(dps[dp_id]);
            fctr_bank[dp_id][parent_fctr_num] = parent_fctr;
        }
        data_pt_fctr->parent = parent_fctr;
        stSet_insert(parent_fctr->children, (void*) data_pt_fctr);
        (dp->num_factor_children)++;
    }
    // link factors level by level up to the base factors (depth 1 parents
    // are base factors; deeper parents are middle factors)
    DirichletProcess* parent_dp;
    Factor** parent_dp_fctr_bank;
    int64_t* assignments;
    int64_t expected_num;
    Factor* fctr;
    // could make this faster with recursion instead of multiple passes
    for (int64_t depth = tree_depth - 1; depth > 0; depth--) {
        assignments = cluster_assignments[tree_depth - depth];
        expected_num = expected_num_factors[tree_depth - depth - 1];
        for (int64_t i = 0; i < num_dps; i++) {
            if (dp_depths[i] != depth) {
                continue;
            }
            dp = dps[i];
            parent_dp = dp->parent;
            dp_fctr_bank = fctr_bank[i];
            parent_dp_fctr_bank = fctr_bank[parent_dp->id];
            for (int64_t j = 0; j < expected_num; j++) {
                fctr = dp_fctr_bank[j];
                if (fctr == NULL) {
                    continue;
                }
                parent_fctr_num = assignments[j];
                parent_fctr = parent_dp_fctr_bank[parent_fctr_num];
                if (parent_fctr == NULL) {
                    if (depth > 1) {
                        parent_fctr = new_middle_factor(parent_dp);
                    }
                    else {
                        parent_fctr = new_base_factor(hdp);
                    }
                    parent_dp_fctr_bank[parent_fctr_num] = parent_fctr;
                }
                fctr->parent = parent_fctr;
                stSet_insert(parent_fctr->children, (void*) fctr);
                (parent_dp->num_factor_children)++;
            }
        }
    }
    // with the tree built, seed each base factor's posterior parameters
    // from the statistics of the data beneath it
    double mean, sum_sq_devs;
    int64_t num_fctr_data;
    stSetIterator* base_fctr_iter = stSet_getIterator(hdp->base_dp->factors);
    Factor* base_fctr = stSet_getNext(base_fctr_iter);
    while (base_fctr != NULL) {
        get_factor_stats(base_fctr, &mean, &sum_sq_devs, &num_fctr_data);
        add_update_base_factor_params(base_fctr, mean, sum_sq_devs, (double) num_fctr_data);
        base_fctr = stSet_getNext(base_fctr_iter);
    }
    stSet_destructIterator(base_fctr_iter);
    // scratch cleanup (the factors themselves live on in the DPs)
    for (int64_t i = 0; i < num_dps; i++) {
        free(fctr_bank[i]);
    }
    for (int64_t i = 0; i < tree_depth; i++) {
        free(cluster_assignments[i]);
        free(factor_centers[i]);
    }
    free(cluster_assignments);
    free(factor_centers);
    free(expected_num_factors);
    free(fctr_bank);
    free(dp_depths);
    free(depth_dp_counts);
}
// Recursive helper for init_factors: create one middle factor in each
// observed DP, chained to the parent factor one level up. At leaf DPs the
// pre-built data-point factors for that DP become the new factor's
// children; at internal DPs the recursion continues downward. Unobserved
// subtrees are skipped entirely.
void init_factors_internal(DirichletProcess* dp, Factor* parent_fctr, stList** data_pt_fctr_lists) {
    if (!dp->observed) {
        return;
    }
    Factor* fctr = new_middle_factor(dp);
    fctr->parent = parent_fctr;
    stSet_insert(parent_fctr->children, (void*) fctr);
    if (stList_length(dp->children) == 0) {
        // leaf DP: adopt every data-point factor assigned to this DP
        stSet* children = fctr->children;
        stListIterator* data_pt_fctr_iter = stList_getIterator(data_pt_fctr_lists[dp->id]);
        Factor* data_pt_fctr = (Factor*) stList_getNext(data_pt_fctr_iter);
        while (data_pt_fctr != NULL) {
            data_pt_fctr->parent = fctr;
            stSet_insert(children, (void*) data_pt_fctr);
            data_pt_fctr = (Factor*) stList_getNext(data_pt_fctr_iter);
        }
        stList_destructIterator(data_pt_fctr_iter);
    }
    else {
        // internal DP: continue building one factor per observed child DP
        stListIterator* child_dp_iter = stList_getIterator(dp->children);
        DirichletProcess* child_dp = (DirichletProcess*) stList_getNext(child_dp_iter);
        while (child_dp != NULL) {
            init_factors_internal(child_dp, fctr, data_pt_fctr_lists);
            child_dp = (DirichletProcess*) stList_getNext(child_dp_iter);
        }
        stList_destructIterator(child_dp_iter);
    }
}
// Build the initial factor tree for the current data: one data-point factor
// per observation, one middle factor per observed DP along each root-leaf
// path (via init_factors_internal), and a single base factor at the root
// that initially holds everything. Finishes by seeding the base factor's
// posterior parameters and each DP's factor-child count.
void init_factors(HierarchicalDirichletProcess* hdp) {
    int64_t data_length = hdp->data_length;
    int64_t* data_pt_dp_id = hdp->data_pt_dp_id;
    DirichletProcess** dps = hdp->dps;
    int64_t num_dps = hdp->num_dps;
    // bucket the data-point factors by the leaf DP they are assigned to;
    // lists are created lazily, so unused DPs stay NULL
    stList** data_pt_fctr_lists = (stList**) malloc(sizeof(stList*) * num_dps);
    for (int64_t i = 0; i < num_dps; i++) {
        data_pt_fctr_lists[i] = NULL;
    }
    Factor* data_pt_fctr;
    int64_t dp_id;
    stList* fctr_list;
    for (int64_t data_pt_idx = 0; data_pt_idx < data_length; data_pt_idx++) {
        dp_id = data_pt_dp_id[data_pt_idx];
        fctr_list = data_pt_fctr_lists[dp_id];
        if (fctr_list == NULL) {
            fctr_list = stList_construct();
            data_pt_fctr_lists[dp_id] = fctr_list;
        }
        data_pt_fctr = new_data_pt_factor(hdp, data_pt_idx);
        stList_append(fctr_list, (void*) data_pt_fctr);
    }
    // single base factor at the root; the recursion hangs one middle factor
    // per observed DP beneath it
    DirichletProcess* base_dp = hdp->base_dp;
    Factor* root_factor = new_base_factor(hdp);
    stListIterator* child_dp_iter = stList_getIterator(base_dp->children);
    DirichletProcess* child_dp = (DirichletProcess*) stList_getNext(child_dp_iter);
    while (child_dp != NULL) {
        init_factors_internal(child_dp, root_factor, data_pt_fctr_lists);
        child_dp = (DirichletProcess*) stList_getNext(child_dp_iter);
    }
    stList_destructIterator(child_dp_iter);
    // only the list shells are destroyed; the factors now live in the tree
    for (int64_t i = 0; i < num_dps; i++) {
        if (data_pt_fctr_lists[i] != NULL) {
            stList_destruct(data_pt_fctr_lists[i]);
        }
    }
    free(data_pt_fctr_lists);
    // seed the root factor's posterior parameters from all the data
    double mean, sum_sq_devs;
    int64_t num_data;
    get_factor_stats(root_factor, &mean, &sum_sq_devs, &num_data);
    add_update_base_factor_params(root_factor, mean, sum_sq_devs, (double) num_data);
    // each DP's child count = total children across all of its factors
    int64_t fctr_child_count;
    DirichletProcess* dp;
    Factor* fctr;
    stSetIterator* fctr_iter;
    for (int64_t i = 0; i < num_dps; i++) {
        dp = dps[i];
        fctr_child_count = 0;
        fctr_iter = stSet_getIterator(dp->factors);
        fctr = (Factor*) stSet_getNext(fctr_iter);
        while (fctr != NULL) {
            fctr_child_count += stSet_size(fctr->children);
            fctr = (Factor*) stSet_getNext(fctr_iter);
        }
        stSet_destructIterator(fctr_iter);
        dp->num_factor_children = fctr_child_count;
    }
}
// Hook run once both data and tree structure are in place: validates the
// data-to-DP mapping, marks the observed subtree, and builds the initial
// factor assignment (the k-means initializer is available but disabled).
void finalize_data(HierarchicalDirichletProcess* hdp) {
    verify_valid_dp_assignments(hdp);
    mark_observed_dps(hdp);
    init_factors(hdp);
    //k_means_init_factors(hdp, 500, 5);
}
// Wire child -> parent in the DP tree. Only legal before the structure is
// finalized, with valid ids, and while the child is still unattached.
void set_dir_proc_parent(HierarchicalDirichletProcess* hdp, int64_t child_id, int64_t parent_id) {
    if (hdp->finalized) {
        fprintf(stderr, "Hierarchical Dirichlet process structure has been finalized. Cannot set new parent.\n");
        exit(EXIT_FAILURE);
    }
    bool child_id_valid = child_id >= 0 && child_id < hdp->num_dps;
    bool parent_id_valid = parent_id >= 0 && parent_id < hdp->num_dps;
    if (!child_id_valid || !parent_id_valid) {
        fprintf(stderr, "Dirichlet process ID does not exist.\n");
        exit(EXIT_FAILURE);
    }
    DirichletProcess* child_dp = hdp->dps[child_id];
    if (child_dp->parent != NULL) {
        fprintf(stderr, "Dirichlet process already has parent.\n");
        exit(EXIT_FAILURE);
    }
    child_dp->parent = hdp->dps[parent_id];
    stList_append(child_dp->parent->children, (void*) child_dp);
}
// Hand ownership of the data arrays to the HDP. If the tree structure is
// already finalized, factor initialization runs immediately; otherwise
// finalize_hdp_structure will trigger it later. Fatal if data is already
// attached (reset_hdp_data must be called first).
void pass_data_to_hdp(HierarchicalDirichletProcess* hdp, double* data, int64_t* dp_ids, int64_t length) {
    if (hdp->data != NULL) {
        fprintf(stderr, "Hierarchical Dirichlet process must be reset before passing new data.\n");
        exit(EXIT_FAILURE);
    }
    hdp->data_length = length;
    hdp->data = data;
    hdp->data_pt_dp_id = dp_ids;
    if (hdp->finalized) {
        finalize_data(hdp);
    }
}
// Lock in the DP tree: find the base DP, verify the graph is a tree,
// annotate depths and per-level gamma pointers, and check that every leaf
// sits at depth hdp->depth - 1. If data was already passed in, initialize
// factors now. After this call the parent/child structure is immutable.
void finalize_hdp_structure(HierarchicalDirichletProcess* hdp) {
    establish_base_dp(hdp);
    verify_dp_tree(hdp);
    verify_tree_depth(hdp, hdp->base_dp, 0, hdp->depth - 1);
    if (hdp->data != NULL) {
        finalize_data(hdp);
    }
    hdp->finalized = true;
}
// Invalidate a pairwise-distance memo. The matrix stores the strict upper
// triangle of an num_distrs x num_distrs symmetric matrix in a flat array;
// -1.0 marks an entry as not yet computed.
void reset_distr_metric_memo(DistributionMetricMemo* memo) {
    int64_t n = memo->num_distrs;
    int64_t num_entries = (n * (n - 1)) / 2;
    for (int64_t i = 0; i < num_entries; i++) {
        memo->memo_matrix[i] = -1.0;
    }
}
// Return the HDP to its pre-data state so a new dataset can be attached:
// frees the data arrays, tears down the factor tree, clears every DP's
// cached posterior-predictive and spline arrays, invalidates all distance
// memos, and (when gamma is sampled) restores the concentration parameters
// and auxiliary vectors to their initial values.
void reset_hdp_data(HierarchicalDirichletProcess* hdp) {
    // nothing to do if no data has ever been attached
    if (hdp->data == NULL && hdp->data_pt_dp_id == NULL) {
        return;
    }
    free(hdp->data);
    hdp->data = NULL;
    free(hdp->data_pt_dp_id);
    hdp->data_pt_dp_id = NULL;
    DirichletProcess** dps = hdp->dps;
    int64_t num_dps = hdp->num_dps;
    // destroys every factor in the tree but leaves the DPs themselves intact
    destroy_dir_proc_factor_tree(hdp->base_dp);
    DirichletProcess* dp;
    for (int64_t i = 0; i < num_dps; i++) {
        dp = dps[i];
        free(dp->posterior_predictive);
        dp->posterior_predictive = NULL;
        free(dp->spline_slopes);
        dp->spline_slopes = NULL;
        dp->observed = false;
    }
    // invalidate all memoized pairwise distribution distances
    stSetIterator* memo_iter = stSet_getIterator(hdp->distr_metric_memos);
    DistributionMetricMemo* memo = stSet_getNext(memo_iter);
    while (memo != NULL) {
        reset_distr_metric_memo(memo);
        memo = stSet_getNext(memo_iter);
    }
    stSet_destructIterator(memo_iter);
    hdp->splines_finalized = false;
    hdp->samples_taken = 0;
    if (hdp->sample_gamma) {
        // restore the concentrations to their prior expected values and the
        // auxiliary sampling vectors to their initial state (w = 1, s = false)
        double* gamma = hdp->gamma;
        double* gamma_alpha = hdp->gamma_alpha;
        double* gamma_beta = hdp->gamma_beta;
        for (int64_t depth = 0; depth < hdp->depth; depth++) {
            gamma[depth] = gamma_alpha[depth] / gamma_beta[depth];
        }
        double* w = hdp->w_aux_vector;
        bool* s = hdp->s_aux_vector;
        for (int64_t i = 0; i < num_dps; i++) {
            w[i] = 1.0;
            s[i] = false;
        }
    }
}
// Detach a (middle or data-point) factor from its parent and update all
// dependent bookkeeping. If the parent loses its last child it is
// destroyed. The detached factor's summary statistics are removed from the
// base factor's posterior parameters and cached on the factor's DP so a
// subsequent assign_to_parent can re-add them without recomputing.
void unassign_from_parent(Factor* fctr) {
    if (fctr->factor_type == BASE) {
        fprintf(stderr, "Cannot unassign base factor's parent.\n");
        exit(EXIT_FAILURE);
    }
    Factor* parent = fctr->parent;
    // resolve the base factor before possibly destroying the parent chain
    Factor* base_fctr = get_base_factor(parent);
    DirichletProcess* base_dp = base_fctr->dp;
    stSet_remove(parent->children, (void*) fctr);
    fctr->parent = NULL;
    (parent->dp->num_factor_children)--;
    if (stSet_size(parent->children) == 0) {
        // empty parent factor is reclaimed; destroy_factor presumably
        // cascades up the tree, possibly reaching base_fctr itself
        destroy_factor(parent);
    }
    int64_t num_reassign;
    double mean_reassign;
    double sum_sq_devs;
    get_factor_stats(fctr, &mean_reassign, &sum_sq_devs, &num_reassign);
    // check to see if base factor has been destroyed
    if (stSet_search(base_dp->factors, (void*) base_fctr) != NULL) {
        remove_update_base_factor_params(base_fctr, mean_reassign, sum_sq_devs, (double) num_reassign);
    }
    // cache the stats for the upcoming reassignment; factors without an
    // owning DP (presumably data-point factors) skip the cache
    DirichletProcess* dp = fctr->dp;
    if (dp != NULL) {
        dp->cached_factor_mean = mean_reassign;
        dp->cached_factor_size = num_reassign;
        dp->cached_factor_sum_sq_dev = sum_sq_devs;
    }
}
// Attach fctr to a new parent factor. When update_params is true, the
// factor's data statistics are folded into the base factor's posterior
// parameters: directly for a single data point, or from the statistics
// cached by unassign_from_parent for a middle-factor subtree.
void assign_to_parent(Factor* fctr, Factor* parent, bool update_params) {
    if (fctr->factor_type == BASE) {
        fprintf(stderr, "Cannot assign base factor to a parent.\n");
        exit(EXIT_FAILURE);
    }
    if (parent->factor_type == DATA_PT) {
        fprintf(stderr, "Cannot assign data point factor to be parent.\n");
        exit(EXIT_FAILURE);
    }
    fctr->parent = parent;
    stSet_insert(parent->children, (void*) fctr);
    (parent->dp->num_factor_children)++;
    Factor* base_fctr = get_base_factor(parent);
    // callers pass update_params = false when the stats will be added later
    // (e.g. a freshly created factor that has no data yet)
    if (!update_params) {
        return;
    }
    if (fctr->factor_type == DATA_PT) {
        // a single observation: mean = the point, zero squared deviation
        double data_pt = get_factor_data_pt(fctr);
        add_update_base_factor_params(base_fctr, data_pt, 0.0, 1.0);
    }
    else {
        // middle factor: reuse the stats cached by unassign_from_parent
        DirichletProcess* dp = fctr->dp;
        add_update_base_factor_params(base_fctr, dp->cached_factor_mean, dp->cached_factor_sum_sq_dev,
                                      (double) dp->cached_factor_size);
    }
}
//Factor* sample_from_data_pt_factor(Factor* fctr, DirichletProcess* dp) {
// if (fctr->factor_type != DATA_PT) {
// fprintf(stderr, "Attempted a data point factor sample from non-data point factor.\n");
// exit(EXIT_FAILURE);
// }
//
// stSet* pool = dp->factors;
// int64_t num_fctrs = stSet_size(pool);
//
// Factor** fctr_order = (Factor**) malloc(sizeof(Factor*) * num_fctrs);
//
// double* cdf = (double*) malloc(sizeof(double) * (num_fctrs + 1));
// double cumul = 0.0;
//
// stSetIterator* pool_iter = stSet_getIterator(pool);
// Factor* fctr_option;
// double fctr_size;
// for (int64_t i = 0; i < num_fctrs; i++) {
// fctr_option = (Factor*) stSet_getNext(pool_iter);
// fctr_order[i] = fctr_option;
//
// fctr_size = (double) stSet_size(fctr_option->children);
// cumul += fctr_size * data_pt_factor_parent_likelihood(fctr, fctr_option);
// cdf[i] = cumul;
// }
// stSet_destructIterator(pool_iter);
//
// double gamma_param = *(dp->gamma);
// cumul += gamma_param * unobserved_factor_likelihood(fctr, dp);
// cdf[num_fctrs] = cumul;
//
// int64_t choice_idx = bisect_left(rand_uniform(cumul), cdf, num_fctrs + 1);
//
// Factor* fctr_choice;
// if (choice_idx == num_fctrs) {
// free(fctr_order);
// DirichletProcess* parent_dp = dp->parent;
// if (parent_dp == NULL) {
// fctr_choice = new_base_factor(dp->hdp);
// }
// else {
// fctr_choice = new_middle_factor(dp);
// Factor* new_fctr_parent = sample_from_data_pt_factor(fctr, parent_dp);
// assign_to_parent(fctr_choice, new_fctr_parent, false);
// }
// }
// else {
// fctr_choice = fctr_order[choice_idx];
// free(fctr_order);
// }
//
// return fctr_choice;
//}
// Sample a new parent factor for a data-point factor from Dirichlet process
// dp. Each existing factor is weighted by (#children) * likelihood; a
// brand-new factor gets weight gamma * prior likelihood. Choosing "new"
// recurses up the DP tree to pick or create the new factor's own parent.
Factor* sample_from_data_pt_factor(Factor* fctr, DirichletProcess* dp) {
    if (fctr->factor_type != DATA_PT) {
        fprintf(stderr, "Attempted a data point factor sample from non-data point factor.\n");
        exit(EXIT_FAILURE);
    }
    stSet* pool = dp->factors;
    int64_t num_fctrs = stSet_size(pool);
    // snapshot the pool so the set can be indexed below
    Factor** fctr_order = (Factor**) malloc(sizeof(Factor*) * num_fctrs);
    stSetIterator* pool_iter = stSet_getIterator(pool);
    for (int64_t i = 0; i < num_fctrs; i++) {
        Factor* fctr_option = (Factor*) stSet_getNext(pool_iter);
        fctr_order[i] = fctr_option;
    }
    stSet_destructIterator(pool_iter);
    double* probs = (double*) malloc(sizeof(double) * num_fctrs);
    double new_fctr_prob;
    // one thread computes the new-factor weight while the rest fill probs;
    // new_fctr_prob is only read after the region's implicit barrier
    #pragma omp parallel shared(new_fctr_prob,probs)
    {
        #pragma omp single nowait
        new_fctr_prob = (*(dp->gamma)) * unobserved_factor_likelihood(fctr, dp);

        Factor* fctr_option;
        #pragma omp for
        for (int64_t i = 0; i < num_fctrs; i++) {
            fctr_option = fctr_order[i];
            probs[i] = stSet_size(fctr_option->children) * data_pt_factor_parent_likelihood(fctr, fctr_option);
        }
    }
    double* cdf = (double*) malloc(sizeof(double) * (num_fctrs + 1));
    parallel_cdf(cdf, probs, num_fctrs, 10);
    // Bug fix: when the pool is empty, the original read cdf[-1] here.
    cdf[num_fctrs] = (num_fctrs > 0 ? cdf[num_fctrs - 1] : 0.0) + new_fctr_prob;
    free(probs);  // bug fix: probs was leaked in the original
    int64_t choice_idx = bisect_left(rand_uniform(cdf[num_fctrs]), cdf, num_fctrs + 1);
    free(cdf);    // bug fix: cdf was leaked in the original
    Factor* fctr_choice;
    if (choice_idx == num_fctrs) {
        // chose a brand-new factor: create it here and sample its parent
        // one level up (base factor if this DP is the root)
        free(fctr_order);
        DirichletProcess* parent_dp = dp->parent;
        if (parent_dp == NULL) {
            fctr_choice = new_base_factor(dp->hdp);
        }
        else {
            fctr_choice = new_middle_factor(dp);
            Factor* new_fctr_parent = sample_from_data_pt_factor(fctr, parent_dp);
            assign_to_parent(fctr_choice, new_fctr_parent, false);
        }
    }
    else {
        fctr_choice = fctr_order[choice_idx];
        free(fctr_order);
    }
    return fctr_choice;
}
//Factor* sample_from_middle_factor(Factor* fctr, DirichletProcess* dp) {
// if (fctr->factor_type != MIDDLE) {
// fprintf(stderr, "Attempted a middle factor sample from non-middle factor.\n");
// exit(EXIT_FAILURE);
// }
//
// stSet* pool = dp->factors;
// int64_t num_fctrs = stSet_size(pool);
// int64_t num_choices = num_fctrs + 1;
//
// Factor** fctr_order = (Factor**) malloc(sizeof(Factor*) * num_fctrs);
// double* log_probs = (double*) malloc(sizeof(double) * num_choices);
//
// stSetIterator* pool_iter = stSet_getIterator(pool);
// for (int64_t i = 0; i < num_fctrs; i++) {
// Factor* fctr_option = (Factor*) stSet_getNext(pool_iter);
// fctr_order[i] = fctr_option;
// log_probs[i] = log((double) stSet_size(fctr_option->children))
// + factor_parent_joint_log_likelihood(fctr, fctr_option);
// }
// stSet_destructIterator(pool_iter);
//
// log_probs[num_fctrs] = log(*(dp->gamma)) + unobserved_factor_joint_log_likelihood(fctr, dp);
//
// double* cdf = (double*) malloc(sizeof(double) * num_choices);
// double cumul = 0.0;
// double normalizing_const = max(log_probs, num_choices);
//
// for (int64_t i = 0; i < num_choices; i++) {
// cumul += exp(log_probs[i] - normalizing_const);
// cdf[i] = cumul;
// }
//
// free(log_probs);
//
// int64_t choice_idx = bisect_left(rand_uniform(cumul), cdf, num_choices);
// free(cdf);
//
// Factor* fctr_choice;
// if (choice_idx == num_fctrs) {
// free(fctr_order);
// DirichletProcess* parent_dp = dp->parent;
// if (parent_dp == NULL) {
// fctr_choice = new_base_factor(dp->hdp);
// }
// else {
// fctr_choice = new_middle_factor(dp);
// Factor* new_fctr_parent = sample_from_middle_factor(fctr, parent_dp);
// assign_to_parent(fctr_choice, new_fctr_parent, false);
// }
// }
// else {
// fctr_choice = fctr_order[choice_idx];
// free(fctr_order);
// }
//
// return fctr_choice;
//}
// Sample a new parent factor for a middle factor from Dirichlet process dp.
// Works in log space because a middle factor's joint likelihood over its
// whole subtree can underflow: weights are log(#children) + joint log
// likelihood (or log(gamma) + prior joint log likelihood for a new factor),
// shifted by the max before exponentiating. Choosing "new" recurses up the
// DP tree to pick or create the new factor's own parent.
Factor* sample_from_middle_factor(Factor* fctr, DirichletProcess* dp) {
    if (fctr->factor_type != MIDDLE) {
        fprintf(stderr, "Attempted a middle factor sample from non-middle factor.\n");
        exit(EXIT_FAILURE);
    }
    stSet* pool = dp->factors;
    int64_t num_fctrs = stSet_size(pool);
    int64_t num_choices = num_fctrs + 1;  // existing factors + "new factor"
    // snapshot the pool so the set can be indexed below
    Factor** fctr_order = (Factor**) malloc(sizeof(Factor*) * num_fctrs);
    double* log_probs = (double*) malloc(sizeof(double) * num_choices);
    stSetIterator* pool_iter = stSet_getIterator(pool);
    for (int64_t i = 0; i < num_fctrs; i++) {
        fctr_order[i] = (Factor*) stSet_getNext(pool_iter);
    }
    stSet_destructIterator(pool_iter);
    double new_fctr_log_prob;
    // one thread computes the new-factor weight while the rest fill
    // log_probs; new_fctr_log_prob is only read after the region's implicit
    // barrier
    #pragma omp parallel shared(new_fctr_log_prob,log_probs)
    {
        #pragma omp single nowait
        new_fctr_log_prob = log(*(dp->gamma)) + unobserved_factor_joint_log_likelihood(fctr, dp);

        #pragma omp for
        for (int64_t i = 0; i < num_fctrs; i++) {
            Factor* fctr_option = fctr_order[i];
            log_probs[i] = log((double) stSet_size(fctr_option->children))
                           + factor_parent_joint_log_likelihood(fctr, fctr_option);
        }
    }
    log_probs[num_fctrs] = new_fctr_log_prob;
    // normalize by the max to avoid underflow, exponentiate, and build a cdf
    double normalizing_const = parallel_max(log_probs, num_choices);
    parallel_add(-normalizing_const, log_probs, num_choices);
    parallel_exp(log_probs, num_choices);
    double* cdf = (double*) malloc(sizeof(double) * num_choices);
    parallel_cdf(cdf, log_probs, num_choices, 10);
    free(log_probs);
    int64_t choice_idx = bisect_left(rand_uniform(cdf[num_fctrs]), cdf, num_choices);
    free(cdf);
    Factor* fctr_choice;
    if (choice_idx == num_fctrs) {
        // chose a brand-new factor: create it here and sample its parent
        // one level up (base factor if this DP is the root)
        free(fctr_order);
        DirichletProcess* parent_dp = dp->parent;
        if (parent_dp == NULL) {
            fctr_choice = new_base_factor(dp->hdp);
        }
        else {
            fctr_choice = new_middle_factor(dp);
            Factor* new_fctr_parent = sample_from_middle_factor(fctr, parent_dp);
            assign_to_parent(fctr_choice, new_fctr_parent, false);
        }
    }
    else {
        fctr_choice = fctr_order[choice_idx];
        free(fctr_order);
    }
    return fctr_choice;
}
// Dispatch on the factor type: only data-point and middle factors have
// parent assignments that can be resampled; base factors are fatal.
Factor* sample_factor(Factor* fctr, DirichletProcess* dp) {
    switch (fctr->factor_type) {
    case DATA_PT:
        return sample_from_data_pt_factor(fctr, dp);
    case MIDDLE:
        return sample_from_middle_factor(fctr, dp);
    default:
        fprintf(stderr, "Cannot sample base factor parent assignments.\n");
        exit(EXIT_FAILURE);
    }
}
// One Gibbs step for a factor's parent assignment: detach it, then sample a
// (possibly brand-new) parent from the same Dirichlet process and attach.
// The parent's DP must be captured before unassigning, because
// unassign_from_parent nulls fctr->parent (and may destroy the old parent
// factor entirely).
void gibbs_factor_iteration(Factor* fctr) {
    DirichletProcess* parent_dp = fctr->parent->dp;
    unassign_from_parent(fctr);
    Factor* new_parent = sample_factor(fctr, parent_dp);
    assign_to_parent(fctr, new_parent, true);
}
// Accumulate the prior-mass weight flowing into this DP from its parent:
// at each level the fraction routed to a brand-new factor is
// gamma / (gamma + #factor children), compounded with the parent's product
// and propagated through the observed subtree.
void cache_prior_contribution(DirichletProcess* dp, double parent_prior_prod) {
    if (!(dp->observed)) {
        return;
    }
    double gamma_param = *(dp->gamma);
    double total_children = (double) dp->num_factor_children;
    double prior_prod = (gamma_param / (gamma_param + total_children)) * parent_prior_prod;
    dp->base_factor_wt += prior_prod;
    stListIterator* iter = stList_getIterator(dp->children);
    DirichletProcess* child;
    while ((child = (DirichletProcess*) stList_getNext(iter)) != NULL) {
        cache_prior_contribution(child, prior_prod);
    }
    stList_destructIterator(iter);
}
// Cache the mixing weight this factor's lineage contributes to each
// observed DP's posterior predictive. At each DP in the factor tree the
// local weight is (#children of the factor) / (gamma + total factor
// children). Child factors are recursed directly; DPs below the tree that
// hold no child factor receive the prior-mass share of this weight via
// cache_prior_contribution.
void cache_base_factor_weight(Factor* fctr) {
    DirichletProcess* dp = fctr->dp;
    double gamma_param = *(dp->gamma);
    double total_children = (double) dp->num_factor_children;
    double wt = ((double) stSet_size(fctr->children)) / (gamma_param + total_children);
    dp->base_factor_wt += wt;
    // leaf DPs stop here: their factor children are data points
    if (stList_length(dp->children) > 0) {
        stSetIterator* child_fctr_iter = stSet_getIterator(fctr->children);
        Factor* child_fctr = (Factor*) stSet_getNext(child_fctr_iter);
        while (child_fctr != NULL) {
            cache_base_factor_weight(child_fctr);
            child_fctr = (Factor*) stSet_getNext(child_fctr_iter);
        }
        stSet_destructIterator(child_fctr_iter);
        // route the prior share of wt into the child DPs' subtrees
        stListIterator* child_dp_iter = stList_getIterator(dp->children);
        DirichletProcess* child_dp = (DirichletProcess*) stList_getNext(child_dp_iter);
        while (child_dp != NULL) {
            cache_prior_contribution(child_dp, wt);
            child_dp = (DirichletProcess*) stList_getNext(child_dp_iter);
        }
        stList_destructIterator(child_dp_iter);
    }
}
// Fold a factor's density into this DP's posterior-predictive accumulator,
// scaled by the cached mixing weight, then recurse into observed children.
// The weight is consumed (reset to zero) so the next factor's pass starts
// from a clean slate.
void push_factor_distr(DirichletProcess* dp, double* distr, int64_t length) {
    double wt = dp->base_factor_wt;
    double* collector = dp->posterior_predictive;
    for (int64_t i = 0; i < length; i++) {
        collector[i] += wt * distr[i];
    }
    dp->base_factor_wt = 0.0;
    stListIterator* iter = stList_getIterator(dp->children);
    DirichletProcess* child;
    while ((child = (DirichletProcess*) stList_getNext(iter)) != NULL) {
        if (child->observed) {
            push_factor_distr(child, distr, length);
        }
    }
    stList_destructIterator(iter);
}
// Record one posterior sample of every observed DP's predictive density
// over the sampling grid: each base factor's posterior predictive is pushed
// down the tree scaled by the cached mixing weights, and a final pass adds
// the prior predictive weighted by the mass on a brand-new base factor.
void take_distr_sample(HierarchicalDirichletProcess* hdp) {
    DirichletProcess* base_dp = hdp->base_dp;
    double* grid = hdp->sampling_grid;
    int64_t length = hdp->grid_length;
    double* pdf = (double*) malloc(sizeof(double) * length);
    //SumOfLogsMemo* log_sum_memo = hdp->log_sum_memo;
    stSetIterator* base_fctr_iter = stSet_getIterator(base_dp->factors);
    Factor* base_fctr = (Factor*) stSet_getNext(base_fctr_iter);
    while (base_fctr != NULL) {
        // weight caching must precede the push: push_factor_distr consumes
        // (zeroes) each DP's base_factor_wt
        cache_base_factor_weight(base_fctr);
        evaluate_posterior_predictive(base_fctr, grid, pdf, length);//, log_sum_memo);
        push_factor_distr(base_dp, pdf, length);
        base_fctr = (Factor*) stSet_getNext(base_fctr_iter);
    }
    stSet_destructIterator(base_fctr_iter);
    // remaining prior mass (a hypothetical new base factor) gets the prior
    // predictive density
    cache_prior_contribution(base_dp, 1.0);
    evaluate_prior_predictive(hdp, grid, pdf, length);
    push_factor_distr(base_dp, pdf, length);
    (hdp->samples_taken)++;
    free(pdf);
}
// Knuth (inside-out Fisher-Yates) shuffle: returns a freshly malloc'd array
// containing the HDP's DPs in uniformly random order; caller frees.
DirichletProcess** get_shuffled_dps(HierarchicalDirichletProcess* hdp) {
    int64_t num_dps = hdp->num_dps;
    DirichletProcess** dps = hdp->dps;
    DirichletProcess** shuffled_dps = (DirichletProcess**) malloc(sizeof(DirichletProcess*) * num_dps);
    int64_t pos;
    for (int64_t i = 0; i < num_dps; i++) {
        pos = rand() % (i + 1);
        // bug fix: only relocate the earlier entry when the slots differ;
        // previously slot i was read before it had ever been written when
        // pos == i (an uninitialized/indeterminate-value read)
        if (pos != i) {
            shuffled_dps[i] = shuffled_dps[pos];
        }
        shuffled_dps[pos] = dps[i];
    }
    return shuffled_dps;
}
void sample_dp_factors(DirichletProcess* dp, int64_t* iter_counter, int64_t burn_in, int64_t thinning,
int64_t* sample_counter, int64_t num_samples) {
if (!dp->observed) {
return;
}
int64_t iter = *iter_counter;
int64_t samples_taken = *sample_counter;
// have to pre-allocate the array of sampling factors in case reassignment triggers
// destruction of the set the iterator is iterating through
int64_t num_factor_children = dp->num_factor_children;
Factor** sampling_fctrs = (Factor**) malloc(sizeof(Factor*) * num_factor_children);
int64_t i = 0;
stSetIterator* fctr_iter = stSet_getIterator(dp->factors);
Factor* fctr = (Factor*) stSet_getNext(fctr_iter);
stSetIterator* child_fctr_iter;
Factor* child_fctr;
while (fctr != NULL) {
child_fctr_iter = stSet_getIterator(fctr->children);
child_fctr = (Factor*) stSet_getNext(child_fctr_iter);
while (child_fctr != NULL) {
sampling_fctrs[i] = child_fctr;
i++;
child_fctr = (Factor*) stSet_getNext(child_fctr_iter);
}
stSet_destructIterator(child_fctr_iter);
fctr = (Factor*) stSet_getNext(fctr_iter);
}
stSet_destructIterator(fctr_iter);
for (int64_t j = 0; j < num_factor_children; j++) {
gibbs_factor_iteration(sampling_fctrs[j]);
iter++;
if (iter % thinning == 0) {
if (iter > burn_in) {
take_distr_sample(dp->hdp);
samples_taken++;
if (samples_taken >= num_samples) {
break;
}
}
}
}
free(sampling_fctrs);
*sample_counter = samples_taken;
*iter_counter = iter;
}
// Auxiliary variable w ~ Beta(gamma + 1, n) used when resampling the
// concentration parameters (n = number of factor children of the DP).
double sample_auxilliary_w(DirichletProcess* dp) {
    float alpha_param = (float) *(dp->gamma) + 1.0;
    float beta_param = (float) dp->num_factor_children;
    return (double) genbet(alpha_param, beta_param);
}
// Auxiliary indicator s ~ Bernoulli(n / (n + gamma)), n = number of factor
// children of the DP.
bool sample_auxilliary_s(DirichletProcess* dp) {
    double n = (double) dp->num_factor_children;
    double p = n / (n + *(dp->gamma));
    return rand_bernoulli(p);
}
// Refresh the w and s auxiliary variables for every observed DP, storing
// them in the HDP's parallel w/s vectors indexed by DP id.
void sample_gamma_aux_vars(HierarchicalDirichletProcess* hdp) {
    double* w_vec = hdp->w_aux_vector;
    bool* s_vec = hdp->s_aux_vector;
    int64_t n = hdp->num_dps;
    for (int64_t i = 0; i < n; i++) {
        DirichletProcess* proc = hdp->dps[i];
        if (proc->observed) {
            w_vec[i] = sample_auxilliary_w(proc);
            s_vec[i] = sample_auxilliary_s(proc);
        }
    }
}
// Resample the base DP's concentration parameter using Escobar and West's
// (1995) two-component gamma mixture, given the log of the auxiliary w and
// the current number of base factors.
void sample_base_gamma_internal(HierarchicalDirichletProcess* hdp, double log_w, int64_t num_factors) {
    DirichletProcess* base_dp = hdp->base_dp;
    double alpha_prior = hdp->gamma_alpha[0];
    double beta_prior = hdp->gamma_beta[0];
    double n = (double) base_dp->num_factor_children;
    // posterior parameters of the two mixture components
    double beta_post = beta_prior - log_w;
    double alpha_post = alpha_prior + (double) num_factors;
    // mixture weight between the components
    double odds = (alpha_post - 1.0) / (n * beta_post);
    double wt = odds / (1.0 + odds);
    // note: different parameterization switches alpha and beta
    float drawn = wt * gengam(beta_post, alpha_post)
                  + (1 - wt) * gengam(beta_post, alpha_post - 1.0);
    hdp->gamma[0] = (double) drawn;
}
// Conjugate gamma update for a non-base depth, using the per-depth sums of
// the w (log) and s auxiliary variables.
void sample_middle_gammas_internal(HierarchicalDirichletProcess* hdp, int64_t depth,
                                   double sum_log_w, int64_t sum_s, int64_t num_depth_fctrs) {
    double alpha_post = hdp->gamma_alpha[depth] + (double) (num_depth_fctrs - sum_s);
    double beta_post = hdp->gamma_beta[depth] - sum_log_w;
    // note: different parameterization switches alpha and beta
    hdp->gamma[depth] = (double) gengam((float) beta_post, (float) alpha_post);
}
// Resample the concentration parameter at every depth of the DP tree,
// accumulating per-depth sufficient statistics (factor counts, sum of
// log(w), sum of s) over the observed DPs first. Iteration and sample
// counters advance on the same burn-in/thinning schedule as factor
// sampling.
void sample_gammas(HierarchicalDirichletProcess* hdp, int64_t* iter_counter, int64_t burn_in,
                   int64_t thinning, int64_t* sample_counter, int64_t num_samples) {
    int64_t iter = *iter_counter;
    int64_t samples_taken = *sample_counter;
    int64_t tree_depth = hdp->depth;
    double* w = hdp->w_aux_vector;
    bool* s = hdp->s_aux_vector;
    // per-depth sufficient statistics
    int64_t* num_depth_fctrs = (int64_t*) malloc(sizeof(int64_t) * tree_depth);
    double* sum_log_w = (double*) malloc(sizeof(double) * tree_depth);
    int64_t* sum_s = (int64_t*) malloc(sizeof(int64_t) * tree_depth);
    for (int64_t depth = 0; depth < tree_depth; depth++) {
        num_depth_fctrs[depth] = 0;
        sum_log_w[depth] = 0.0;
        sum_s[depth] = 0;
    }
    int64_t num_dps = hdp->num_dps;
    DirichletProcess** dps = hdp->dps;
    DirichletProcess* dp;
    int64_t dp_depth;
    for (int64_t id = 0; id < num_dps; id++) {
        dp = dps[id];
        if (!dp->observed) {
            continue;
        }
        dp_depth = dp->depth;
        num_depth_fctrs[dp_depth] += stSet_size(dp->factors);
        sum_log_w[dp_depth] += log(w[id]);
        if (s[id]) sum_s[dp_depth]++;
    }
    for (int64_t depth = 0; depth < tree_depth; depth++) {
        if (depth == 0) {
            sample_base_gamma_internal(hdp, sum_log_w[depth], num_depth_fctrs[depth]);
        }
        else {
            sample_middle_gammas_internal(hdp, depth, sum_log_w[depth],
                                          sum_s[depth], num_depth_fctrs[depth]);
        }
        iter++;
        if (iter % thinning == 0) {
            if (iter > burn_in) {
                // bug fix: was take_distr_sample(dp->hdp); dp is the stale
                // loop variable above and is NULL when no DP is observed.
                // hdp is the same object when dp is valid.
                take_distr_sample(hdp);
                samples_taken++;
                if (samples_taken >= num_samples) {
                    break;
                }
            }
        }
    }
    free(sum_log_w);
    free(sum_s);
    free(num_depth_fctrs);
    *iter_counter = iter;
    *sample_counter = samples_taken;
}
// One full resampling pass over the gamma (concentration) parameters:
// refresh the w/s auxiliary variables, then draw new gammas. Counter
// pointers are threaded through to sample_gammas.
void sample_gamma_params(HierarchicalDirichletProcess* hdp, int64_t* iter_counter, int64_t burn_in,
                         int64_t thinning, int64_t* sample_counter, int64_t num_samples) {
    sample_gamma_aux_vars(hdp);
    sample_gammas(hdp, iter_counter, burn_in, thinning, sample_counter, num_samples);
}
// Log density of the data under one factor subtree: data-point leaves
// contribute the log of their likelihood under their parent; internal
// factors sum over their children recursively.
double snapshot_joint_log_density_internal(Factor* fctr) {
    if (fctr->factor_type == DATA_PT) {
        return log(data_pt_factor_parent_likelihood(fctr, fctr->parent));
    }
    double total = 0.0;
    stSetIterator* iter = stSet_getIterator(fctr->children);
    for (Factor* child = (Factor*) stSet_getNext(iter);
         child != NULL;
         child = (Factor*) stSet_getNext(iter)) {
        total += snapshot_joint_log_density_internal(child);
    }
    stSet_destructIterator(iter);
    return total;
}
// Joint log density of all data under the current factor trees, summed over
// every base factor of the base DP.
double snapshot_joint_log_density(HierarchicalDirichletProcess* hdp) {
    double total = 0.0;
    stSetIterator* iter = stSet_getIterator(hdp->base_dp->factors);
    for (Factor* base_fctr = (Factor*) stSet_getNext(iter);
         base_fctr != NULL;
         base_fctr = (Factor*) stSet_getNext(iter)) {
        total += snapshot_joint_log_density_internal(base_fctr);
    }
    stSet_destructIterator(iter);
    return total;
}
// Record the current number of factors in each DP. Returns a malloc'd array
// of length *length_out (= num_dps); caller frees.
int64_t* snapshot_num_factors(HierarchicalDirichletProcess* hdp, int64_t* length_out) {
    int64_t n = hdp->num_dps;
    *length_out = n;
    int64_t* counts = (int64_t*) malloc(sizeof(int64_t) * n);
    for (int64_t i = 0; i < n; i++) {
        counts[i] = (int64_t) stSet_size(hdp->dps[i]->factors);
    }
    return counts;
}
// Copy of the current concentration parameters, one per tree depth.
// Returns a malloc'd array of length *length_out (= depth); caller frees.
double* snapshot_gamma_params(HierarchicalDirichletProcess* hdp, int64_t* length_out) {
    int64_t n = hdp->depth;
    *length_out = n;
    double* copy = (double*) malloc(sizeof(double) * n);
    for (int64_t i = 0; i < n; i++) {
        copy[i] = hdp->gamma[i];
    }
    return copy;
}
// Log probability of this factor's current parent assignment relative to
// all assignments it could have taken under the Gibbs conditional — a
// diagnostic of how "typical" the current state is. Exits on BASE factors,
// which have no parent assignment.
double snapshot_factor_log_likelihood(Factor* fctr) {
    double parent_prob = 0.0;
    double cumul = 0.0;
    if (fctr->factor_type == BASE) {
        fprintf(stderr, "Cannot snapshot base factor log likelihood.\n");
        exit(EXIT_FAILURE);
    }
    else if (fctr->factor_type == DATA_PT) {
        // data point: each candidate parent in the parent DP is weighted by
        // its size times the likelihood; a new factor contributes
        // gamma * prior likelihood
        Factor* parent_fctr = fctr->parent;
        DirichletProcess* parent_dp = parent_fctr->dp;
        stSet* pool = parent_dp->factors;
        int64_t num_fctrs = stSet_size(pool);
        stSetIterator* pool_iter = stSet_getIterator(pool);
        Factor* fctr_option;
        double fctr_size;
        double prob;
        for (int64_t i = 0; i < num_fctrs; i++) {
            fctr_option = (Factor*) stSet_getNext(pool_iter);
            fctr_size = (double) stSet_size(fctr_option->children);
            prob = fctr_size * data_pt_factor_parent_likelihood(fctr, fctr_option);
            cumul += prob;
            if (fctr_option == parent_fctr) {
                parent_prob = prob;
            }
        }
        stSet_destructIterator(pool_iter);
        double gamma_param = *(parent_dp->gamma);
        cumul += gamma_param * unobserved_factor_likelihood(fctr, parent_dp);
    }
    else {
        // middle factor: same idea computed in log space, after caching the
        // factor's sufficient statistics on its DP (the joint-likelihood
        // helpers read these cached fields)
        DirichletProcess* dp = fctr->dp;
        DirichletProcess* parent_dp = dp->parent;
        Factor* parent_fctr = fctr->parent;
        double mean, sum_sq_devs;
        int64_t num_data;
        get_factor_stats(fctr, &mean, &sum_sq_devs, &num_data);
        dp->cached_factor_mean = mean;
        dp->cached_factor_size = num_data;
        dp->cached_factor_sum_sq_dev = sum_sq_devs;
        stSet* pool = parent_dp->factors;
        int64_t num_fctrs = stSet_size(pool);
        int64_t num_choices = num_fctrs + 1;
        double* log_probs = (double*) malloc(sizeof(double) * num_choices);
        stSetIterator* pool_iter = stSet_getIterator(pool);
        Factor* fctr_option;
        double log_prob;
        double parent_log_prob = 0.0;
        double fctr_size;
        for (int64_t i = 0; i < num_fctrs; i++) {
            fctr_option = (Factor *)stSet_getNext(pool_iter);
            fctr_size = (double )stSet_size(fctr_option->children);
            log_prob = factor_parent_joint_log_likelihood(fctr, fctr_option) + log(fctr_size);
            log_probs[i] = log_prob;
            if (fctr_option == parent_fctr) {
                parent_log_prob = log_prob;
            }
        }
        stSet_destructIterator(pool_iter);
        double gamma_param = *(dp->gamma);
        // final slot: the option of creating a brand-new parent factor
        log_probs[num_fctrs] = unobserved_factor_joint_log_likelihood(fctr, parent_dp) + log(gamma_param);
        // presumably max() returns the largest log_prob, subtracted before
        // exponentiating for numerical stability — TODO confirm helper
        double normalizing_const = max(log_probs, num_choices);
        parent_prob = exp(parent_log_prob - normalizing_const);
        for (int64_t i = 0; i < num_choices; i++) {
            cumul += exp(log_probs[i] - normalizing_const);;
        }
        free(log_probs);
    }
    // TODO: this is a hack, makes it inaccurate for the early iterations
    if (parent_prob == 0.0) {
        return 0.0;
    }
    return (log(parent_prob) - log(cumul)) / 1000.0;
}
// Sum the snapshot log likelihood over every child factor of every factor
// belonging to this DP.
double snapshot_dir_proc_log_likelihood(DirichletProcess* dp) {
    double total = 0.0;
    stSetIterator* outer = stSet_getIterator(dp->factors);
    for (Factor* fctr = (Factor*) stSet_getNext(outer);
         fctr != NULL;
         fctr = (Factor*) stSet_getNext(outer)) {
        stSetIterator* inner = stSet_getIterator(fctr->children);
        for (Factor* child = (Factor*) stSet_getNext(inner);
             child != NULL;
             child = (Factor*) stSet_getNext(inner)) {
            total += snapshot_factor_log_likelihood(child);
        }
        stSet_destructIterator(inner);
    }
    stSet_destructIterator(outer);
    return total;
}
// Total snapshot log likelihood over all observed DPs in the hierarchy.
double snapshot_log_likelihood(HierarchicalDirichletProcess* hdp) {
    double total = 0.0;
    for (int64_t id = 0; id < hdp->num_dps; id++) {
        DirichletProcess* dp = hdp->dps[id];
        if (dp->observed) {
            total += snapshot_dir_proc_log_likelihood(dp);
        }
    }
    return total;
}
// Capture the sampler's current state for diagnostics: per-DP factor
// counts, gamma parameters, and the joint log likelihood/density.
// The two output arrays are malloc'd; caller takes ownership.
void take_snapshot(HierarchicalDirichletProcess* hdp, int64_t** num_dp_fctrs_out, int64_t* num_dps_out,
                   double** gamma_params_out, int64_t* num_gamma_params_out, double* log_likelihood_out,
                   double* log_density_out) {
    *num_dp_fctrs_out = snapshot_num_factors(hdp, num_dps_out);
    *gamma_params_out = snapshot_gamma_params(hdp, num_gamma_params_out);
    *log_likelihood_out = snapshot_log_likelihood(hdp);
    *log_density_out = snapshot_joint_log_density(hdp);
}
// Convenience wrapper: run Gibbs sampling with no per-sweep snapshot
// callback.
void execute_gibbs_sampling(HierarchicalDirichletProcess* hdp, int64_t num_samples, int64_t burn_in,
                            int64_t thinning, bool verbose) {
    execute_gibbs_sampling_with_snapshots(hdp, num_samples, burn_in, thinning, NULL, NULL, verbose);
}
// Main Gibbs sampling driver. Repeatedly sweeps over the DPs in a fresh
// random order, resampling factor assignments (and, if enabled, the gamma
// parameters) until num_samples distribution samples have been collected:
// one sample per `thinning` iterations once past `burn_in`. snapshot_func,
// if non-NULL, is invoked once per sweep with snapshot_func_args.
// Exits if data has not been passed in or the structure is not finalized.
void execute_gibbs_sampling_with_snapshots(HierarchicalDirichletProcess* hdp, int64_t num_samples, int64_t burn_in, int64_t thinning,
                                           void (*snapshot_func)(HierarchicalDirichletProcess*, void*),
                                           void* snapshot_func_args, bool verbose) {
    if (hdp->data == NULL || hdp->data_pt_dp_id == NULL) {
        fprintf(stderr, "Cannot perform Gibbs sampling before passing data to HDP.\n");
        exit(EXIT_FAILURE);
    }
    if (!hdp->finalized) {
        fprintf(stderr, "Cannot perform Gibbs sampling before finalizing HDP structure.\n");
        exit(EXIT_FAILURE);
    }
    int64_t prev_sweep_iter_count = 0;
    int64_t sweep_counter = 1;
    int64_t iter_counter = 0;
    int64_t sample_counter = 0;
    int64_t num_dps = hdp->num_dps;
    int64_t non_data_pt_samples = 0;
    DirichletProcess** sampling_dps;
    while (sample_counter < num_samples) {
        if (verbose) {
            // each data point contributes one iteration per sweep; the rest
            // come from middle factors
            if (sweep_counter > 1) {
                non_data_pt_samples = iter_counter - prev_sweep_iter_count - hdp->data_length;
            }
            fprintf(stderr, "Beginning sweep %"PRId64". Performed %"PRId64" sampling iterations. Previous sweep sampled from ~%"PRId64" non-data point factors. Collected %"PRId64" of %"PRId64" distribution samples.\n", sweep_counter, iter_counter, non_data_pt_samples, sample_counter, num_samples);
            prev_sweep_iter_count = iter_counter;
            sweep_counter++;
        }
        if (snapshot_func != NULL) {
            snapshot_func(hdp, snapshot_func_args);
        }
        // visit the DPs in random order each sweep
        sampling_dps = get_shuffled_dps(hdp);
        for (int64_t i = 0; i < num_dps; i++) {
            sample_dp_factors(sampling_dps[i], &iter_counter, burn_in, thinning,
                              &sample_counter, num_samples);
            if (sample_counter >= num_samples) {
                break;
            }
        }
        free(sampling_dps);
        if (hdp->sample_gamma && sample_counter < num_samples) {
            sample_gamma_params(hdp, &iter_counter, burn_in, thinning, &sample_counter,
                                num_samples);
        }
    }
}
// Average the accumulated posterior predictive samples and fit the
// interpolating spline for each observed DP. Exits if no samples have been
// taken or if the splines were already finalized.
void finalize_distributions(HierarchicalDirichletProcess* hdp) {
    if (hdp->samples_taken <= 0) {
        fprintf(stderr, "Must perform Gibbs sampling before finalizing sampled distributions.\n");
        exit(EXIT_FAILURE);
    }
    if (hdp->splines_finalized) {
        fprintf(stderr, "Distributions have already been finalized.\n");
        exit(EXIT_FAILURE);
    }
    double scale = 1.0 / ((double) hdp->samples_taken);
    int64_t grid_length = hdp->grid_length;
    double* grid = hdp->sampling_grid;
    for (int64_t id = 0; id < hdp->num_dps; id++) {
        DirichletProcess* dp = hdp->dps[id];
        if (!dp->observed) {
            continue;
        }
        double* distr = dp->posterior_predictive;
        // turn the accumulated sum into a mean over samples
        for (int64_t i = 0; i < grid_length; i++) {
            distr[i] = distr[i] * scale;
        }
        dp->spline_slopes = spline_knot_slopes(grid, distr, grid_length);
    }
    hdp->splines_finalized = true;
}
// Evaluate the finalized posterior predictive density of a DP at x via its
// spline, substituting the nearest observed ancestor for unobserved DPs and
// clamping negative interpolation artifacts to zero.
double dir_proc_density(HierarchicalDirichletProcess* hdp, double x, int64_t dp_id) {
    if (!hdp->splines_finalized) {
        fprintf(stderr, "Must finalize distributions before querying densities.\n");
        exit(EXIT_FAILURE);
    }
    if (dp_id < 0 || dp_id >= hdp->num_dps) {
        fprintf(stderr, "Hierarchical Dirichlet process has no Dirichlet process with this ID.\n");
        exit(EXIT_FAILURE);
    }
    DirichletProcess* dp = hdp->dps[dp_id];
    while (!dp->observed) {
        dp = dp->parent;
    }
    double density = grid_spline_interp(x, hdp->sampling_grid, dp->posterior_predictive,
                                        dp->spline_slopes, hdp->grid_length);
    return (density > 0.0) ? density : 0.0;
}
// Memoized pairwise distance lookup. The memo matrix stores the strict
// lower triangle (row id > column id) and marks uncomputed entries with a
// negative value; the metric is computed lazily on first request.
double get_dir_proc_distance(DistributionMetricMemo* memo, int64_t dp_id_1, int64_t dp_id_2) {
    int64_t num_dps = memo->num_distrs;
    if (dp_id_1 < 0 || dp_id_2 < 0 || dp_id_1 >= num_dps || dp_id_2 >= num_dps) {
        fprintf(stderr, "Invalid Dirchlet process ID.\n");
        exit(EXIT_FAILURE);
    }
    if (dp_id_1 == dp_id_2) {
        return 0.0;
    }
    // canonicalize so the first index is the larger one
    if (dp_id_1 < dp_id_2) {
        int64_t tmp = dp_id_1;
        dp_id_1 = dp_id_2;
        dp_id_2 = tmp;
    }
    int64_t idx = ((dp_id_1 - 1) * dp_id_1) / 2 + dp_id_2;
    double* matrix = memo->memo_matrix;
    if (matrix[idx] < 0) {
        matrix[idx] = memo->metric_func(memo->hdp, dp_id_1, dp_id_2);
    }
    return matrix[idx];
}
// Apply dist_func to the finalized predictive distributions of two DPs on
// the shared sampling grid, substituting the nearest observed ancestor for
// unobserved DPs.
double dir_proc_distance(HierarchicalDirichletProcess* hdp, int64_t dp_id_1, int64_t dp_id_2,
                         double (*dist_func)(double*, double*, double*, int64_t)) {
    if (!hdp->splines_finalized) {
        fprintf(stderr, "Cannot compute a Shannon-Jensen divergence before finalizing distributions.\n");
        exit(EXIT_FAILURE);
    }
    DirichletProcess* dp_1 = hdp->dps[dp_id_1];
    DirichletProcess* dp_2 = hdp->dps[dp_id_2];
    while (!dp_1->observed) {
        dp_1 = dp_1->parent;
    }
    while (!dp_2->observed) {
        dp_2 = dp_2->parent;
    }
    return dist_func(hdp->sampling_grid, dp_1->posterior_predictive,
                     dp_2->posterior_predictive, hdp->grid_length);
}
// Symmetrized KL divergence between two densities sampled on the grid x,
// integrated with the trapezoid rule. Assumes strictly positive densities.
double kl_divergence(double* x, double* distr_1, double* distr_2, int64_t length) {
    double total = 0.0;
    double prev = distr_1[0] * log(distr_1[0] / distr_2[0])
                  + distr_2[0] * log(distr_2[0] / distr_1[0]);
    for (int64_t i = 1; i < length; i++) {
        double curr = distr_1[i] * log(distr_1[i] / distr_2[i])
                      + distr_2[i] * log(distr_2[i] / distr_1[i]);
        total += 0.5 * (prev + curr) * (x[i] - x[i - 1]);
        prev = curr;
    }
    return total;
}
// Symmetrized KL divergence between the distributions of two DPs.
double dir_proc_kl_divergence(HierarchicalDirichletProcess* hdp, int64_t dp_id_1, int64_t dp_id_2) {
    return dir_proc_distance(hdp, dp_id_1, dp_id_2, kl_divergence);
}
// Memoized pairwise symmetrized-KL comparison table for an HDP.
DistributionMetricMemo* new_kl_divergence_memo(HierarchicalDirichletProcess* hdp) {
    return new_distr_metric_memo(hdp, dir_proc_kl_divergence);
}
// Hellinger distance between two densities sampled on the grid x:
// sqrt(1 - B) where B is the Bhattacharyya coefficient (trapezoid-rule
// integral of sqrt(p * q)).
double hellinger_distance(double* x, double* distr_1, double* distr_2, int64_t length) {
    double integral = 0.0;
    double left_pt = sqrt(distr_1[0] * distr_2[0]);
    double right_pt;
    double dx;
    for (int64_t i = 1; i < length; i++) {
        right_pt = sqrt(distr_1[i] * distr_2[i]);
        dx = x[i] - x[i - 1];
        integral += 0.5 * (left_pt + right_pt) * dx;
        left_pt = right_pt;
    }
    // bug fix: quadrature error can push the coefficient slightly above 1,
    // which previously made sqrt(1 - integral) return NaN; clamp instead
    if (integral > 1.0) {
        integral = 1.0;
    }
    return sqrt(1.0 - integral);
}
// Hellinger distance between the distributions of two DPs.
double dir_proc_hellinger_distance(HierarchicalDirichletProcess* hdp, int64_t dp_id_1, int64_t dp_id_2) {
    return dir_proc_distance(hdp, dp_id_1, dp_id_2, hellinger_distance);
}
// Memoized pairwise Hellinger-distance comparison table for an HDP.
DistributionMetricMemo* new_hellinger_distance_memo(HierarchicalDirichletProcess* hdp) {
    return new_distr_metric_memo(hdp, dir_proc_hellinger_distance);
}
// L2 distance between two densities sampled on the grid x: the square root
// of the trapezoid-rule integral of (p - q)^2.
double l2_distance(double* x, double* distr_1, double* distr_2, int64_t length) {
    double d0 = distr_1[0] - distr_2[0];
    double prev = d0 * d0;
    double total = 0.0;
    for (int64_t i = 1; i < length; i++) {
        double d = distr_1[i] - distr_2[i];
        double curr = d * d;
        total += 0.5 * (prev + curr) * (x[i] - x[i - 1]);
        prev = curr;
    }
    return sqrt(total);
}
// L2 distance between the distributions of two DPs.
double dir_proc_l2_distance(HierarchicalDirichletProcess* hdp, int64_t dp_id_1, int64_t dp_id_2) {
    return dir_proc_distance(hdp, dp_id_1, dp_id_2, l2_distance);
}
// Memoized pairwise L2-distance comparison table for an HDP.
DistributionMetricMemo* new_l2_distance_memo(HierarchicalDirichletProcess* hdp) {
    return new_distr_metric_memo(hdp, dir_proc_l2_distance);
}
// Jensen-Shannon distance between two densities sampled on the grid x:
// sqrt of the JS divergence, integrated with the trapezoid rule.
double shannon_jensen_distance(double* x, double* distr_1, double* distr_2, int64_t length) {
    double m = 0.5 * (distr_1[0] + distr_2[0]);
    double prev = 0.5 * (distr_1[0] * log(distr_1[0] / m)
                         + distr_2[0] * log(distr_2[0] / m));
    double total = 0.0;
    for (int64_t i = 1; i < length; i++) {
        m = 0.5 * (distr_1[i] + distr_2[i]);
        double curr = 0.5 * (distr_1[i] * log(distr_1[i] / m)
                             + distr_2[i] * log(distr_2[i] / m));
        total += 0.5 * (prev + curr) * (x[i] - x[i - 1]);
        prev = curr;
    }
    return sqrt(total);
}
// Jensen-Shannon distance between the distributions of two DPs.
double dir_proc_shannon_jensen_distance(HierarchicalDirichletProcess* hdp, int64_t dp_id_1, int64_t dp_id_2) {
    return dir_proc_distance(hdp, dp_id_1, dp_id_2, shannon_jensen_distance);
}
// Memoized pairwise Jensen-Shannon comparison table for an HDP.
DistributionMetricMemo* new_shannon_jensen_distance_memo(HierarchicalDirichletProcess* hdp) {
    return new_distr_metric_memo(hdp, dir_proc_shannon_jensen_distance);
}
// Expected value of a DP's finalized posterior predictive distribution,
// approximated by a right-endpoint Riemann sum over the sampling grid.
double dir_proc_expected_val(HierarchicalDirichletProcess* hdp, int64_t dp_id) {
    double* grid = hdp->sampling_grid;
    int64_t grid_length = hdp->grid_length;
    // bug fix: walk up to the nearest observed ancestor, consistent with
    // dir_proc_density and dir_proc_distance; an unobserved DP has no
    // posterior predictive of its own (NULL deref before this fix)
    DirichletProcess* dp = hdp->dps[dp_id];
    while (!dp->observed) {
        dp = dp->parent;
    }
    double* distr = dp->posterior_predictive;
    double expected_val = 0.0;
    double dx;
    for (int64_t i = 1; i < grid_length; i++) {
        dx = grid[i] - grid[i - 1];
        expected_val += grid[i] * distr[i] * dx;
    }
    return expected_val;
}
// Variance of a DP's finalized posterior predictive distribution, using a
// right-endpoint Riemann sum over the sampling grid.
double dir_proc_variance(HierarchicalDirichletProcess* hdp, int64_t dp_id) {
    double* grid = hdp->sampling_grid;
    int64_t grid_length = hdp->grid_length;
    // bug fix: walk up to the nearest observed ancestor, consistent with
    // dir_proc_density/dir_proc_expected_val; an unobserved DP has a NULL
    // posterior predictive
    DirichletProcess* dp = hdp->dps[dp_id];
    while (!dp->observed) {
        dp = dp->parent;
    }
    double* distr = dp->posterior_predictive;
    double expected_val = dir_proc_expected_val(hdp, dp_id);
    double variance = 0.0;
    double dev;
    double dx;
    for (int64_t i = 1; i < grid_length; i++) {
        dx = grid[i] - grid[i - 1];
        dev = grid[i] - expected_val;
        variance += dev * dev * distr[i] * dx;
    }
    return variance;
}
// Compare one distribution from each of two finalized HDPs under dist_func.
// hdp_1's sampling grid is the common support; hdp_2's distribution is
// re-evaluated on that grid through its spline (dir_proc_density).
double compare_hdp_distrs(HierarchicalDirichletProcess* hdp_1, int64_t dp_id_1, // this HDP is the master for grid samples
                          HierarchicalDirichletProcess* hdp_2, int64_t dp_id_2,
                          double (*dist_func)(double*, double*, double*, int64_t)) {
    if (!hdp_1->splines_finalized || !hdp_2->splines_finalized) {
        fprintf(stderr, "Must finalize distributions of both hierarchical Dirichelt processes before comparing.\n");
        exit(EXIT_FAILURE);
    }
    int64_t num_dps_1 = hdp_1->num_dps;
    int64_t num_dps_2 = hdp_2->num_dps;
    if (dp_id_1 < 0 || dp_id_2 < 0 || dp_id_1 >= num_dps_1 || dp_id_2 >= num_dps_2) {
        fprintf(stderr, "Invalid Dirchlet process ID.\n");
        exit(EXIT_FAILURE);
    }
    double* grid = hdp_1->sampling_grid;
    int64_t grid_length = hdp_1->grid_length;
    DirichletProcess* dp_1 = hdp_1->dps[dp_id_1];
    while (!dp_1->observed) {
        dp_1 = dp_1->parent;
    }
    double* distr_1 = dp_1->posterior_predictive;
    double* distr_2 = (double*) malloc(sizeof(double) * grid_length);
    for (int64_t i = 0; i < grid_length; i++) {
        distr_2[i] = dir_proc_density(hdp_2, grid[i], dp_id_2);
    }
    double dist = dist_func(grid, distr_1, distr_2, grid_length);
    // bug fix: distr_2 was previously leaked on every call
    free(distr_2);
    return dist;
}
// Cross-HDP comparison under the symmetrized KL divergence.
double compare_hdp_distrs_kl_divergence(HierarchicalDirichletProcess* hdp_1, int64_t dp_id_1,
                                        HierarchicalDirichletProcess* hdp_2, int64_t dp_id_2) {
    return compare_hdp_distrs(hdp_1, dp_id_1, hdp_2, dp_id_2, kl_divergence);
}
// Cross-HDP comparison under the L2 distance.
double compare_hdp_distrs_l2_distance(HierarchicalDirichletProcess* hdp_1, int64_t dp_id_1,
                                      HierarchicalDirichletProcess* hdp_2, int64_t dp_id_2) {
    return compare_hdp_distrs(hdp_1, dp_id_1, hdp_2, dp_id_2, l2_distance);
}
// Cross-HDP comparison under the Jensen-Shannon distance.
double compare_hdp_distrs_shannon_jensen_distance(HierarchicalDirichletProcess* hdp_1, int64_t dp_id_1,
                                                  HierarchicalDirichletProcess* hdp_2, int64_t dp_id_2) {
    return compare_hdp_distrs(hdp_1, dp_id_1, hdp_2, dp_id_2, shannon_jensen_distance);
}
// Cross-HDP comparison under the Hellinger distance.
double compare_hdp_distrs_hellinger_distance(HierarchicalDirichletProcess* hdp_1, int64_t dp_id_1,
                                             HierarchicalDirichletProcess* hdp_2, int64_t dp_id_2) {
    return compare_hdp_distrs(hdp_1, dp_id_1, hdp_2, dp_id_2, hellinger_distance);
}
// Write one factor to `out` as a line "type \t parent \t payload", then
// recurse into its children pre-order, assigning sequential ids via
// next_fctr_id. data_start is the address of the HDP's data array, used to
// turn a data-point factor's pointer back into an index.
void serialize_factor_tree_internal(FILE* out, Factor* fctr, int64_t parent_id, int64_t* next_fctr_id, uintptr_t data_start) {
    int64_t id = *next_fctr_id;
    (*next_fctr_id)++;
    // factor type: 0 = BASE, 1 = MIDDLE, 2 = DATA_PT
    if (fctr->factor_type == BASE) {
        fprintf(out, "0\t");
    }
    else if (fctr->factor_type == MIDDLE) {
        fprintf(out, "1\t");
    }
    else {
        fprintf(out, "2\t");
    }
    // parent id ("-" for a root base factor)
    if (fctr->factor_type == BASE) {
        fprintf(out, "-\t");
    }
    else {
        fprintf(out, "%"PRId64"\t", parent_id);
    }
    // payload depends on the factor type
    if (fctr->factor_type == BASE) {
        // cached params, semicolon-separated (N_IG_NUM_PARAMS + 1 values)
        double* param_array = fctr->factor_data;
        for (int64_t i = 0; i < N_IG_NUM_PARAMS; i++) {
            fprintf(out, "%.17lg;", param_array[i]);
        }
        fprintf(out, "%.17lg", param_array[N_IG_NUM_PARAMS]);
    }
    else if (fctr->factor_type == MIDDLE) {
        // dp id
        fprintf(out, "%"PRId64, fctr->dp->id);
    }
    else {
        // index of the data point within the HDP's data array
        // NOTE(review): divides by sizeof(int64_t) although the data array
        // holds doubles; both are 8 bytes here, but confirm intent
        uintptr_t data_pos = (uintptr_t) fctr->factor_data;
        fprintf(out, "%"PRId64, ((int64_t) (data_pos - data_start)) / sizeof(int64_t));
    }
    fprintf(out, "\n");
    if (fctr->children != NULL) {
        stSetIterator* iter = stSet_getIterator(fctr->children);
        Factor* child_fctr = (Factor*) stSet_getNext(iter);
        while (child_fctr != NULL) {
            serialize_factor_tree_internal(out, child_fctr, id, next_fctr_id, data_start);
            child_fctr = (Factor*) stSet_getNext(iter);
        }
        stSet_destructIterator(iter);
    }
}
// Serialize a finalized HDP to a text stream in the line-oriented format
// consumed by deserialize_hdp: flags, sizes, data, base params, grid,
// gamma state, per-DP topology, posterior predictives, spline slopes, and
// finally the factor trees. Exits if the structure is not finalized.
void serialize_hdp(HierarchicalDirichletProcess* hdp, FILE* out) {
    int64_t num_dps = hdp->num_dps;
    int64_t num_data = hdp->data_length;
    double* data = hdp->data;
    int64_t* dp_ids = hdp->data_pt_dp_id;
    int64_t grid_length = hdp->grid_length;
    double* grid = hdp->sampling_grid;
    int64_t depth = hdp->depth;
    double* gamma_params = hdp->gamma;
    double* gamma_alpha = hdp->gamma_alpha;
    double* gamma_beta = hdp->gamma_beta;
    double* w_aux_vector = hdp->w_aux_vector;
    bool* s_aux_vector = hdp->s_aux_vector;
    DirichletProcess** dps = hdp->dps;
    DirichletProcess* base_dp = hdp->base_dp;
    bool has_data = hdp->data != NULL;
    if (!hdp->finalized) {
        fprintf(stderr, "Can only serialize HierarchicalDirichletProcess with finalized structure");
        exit(EXIT_FAILURE);
    }
    // splines finalized
    fprintf(out, "%"PRId64"\n", (int64_t) hdp->splines_finalized);
    // has data
    fprintf(out, "%"PRId64"\n", (int64_t) has_data);
    // sample gamma
    fprintf(out, "%"PRId64"\n", (int64_t) hdp->sample_gamma);
    // num dps
    fprintf(out, "%"PRId64"\n", num_dps);
    // data values and their DP assignments, tab-separated on two lines
    if (has_data) {
        for (int64_t i = 0; i < num_data - 1; i++) {
            fprintf(out, "%.17lg\t", data[i]);
        }
        fprintf(out, "%.17lg\n", data[num_data - 1]);
        // dp ids
        for (int64_t i = 0; i < num_data - 1; i++) {
            fprintf(out, "%"PRId64"\t", dp_ids[i]);
        }
        fprintf(out, "%"PRId64"\n", dp_ids[num_data - 1]);
    }
    // base params (alpha is stored doubled, hence two_alpha / 2)
    fprintf(out, "%.17lg\t%.17lg\t%.17lg\t%.17lg\n", hdp->mu, hdp->nu, (hdp->two_alpha) / 2.0, hdp->beta);
    // sampling grid, stored as (start, stop, length)
    fprintf(out, "%.17lg\t%.17lg\t%"PRId64"\n", grid[0], grid[grid_length - 1], grid_length);
    // gamma, one value per depth
    for (int64_t i = 0; i < depth - 1; i++) {
        fprintf(out, "%.17lg\t", gamma_params[i]);
    }
    fprintf(out, "%.17lg\n", gamma_params[depth - 1]);
    // gamma distr params (only present when gamma is being resampled)
    if (hdp->sample_gamma) {
        // alpha
        for (int64_t i = 0; i < depth - 1; i++) {
            fprintf(out, "%.17lg\t", gamma_alpha[i]);
        }
        fprintf(out, "%.17lg\n", gamma_alpha[depth - 1]);
        // beta
        for (int64_t i = 0; i < depth - 1; i++) {
            fprintf(out, "%.17lg\t", gamma_beta[i]);
        }
        fprintf(out, "%.17lg\n", gamma_beta[depth - 1]);
        // w auxiliary variables, one per DP
        for (int64_t i = 0; i < num_dps - 1; i++) {
            fprintf(out, "%.17lg\t", w_aux_vector[i]);
        }
        fprintf(out, "%.17lg\n", w_aux_vector[num_dps - 1]);
        // s auxiliary indicators, one per DP
        for (int64_t i = 0; i < num_dps - 1; i++) {
            fprintf(out, "%"PRId64"\t", (int64_t) s_aux_vector[i]);
        }
        fprintf(out, "%"PRId64"\n", (int64_t) s_aux_vector[num_dps - 1]);
    }
    // per-DP line: parent id ("-" for the base DP) and factor-child count
    DirichletProcess* dp;
    for (int64_t i = 0; i < num_dps; i++) {
        dp = dps[i];
        // parent
        if (dp == base_dp) {
            fprintf(out, "-\t%"PRId64"\n", dp->num_factor_children);
        }
        else {
            fprintf(out, "%"PRId64"\t%"PRId64"\n", dp->parent->id, dp->num_factor_children);
        }
    }
    // posterior predictives: one line per DP, blank when not allocated
    if (has_data) {
        double* post_pred;
        for (int64_t i = 0; i < num_dps; i++) {
            dp = dps[i];
            post_pred = dp->posterior_predictive;
            if (post_pred != NULL) {
                for (int64_t j = 0; j < grid_length - 1; j++) {
                    fprintf(out, "%.17lg\t", post_pred[j]);
                }
                fprintf(out, "%.17lg", post_pred[grid_length - 1]);
            }
            fprintf(out, "\n");
        }
    }
    // spline slopes: one line per DP, blank when not allocated
    if (hdp->splines_finalized) {
        double* slopes;
        for (int64_t i = 0; i < num_dps; i++) {
            dp = dps[i];
            slopes = dp->spline_slopes;
            if (slopes != NULL) {
                // NOTE(review): this inner loop redeclares i, shadowing the
                // outer DP index; behavior is correct but j (as used in the
                // posterior-predictive loop above) would be clearer
                for (int64_t i = 0; i < grid_length - 1; i++) {
                    fprintf(out, "%.17lg\t", slopes[i]);
                }
                fprintf(out, "%.17lg", slopes[grid_length - 1]);
            }
            fprintf(out, "\n");
        }
    }
    // factor trees, one pre-order traversal per base factor
    if (has_data) {
        int64_t next_fctr_id = 0;
        uintptr_t data_start = (uintptr_t) hdp->data;
        stSetIterator* iter = stSet_getIterator(base_dp->factors);
        Factor* fctr = (Factor*) stSet_getNext(iter);
        while (fctr != NULL) {
            serialize_factor_tree_internal(out, fctr, -1, &next_fctr_id, data_start);
            fctr = (Factor*) stSet_getNext(iter);
        }
        stSet_destructIterator(iter);
    }
}
HierarchicalDirichletProcess* deserialize_hdp(FILE* in) {
// splines finalized
char* end;
char* line = stFile_getLineFromFile(in);
bool splines_finalized = (bool) strtol(line, &end, 10);
free(line);
// has data
line = stFile_getLineFromFile(in);
bool has_data = (bool) strtol(line, &end, 10);
free(line);
// sample gamma
line = stFile_getLineFromFile(in);
bool sample_gamma = (bool) strtol(line, &end, 10);
free(line);
// num dps
line = stFile_getLineFromFile(in);
int64_t num_dps = (int64_t) strtol(line, &end, 10);
free(line);
double* data;
//int64_t* dp_ids;
int64_t* dp_ids = NULL;
int64_t data_length;
stList* tokens;
if (has_data) {
// data
line = stFile_getLineFromFile(in);
tokens = stString_split(line);
data_length = stList_length(tokens);
data = (double*) malloc(sizeof(double) * data_length);
for (int64_t i = 0; i < data_length; i++) {
sscanf(stList_get(tokens, i), "%lf", &(data[i]));
}
free(line);
stList_destruct(tokens);
// dp ids
line = stFile_getLineFromFile(in);
tokens = stString_split(line);
dp_ids = (int64_t*) malloc(sizeof(int64_t) * data_length);
for (int64_t i = 0; i < data_length; i++) {
sscanf((char*) stList_get(tokens, i), "%"SCNd64, &(dp_ids[i]));
}
free(line);
stList_destruct(tokens);
}
// base params
line = stFile_getLineFromFile(in);
double mu, nu, alpha, beta;
sscanf(line, "%lg\t%lg\t%lg\t%lg", &mu, &nu, &alpha, &beta);
free(line);
// sampling grid
line = stFile_getLineFromFile(in);
double grid_start, grid_stop;
int64_t grid_length;
sscanf(line, "%lg\t%lg\t%"SCNd64, &grid_start, &grid_stop, &grid_length);
free(line);
// gamma
line = stFile_getLineFromFile(in);
tokens = stString_split(line);
int64_t depth = stList_length(tokens);
double* gamma_params = (double*) malloc(sizeof(double) * depth);
for (int64_t i = 0; i < depth; i++) {
sscanf((char*) stList_get(tokens, i), "%lf", &(gamma_params[i]));
}
free(line);
stList_destruct(tokens);
// gamma distr params
double* gamma_alpha;
double* gamma_beta;
double* w;
bool* s;
int64_t s_int;
if (sample_gamma) {
line = stFile_getLineFromFile(in);
tokens = stString_split(line);
// gamma alpha
gamma_alpha = (double*) malloc(sizeof(double) * depth);
for (int64_t i = 0; i < depth; i++) {
sscanf((char*) stList_get(tokens, i), "%lf", &(gamma_alpha[i]));
}
free(line);
stList_destruct(tokens);
// gamma beta
line = stFile_getLineFromFile(in);
tokens = stString_split(line);
gamma_beta = (double*) malloc(sizeof(double) * depth);
for (int64_t i = 0; i < depth; i++) {
sscanf((char*) stList_get(tokens, i), "%lf", &(gamma_beta[i]));
}
free(line);
stList_destruct(tokens);
// w
line = stFile_getLineFromFile(in);
tokens = stString_split(line);
w = (double*) malloc(sizeof(double) * num_dps);
for (int64_t i = 0; i < num_dps; i++) {
sscanf((char*) stList_get(tokens, i), "%lf", &(w[i]));
}
free(line);
stList_destruct(tokens);
// s
line = stFile_getLineFromFile(in);
tokens = stString_split(line);
s = (bool*) malloc(sizeof(bool) * num_dps);
for (int64_t i = 0; i < num_dps; i++) {
sscanf((char*) stList_get(tokens, i), "%"SCNd64, &s_int);
s[i] = (bool) s_int;
}
free(line);
stList_destruct(tokens);
}
// construct hdp
HierarchicalDirichletProcess* hdp;
if (sample_gamma) {
hdp = new_hier_dir_proc_2(num_dps, depth, gamma_alpha, gamma_beta, grid_start,
grid_stop, grid_length, mu, nu, alpha, beta);
for (int64_t i = 0; i < depth; i++) {
hdp->gamma[i] = gamma_params[i];
}
free(gamma_params);
for (int64_t i = 0; i < num_dps; i++) {
hdp->w_aux_vector[i] = w[i];
hdp->s_aux_vector[i] = s[i];
}
free(w);
free(s);
}
else {
hdp = new_hier_dir_proc(num_dps, depth, gamma_params, grid_start, grid_stop,
grid_length, mu, nu, alpha, beta);
}
DirichletProcess** dps = hdp->dps;
DirichletProcess* dp;
// dp parents and num children
int64_t parent_id;
int64_t num_factor_children;
for (int64_t id = 0; id < num_dps; id++) {
line = stFile_getLineFromFile(in);
if (line[0] != '-') {
sscanf(line, "%"SCNd64"\t%"SCNd64, &parent_id, &num_factor_children);
set_dir_proc_parent(hdp, id, parent_id);
(dps[id])->num_factor_children = num_factor_children;
}
else {
sscanf(line, "-\t%"SCNd64, &num_factor_children);
(dps[id])->num_factor_children = num_factor_children; }
free(line);
}
finalize_hdp_structure(hdp);
// give it data
if (has_data) {
// note: don't use pass_hdp_data because want to manually init factors
hdp->data = data;
hdp->data_pt_dp_id = dp_ids;
hdp->data_length = data_length;
verify_valid_dp_assignments(hdp);
mark_observed_dps(hdp);
// post predictives
double* post_pred;
for (int64_t id = 0; id < num_dps; id++) {
dp = dps[id];
line = stFile_getLineFromFile(in);
stList* tokens = stString_split(line);
if (stList_length(tokens) != 0) {
free(dp->posterior_predictive);
dp->posterior_predictive = (double*) malloc(sizeof(double) * grid_length);
post_pred = dp->posterior_predictive;
for (int64_t i = 0; i < grid_length; i++) {
sscanf((char*) stList_get(tokens, i), "%lf\n", &(post_pred[i]));
}
}
free(line);
stList_destruct(tokens);
}
}
double* spline_slopes;
if (splines_finalized) {
hdp->splines_finalized = true;
for (int64_t id = 0; id < num_dps; id++) {
dp = dps[id];
line = stFile_getLineFromFile(in);
stList* tokens = stString_split(line);
if (stList_length(tokens) != 0) {
spline_slopes = (double*) malloc(sizeof(double) * grid_length);
dp->spline_slopes = spline_slopes;
for (int64_t i = 0; i < grid_length; i++) {
sscanf((char*) stList_get(tokens, i), "%lf", &(spline_slopes[i]));
}
}
free(line);
stList_destruct(tokens);
}
}
if (has_data) {
char* type_str;
char* parent_str;
char* dp_str;
char* idx_str;
char* params_str;
int64_t type_int;
int64_t dp_id;
int64_t data_pt_idx;
int64_t parent_idx;
double* param_array;
stList* params_list;
Factor* fctr;
Factor* parent_fctr;
stList* fctr_list = stList_construct();
line = stFile_getLineFromFile(in);
while (line != NULL) {
tokens = stString_split(line);
type_str = (char*) stList_get(tokens, 0);
sscanf(type_str, "%"SCNd64, &type_int);
if (type_int == 0) {
fctr = new_base_factor(hdp);
params_str = (char*) stList_get(tokens, 2);
params_list = stString_splitByString(params_str, ";");
param_array = fctr->factor_data;
for (int64_t i = 0; i < N_IG_NUM_PARAMS + 1; i++) {
sscanf((char*) stList_get(params_list, i), "%lf", ¶m_array[i]);
}
stList_destruct(params_list);
}
else if (type_int == 1) {
dp_str = (char*) stList_get(tokens, 2);
sscanf(dp_str, "%"SCNd64, &dp_id);
fctr = new_middle_factor(dps[dp_id]);
}
else if (type_int == 2) {
idx_str = (char*) stList_get(tokens, 2);;
sscanf(idx_str, "%"SCNd64, &data_pt_idx);
fctr = new_data_pt_factor(hdp, data_pt_idx);
}
else {
fprintf(stderr, "Deserialization error");
exit(EXIT_FAILURE);
}
stList_append(fctr_list, (void*) fctr);
// set parent if appicable
parent_str = (char*) stList_get(tokens, 1);
if (parent_str[0] != '-') {
sscanf(parent_str, "%"SCNd64, &parent_idx);
parent_fctr = (Factor*) stList_get(fctr_list, parent_idx);
fctr->parent = parent_fctr;
stSet_insert(parent_fctr->children, (void*) fctr);
}
free(line);
line = stFile_getLineFromFile(in);
stList_destruct(tokens);
}
stList_destruct(fctr_list);
}
return hdp;
}
|
par_csr_matrix.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* Member functions for hypre_ParCSRMatrix class.
*
*****************************************************************************/
#include "_hypre_parcsr_mv.h"
#include "../seq_mv/HYPRE_seq_mv.h"
#include "../seq_mv/csr_matrix.h"
/* In addition to publically accessible interface in HYPRE_mv.h, the
implementation in this file uses accessor macros into the sequential matrix
structure, and so includes the .h that defines that structure. Should those
accessor functions become proper functions at some later date, this will not
be necessary. AJC 4/99 */
#ifdef HYPRE_NO_GLOBAL_PARTITION
HYPRE_Int hypre_FillResponseParToCSRMatrix(void*, HYPRE_Int, HYPRE_Int, void*, MPI_Comm, void**, HYPRE_Int*);
#endif
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixCreate
*--------------------------------------------------------------------------*/
/* If create is called for HYPRE_NO_GLOBAL_PARTITION and row_starts and
col_starts are NOT null, then it is assumed that they are array of length 2
containing the start row of the calling processor followed by the start row
of the next processor - AHB 6/05 */
/* Allocate a hypre_ParCSRMatrix header and its (empty) diag/offd CSR blocks.
   If row_starts / col_starts are NULL they are generated here and the matrix
   takes ownership of them (see the owner flags set below).  The diag/offd
   arrays themselves are NOT allocated; call hypre_ParCSRMatrixInitialize
   for that. */
hypre_ParCSRMatrix *
hypre_ParCSRMatrixCreate( MPI_Comm comm,
HYPRE_Int global_num_rows,
HYPRE_Int global_num_cols,
HYPRE_Int *row_starts,
HYPRE_Int *col_starts,
HYPRE_Int num_cols_offd,
HYPRE_Int num_nonzeros_diag,
HYPRE_Int num_nonzeros_offd )
{
hypre_ParCSRMatrix *matrix;
HYPRE_Int num_procs, my_id;
HYPRE_Int local_num_rows, local_num_cols;
HYPRE_Int first_row_index, first_col_diag;
matrix = hypre_CTAlloc(hypre_ParCSRMatrix, 1);
hypre_MPI_Comm_rank(comm,&my_id);
hypre_MPI_Comm_size(comm,&num_procs);
/* generate a row partitioning when the caller did not supply one */
if (!row_starts)
{
#ifdef HYPRE_NO_GLOBAL_PARTITION
hypre_GenerateLocalPartitioning(global_num_rows, num_procs, my_id,
&row_starts);
#else
hypre_GeneratePartitioning(global_num_rows, num_procs, &row_starts);
#endif
}
if (!col_starts)
{
/* square matrix: rows and cols share one starts array
   (col ownership flag is cleared below so it is freed only once) */
if (global_num_rows == global_num_cols)
{
col_starts = row_starts;
}
else
{
#ifdef HYPRE_NO_GLOBAL_PARTITION
hypre_GenerateLocalPartitioning(global_num_cols, num_procs, my_id,
&col_starts);
#else
hypre_GeneratePartitioning(global_num_cols, num_procs, &col_starts);
#endif
}
}
#ifdef HYPRE_NO_GLOBAL_PARTITION
/* row_starts[0] is start of local rows. row_starts[1] is start of next
processor's rows */
first_row_index = row_starts[0];
local_num_rows = row_starts[1]-first_row_index ;
first_col_diag = col_starts[0];
local_num_cols = col_starts[1]-first_col_diag;
#else
/* global partition: full starts arrays indexed by processor id */
first_row_index = row_starts[my_id];
local_num_rows = row_starts[my_id+1]-first_row_index;
first_col_diag = col_starts[my_id];
local_num_cols = col_starts[my_id+1]-first_col_diag;
#endif
hypre_ParCSRMatrixComm(matrix) = comm;
/* diag holds local columns, offd holds couplings to other processors */
hypre_ParCSRMatrixDiag(matrix) =
hypre_CSRMatrixCreate(local_num_rows, local_num_cols,num_nonzeros_diag);
hypre_ParCSRMatrixOffd(matrix) =
hypre_CSRMatrixCreate(local_num_rows, num_cols_offd,num_nonzeros_offd);
hypre_ParCSRMatrixDiagT(matrix) = NULL;
hypre_ParCSRMatrixOffdT(matrix) = NULL; // JSP: transposed matrices are optional
hypre_ParCSRMatrixGlobalNumRows(matrix) = global_num_rows;
hypre_ParCSRMatrixGlobalNumCols(matrix) = global_num_cols;
hypre_ParCSRMatrixFirstRowIndex(matrix) = first_row_index;
hypre_ParCSRMatrixFirstColDiag(matrix) = first_col_diag;
/* cache inclusive last row/col of the local range */
hypre_ParCSRMatrixLastRowIndex(matrix) = first_row_index + local_num_rows - 1;
hypre_ParCSRMatrixLastColDiag(matrix) = first_col_diag + local_num_cols - 1;
hypre_ParCSRMatrixColMapOffd(matrix) = NULL;
hypre_ParCSRMatrixAssumedPartition(matrix) = NULL;
/* When NO_GLOBAL_PARTITION is set we could make these null, instead
of leaving the range. If that change is made, then when this create
is called from functions like the matrix-matrix multiply, be careful
not to generate a new partition */
hypre_ParCSRMatrixRowStarts(matrix) = row_starts;
hypre_ParCSRMatrixColStarts(matrix) = col_starts;
hypre_ParCSRMatrixCommPkg(matrix) = NULL;
hypre_ParCSRMatrixCommPkgT(matrix) = NULL;
/* set defaults */
hypre_ParCSRMatrixOwnsData(matrix) = 1;
hypre_ParCSRMatrixOwnsRowStarts(matrix) = 1;
hypre_ParCSRMatrixOwnsColStarts(matrix) = 1;
/* shared starts array: avoid double free in Destroy */
if (row_starts == col_starts)
hypre_ParCSRMatrixOwnsColStarts(matrix) = 0;
hypre_ParCSRMatrixRowindices(matrix) = NULL;
hypre_ParCSRMatrixRowvalues(matrix) = NULL;
hypre_ParCSRMatrixGetrowactive(matrix) = 0;
return matrix;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixDestroy
*--------------------------------------------------------------------------*/
/* Free a ParCSR matrix.  Sub-objects are released only when the
   corresponding ownership flag is set, so views onto data owned
   elsewhere are destroyed without freeing the shared arrays. */
HYPRE_Int
hypre_ParCSRMatrixDestroy( hypre_ParCSRMatrix *matrix )
{
if (matrix)
{
if ( hypre_ParCSRMatrixOwnsData(matrix) )
{
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(matrix));
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(matrix));
/* optional transposed blocks (may be NULL) */
if ( hypre_ParCSRMatrixDiagT(matrix) )
{
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiagT(matrix));
}
if ( hypre_ParCSRMatrixOffdT(matrix) )
{
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffdT(matrix));
}
if (hypre_ParCSRMatrixColMapOffd(matrix))
hypre_TFree(hypre_ParCSRMatrixColMapOffd(matrix));
if (hypre_ParCSRMatrixCommPkg(matrix))
hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkg(matrix));
if (hypre_ParCSRMatrixCommPkgT(matrix))
hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkgT(matrix));
}
/* row/col starts may be shared (see Create): col flag is cleared then */
if ( hypre_ParCSRMatrixOwnsRowStarts(matrix) )
hypre_TFree(hypre_ParCSRMatrixRowStarts(matrix));
if ( hypre_ParCSRMatrixOwnsColStarts(matrix) )
hypre_TFree(hypre_ParCSRMatrixColStarts(matrix));
/* GetRow scratch buffers (allocated lazily, may be NULL) */
hypre_TFree(hypre_ParCSRMatrixRowindices(matrix));
hypre_TFree(hypre_ParCSRMatrixRowvalues(matrix));
if (hypre_ParCSRMatrixAssumedPartition(matrix))
hypre_AssumedPartitionDestroy(hypre_ParCSRMatrixAssumedPartition(matrix));
hypre_TFree(matrix);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixInitialize
*--------------------------------------------------------------------------*/
/* Allocate the storage (i/j/data arrays) of the local diag and offd
   CSR blocks and the offd local-to-global column map.  The matrix
   header must already have been built by hypre_ParCSRMatrixCreate. */
HYPRE_Int
hypre_ParCSRMatrixInitialize( hypre_ParCSRMatrix *matrix )
{
   hypre_CSRMatrix *offd_block;

   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_CSRMatrixInitialize(hypre_ParCSRMatrixDiag(matrix));

   offd_block = hypre_ParCSRMatrixOffd(matrix);
   hypre_CSRMatrixInitialize(offd_block);

   /* one map entry per off-diagonal column of the local block */
   hypre_ParCSRMatrixColMapOffd(matrix) =
      hypre_CTAlloc(HYPRE_Int, hypre_CSRMatrixNumCols(offd_block));

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixSetNumNonzeros
*--------------------------------------------------------------------------*/
/* Sum the local nonzero counts (diag + offd) across all processors
   and store the integer total in the matrix header. */
HYPRE_Int
hypre_ParCSRMatrixSetNumNonzeros( hypre_ParCSRMatrix *matrix )
{
   hypre_CSRMatrix *diag_block;
   hypre_CSRMatrix *offd_block;
   HYPRE_Int        nrows_local;
   HYPRE_Int        nnz_local;
   HYPRE_Int        nnz_global;

   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   diag_block  = hypre_ParCSRMatrixDiag(matrix);
   offd_block  = hypre_ParCSRMatrixOffd(matrix);
   nrows_local = hypre_CSRMatrixNumRows(diag_block);

   /* the last row-pointer entry of a CSR block is its nonzero count */
   nnz_local = hypre_CSRMatrixI(diag_block)[nrows_local]
             + hypre_CSRMatrixI(offd_block)[nrows_local];

   hypre_MPI_Allreduce(&nnz_local, &nnz_global, 1, HYPRE_MPI_INT,
                       hypre_MPI_SUM, hypre_ParCSRMatrixComm(matrix));

   hypre_ParCSRMatrixNumNonzeros(matrix) = nnz_global;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixSetDNumNonzeros
*--------------------------------------------------------------------------*/
/* Sum the local nonzero counts (diag + offd) across all processors in
   floating point (HYPRE_Real) and store the total in the matrix header. */
HYPRE_Int
hypre_ParCSRMatrixSetDNumNonzeros( hypre_ParCSRMatrix *matrix )
{
   hypre_CSRMatrix *diag_block;
   hypre_CSRMatrix *offd_block;
   HYPRE_Int        nrows_local;
   HYPRE_Real       nnz_local;
   HYPRE_Real       nnz_global;

   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   diag_block  = hypre_ParCSRMatrixDiag(matrix);
   offd_block  = hypre_ParCSRMatrixOffd(matrix);
   nrows_local = hypre_CSRMatrixNumRows(diag_block);

   /* the last row-pointer entry of a CSR block is its nonzero count */
   nnz_local = (HYPRE_Real) hypre_CSRMatrixI(diag_block)[nrows_local]
             + (HYPRE_Real) hypre_CSRMatrixI(offd_block)[nrows_local];

   hypre_MPI_Allreduce(&nnz_local, &nnz_global, 1, HYPRE_MPI_REAL,
                       hypre_MPI_SUM, hypre_ParCSRMatrixComm(matrix));

   hypre_ParCSRMatrixDNumNonzeros(matrix) = nnz_global;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixSetDataOwner
*--------------------------------------------------------------------------*/
/* Set whether the matrix owns (and will free) its diag/offd blocks. */
HYPRE_Int
hypre_ParCSRMatrixSetDataOwner( hypre_ParCSRMatrix *matrix,
                                HYPRE_Int owns_data )
{
   if (matrix == NULL)
   {
      hypre_error_in_arg(1);
   }
   else
   {
      hypre_ParCSRMatrixOwnsData(matrix) = owns_data;
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixSetRowStartsOwner
*--------------------------------------------------------------------------*/
/* Set whether the matrix owns (and will free) its row_starts array. */
HYPRE_Int
hypre_ParCSRMatrixSetRowStartsOwner( hypre_ParCSRMatrix *matrix,
                                     HYPRE_Int owns_row_starts )
{
   if (matrix == NULL)
   {
      hypre_error_in_arg(1);
   }
   else
   {
      hypre_ParCSRMatrixOwnsRowStarts(matrix) = owns_row_starts;
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixSetColStartsOwner
*--------------------------------------------------------------------------*/
/* Set whether the matrix owns (and will free) its col_starts array. */
HYPRE_Int
hypre_ParCSRMatrixSetColStartsOwner( hypre_ParCSRMatrix *matrix,
                                     HYPRE_Int owns_col_starts )
{
   if (matrix == NULL)
   {
      hypre_error_in_arg(1);
   }
   else
   {
      hypre_ParCSRMatrixOwnsColStarts(matrix) = owns_col_starts;
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixRead
*--------------------------------------------------------------------------*/
/* Read a ParCSR matrix from the per-process file triple
   <file_name>.D.<id> (diag block), <file_name>.O.<id> (offd block) and
   <file_name>.INFO.<id> (global sizes, partitioning, offd column map).
   Returns the new matrix, or NULL (with the hypre error flag set) if
   the INFO file cannot be opened.
   Bug fix: the fopen result was previously unchecked, so a missing
   INFO file made hypre_fscanf dereference a NULL FILE*. */
hypre_ParCSRMatrix *
hypre_ParCSRMatrixRead( MPI_Comm comm,
                        const char *file_name )
{
   hypre_ParCSRMatrix *matrix;
   hypre_CSRMatrix *diag;
   hypre_CSRMatrix *offd;
   HYPRE_Int my_id, i, num_procs;
   char new_file_d[80], new_file_o[80], new_file_info[80];
   HYPRE_Int global_num_rows, global_num_cols, num_cols_offd;
   HYPRE_Int local_num_rows;
   HYPRE_Int *row_starts;
   HYPRE_Int *col_starts;
   HYPRE_Int *col_map_offd;
   FILE *fp;
   HYPRE_Int equal = 1;
#ifdef HYPRE_NO_GLOBAL_PARTITION
   HYPRE_Int row_s, row_e, col_s, col_e;
#endif
   hypre_MPI_Comm_rank(comm,&my_id);
   hypre_MPI_Comm_size(comm,&num_procs);
#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* only the local range [start, next start) is stored */
   row_starts = hypre_CTAlloc(HYPRE_Int, 2);
   col_starts = hypre_CTAlloc(HYPRE_Int, 2);
#else
   row_starts = hypre_CTAlloc(HYPRE_Int, num_procs+1);
   col_starts = hypre_CTAlloc(HYPRE_Int, num_procs+1);
#endif
   hypre_sprintf(new_file_d,"%s.D.%d",file_name,my_id);
   hypre_sprintf(new_file_o,"%s.O.%d",file_name,my_id);
   hypre_sprintf(new_file_info,"%s.INFO.%d",file_name,my_id);
   fp = fopen(new_file_info, "r");
   if (fp == NULL)
   {
      /* fail cleanly instead of crashing on a missing/unreadable file;
         release the partition arrays allocated above */
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open input file %s\n");
      hypre_TFree(row_starts);
      hypre_TFree(col_starts);
      return NULL;
   }
   hypre_fscanf(fp, "%d", &global_num_rows);
   hypre_fscanf(fp, "%d", &global_num_cols);
   hypre_fscanf(fp, "%d", &num_cols_offd);
#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* the bgl input file should only contain the EXACT range for local processor */
   hypre_fscanf(fp, "%d %d %d %d", &row_s, &row_e, &col_s, &col_e);
   row_starts[0] = row_s;
   row_starts[1] = row_e;
   col_starts[0] = col_s;
   col_starts[1] = col_e;
#else
   for (i=0; i < num_procs; i++)
      hypre_fscanf(fp, "%d %d", &row_starts[i], &col_starts[i]);
   row_starts[num_procs] = global_num_rows;
   col_starts[num_procs] = global_num_cols;
#endif
   /* local -> global column map for the off-diagonal block */
   col_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
   for (i=0; i < num_cols_offd; i++)
      hypre_fscanf(fp, "%d", &col_map_offd[i]);
   fclose(fp);
   /* if row and col partitionings coincide, keep a single shared array */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   for (i=1; i >= 0; i--)
   {
      if (row_starts[i] != col_starts[i])
      {
         equal = 0;
         break;
      }
   }
#else
   for (i=num_procs; i >= 0; i--)
   {
      if (row_starts[i] != col_starts[i])
      {
         equal = 0;
         break;
      }
   }
#endif
   if (equal)
   {
      hypre_TFree(col_starts);
      col_starts = row_starts;
   }
   diag = hypre_CSRMatrixRead(new_file_d);
   local_num_rows = hypre_CSRMatrixNumRows(diag);
   if (num_cols_offd)
   {
      offd = hypre_CSRMatrixRead(new_file_o);
   }
   else
   {
      /* no off-processor couplings: build an empty offd block */
      offd = hypre_CSRMatrixCreate(local_num_rows,0,0);
      hypre_CSRMatrixInitialize(offd);
   }
   matrix = hypre_CTAlloc(hypre_ParCSRMatrix, 1);
   hypre_ParCSRMatrixComm(matrix) = comm;
   hypre_ParCSRMatrixGlobalNumRows(matrix) = global_num_rows;
   hypre_ParCSRMatrixGlobalNumCols(matrix) = global_num_cols;
#ifdef HYPRE_NO_GLOBAL_PARTITION
   hypre_ParCSRMatrixFirstRowIndex(matrix) = row_s;
   hypre_ParCSRMatrixFirstColDiag(matrix) = col_s;
   hypre_ParCSRMatrixLastRowIndex(matrix) = row_e - 1;
   hypre_ParCSRMatrixLastColDiag(matrix) = col_e - 1;
#else
   hypre_ParCSRMatrixFirstRowIndex(matrix) = row_starts[my_id];
   hypre_ParCSRMatrixFirstColDiag(matrix) = col_starts[my_id];
   hypre_ParCSRMatrixLastRowIndex(matrix) = row_starts[my_id+1]-1;
   hypre_ParCSRMatrixLastColDiag(matrix) = col_starts[my_id+1]-1;
#endif
   hypre_ParCSRMatrixRowStarts(matrix) = row_starts;
   hypre_ParCSRMatrixColStarts(matrix) = col_starts;
   hypre_ParCSRMatrixCommPkg(matrix) = NULL;
   /* set defaults */
   hypre_ParCSRMatrixOwnsData(matrix) = 1;
   hypre_ParCSRMatrixOwnsRowStarts(matrix) = 1;
   hypre_ParCSRMatrixOwnsColStarts(matrix) = 1;
   /* shared starts array must only be freed once */
   if (row_starts == col_starts)
      hypre_ParCSRMatrixOwnsColStarts(matrix) = 0;
   hypre_ParCSRMatrixDiag(matrix) = diag;
   hypre_ParCSRMatrixOffd(matrix) = offd;
   if (num_cols_offd)
      hypre_ParCSRMatrixColMapOffd(matrix) = col_map_offd;
   else
      hypre_ParCSRMatrixColMapOffd(matrix) = NULL;
   return matrix;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixPrint
*--------------------------------------------------------------------------*/
/* Write a ParCSR matrix to the per-process file triple
   <file_name>.D.<id>, <file_name>.O.<id> and <file_name>.INFO.<id>
   (the inverse of hypre_ParCSRMatrixRead).
   Bug fix: the fopen result for the INFO file was previously unchecked,
   so an unwritable path made hypre_fprintf dereference a NULL FILE*;
   now fails cleanly, matching hypre_ParCSRMatrixPrintIJ. */
HYPRE_Int
hypre_ParCSRMatrixPrint( hypre_ParCSRMatrix *matrix,
                         const char *file_name )
{
   MPI_Comm comm;
   HYPRE_Int global_num_rows;
   HYPRE_Int global_num_cols;
   HYPRE_Int *col_map_offd;
#ifndef HYPRE_NO_GLOBAL_PARTITION
   HYPRE_Int *row_starts;
   HYPRE_Int *col_starts;
#endif
   HYPRE_Int my_id, i, num_procs;
   char new_file_d[80], new_file_o[80], new_file_info[80];
   FILE *fp;
   HYPRE_Int num_cols_offd = 0;
#ifdef HYPRE_NO_GLOBAL_PARTITION
   HYPRE_Int row_s, row_e, col_s, col_e;
#endif
   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   comm = hypre_ParCSRMatrixComm(matrix);
   global_num_rows = hypre_ParCSRMatrixGlobalNumRows(matrix);
   global_num_cols = hypre_ParCSRMatrixGlobalNumCols(matrix);
   col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix);
#ifndef HYPRE_NO_GLOBAL_PARTITION
   row_starts = hypre_ParCSRMatrixRowStarts(matrix);
   col_starts = hypre_ParCSRMatrixColStarts(matrix);
#endif
   if (hypre_ParCSRMatrixOffd(matrix))
      num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(matrix));
   hypre_MPI_Comm_rank(comm, &my_id);
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_sprintf(new_file_d,"%s.D.%d",file_name,my_id);
   hypre_sprintf(new_file_o,"%s.O.%d",file_name,my_id);
   hypre_sprintf(new_file_info,"%s.INFO.%d",file_name,my_id);
   /* local CSR blocks go to their own files; offd only if non-empty */
   hypre_CSRMatrixPrint(hypre_ParCSRMatrixDiag(matrix),new_file_d);
   if (num_cols_offd != 0)
      hypre_CSRMatrixPrint(hypre_ParCSRMatrixOffd(matrix),new_file_o);
   fp = fopen(new_file_info, "w");
   if (fp == NULL)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open output file %s\n");
      return hypre_error_flag;
   }
   hypre_fprintf(fp, "%d\n", global_num_rows);
   hypre_fprintf(fp, "%d\n", global_num_cols);
   hypre_fprintf(fp, "%d\n", num_cols_offd);
#ifdef HYPRE_NO_GLOBAL_PARTITION
   row_s = hypre_ParCSRMatrixFirstRowIndex(matrix);
   row_e = hypre_ParCSRMatrixLastRowIndex(matrix);
   col_s = hypre_ParCSRMatrixFirstColDiag(matrix);
   col_e = hypre_ParCSRMatrixLastColDiag(matrix);
   /* add 1 to the ends because this is a starts partition */
   hypre_fprintf(fp, "%d %d %d %d\n", row_s, row_e + 1, col_s, col_e + 1);
#else
   for (i=0; i < num_procs; i++)
      hypre_fprintf(fp, "%d %d\n", row_starts[i], col_starts[i]);
#endif
   for (i=0; i < num_cols_offd; i++)
      hypre_fprintf(fp, "%d\n", col_map_offd[i]);
   fclose(fp);
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixPrintIJ
*--------------------------------------------------------------------------*/
/* Write the matrix in IJ (coordinate triplet) format to
   <filename>.<myid>, one file per process.  base_i/base_j shift the
   0-based global row/column indices (e.g. 1 for 1-based output).
   First line of each file is the local range "ilower iupper jlower jupper". */
HYPRE_Int
hypre_ParCSRMatrixPrintIJ( const hypre_ParCSRMatrix *matrix,
const HYPRE_Int base_i,
const HYPRE_Int base_j,
const char *filename )
{
MPI_Comm comm;
HYPRE_Int first_row_index;
HYPRE_Int first_col_diag;
hypre_CSRMatrix *diag;
hypre_CSRMatrix *offd;
HYPRE_Int *col_map_offd;
HYPRE_Int num_rows;
HYPRE_Int *row_starts;
HYPRE_Int *col_starts;
HYPRE_Complex *diag_data;
HYPRE_Int *diag_i;
HYPRE_Int *diag_j;
HYPRE_Complex *offd_data;
HYPRE_Int *offd_i;
HYPRE_Int *offd_j;
HYPRE_Int myid, num_procs, i, j, I, J;
char new_filename[255];
FILE *file;
HYPRE_Int num_nonzeros_offd;
HYPRE_Int ilower, iupper, jlower, jupper;
if (!matrix)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
comm = hypre_ParCSRMatrixComm(matrix);
first_row_index = hypre_ParCSRMatrixFirstRowIndex(matrix);
first_col_diag = hypre_ParCSRMatrixFirstColDiag(matrix);
diag = hypre_ParCSRMatrixDiag(matrix);
offd = hypre_ParCSRMatrixOffd(matrix);
col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix);
num_rows = hypre_ParCSRMatrixNumRows(matrix);
row_starts = hypre_ParCSRMatrixRowStarts(matrix);
col_starts = hypre_ParCSRMatrixColStarts(matrix);
hypre_MPI_Comm_rank(comm, &myid);
hypre_MPI_Comm_size(comm, &num_procs);
hypre_sprintf(new_filename,"%s.%05d", filename, myid);
if ((file = fopen(new_filename, "w")) == NULL)
{
/* NOTE(review): the message contains %s but no argument is passed --
   presumably hypre_error_w_msg performs no printf-style formatting;
   confirm against its definition */
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open output file %s\n");
return hypre_error_flag;
}
num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(offd);
diag_data = hypre_CSRMatrixData(diag);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
offd_i = hypre_CSRMatrixI(offd);
/* offd_data/offd_j stay uninitialized when the offd block is empty;
   they are only read under the same num_nonzeros_offd guard below */
if (num_nonzeros_offd)
{
offd_data = hypre_CSRMatrixData(offd);
offd_j = hypre_CSRMatrixJ(offd);
}
#ifdef HYPRE_NO_GLOBAL_PARTITION
ilower = row_starts[0]+base_i;
iupper = row_starts[1]+base_i - 1;
jlower = col_starts[0]+base_j;
jupper = col_starts[1]+base_j - 1;
#else
ilower = row_starts[myid] +base_i;
iupper = row_starts[myid+1]+base_i - 1;
jlower = col_starts[myid] +base_j;
jupper = col_starts[myid+1]+base_j - 1;
#endif
/* header line: inclusive local row/column range */
hypre_fprintf(file, "%d %d %d %d\n", ilower, iupper, jlower, jupper);
for (i = 0; i < num_rows; i++)
{
I = first_row_index + i + base_i;
/* print diag columns */
for (j = diag_i[i]; j < diag_i[i+1]; j++)
{
J = first_col_diag + diag_j[j] + base_j;
/* a NULL data array means pattern-only output (indices, no values) */
if ( diag_data )
{
#ifdef HYPRE_COMPLEX
hypre_fprintf(file, "%d %d %.14e , %.14e\n", I, J,
hypre_creal(diag_data[j]), hypre_cimag(diag_data[j]));
#else
hypre_fprintf(file, "%d %d %.14e\n", I, J, diag_data[j]);
#endif
}
else
hypre_fprintf(file, "%d %d\n", I, J);
}
/* print offd columns */
if ( num_nonzeros_offd )
{
for (j = offd_i[i]; j < offd_i[i+1]; j++)
{
/* map local offd column back to its global index */
J = col_map_offd[offd_j[j]] + base_j;
if ( offd_data )
{
#ifdef HYPRE_COMPLEX
hypre_fprintf(file, "%d %d %.14e , %.14e\n", I, J,
hypre_creal(offd_data[j]), hypre_cimag(offd_data[j]));
#else
hypre_fprintf(file, "%d %d %.14e\n", I, J, offd_data[j]);
#endif
}
else
hypre_fprintf(file, "%d %d\n", I, J );
}
}
}
fclose(file);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixReadIJ
*--------------------------------------------------------------------------*/
/* Read a matrix in IJ (coordinate triplet) format from <filename>.<myid>,
   one file per process, building a new ParCSR matrix.  The detected index
   bases are returned through base_i_ptr/base_j_ptr, and entries are split
   into diag/offd by column range.  Expects the file layout written by a
   matching print routine: global sizes, local sizes, nnz counts, the
   starts arrays, then one "I J value" triplet per nonzero. */
HYPRE_Int
hypre_ParCSRMatrixReadIJ( MPI_Comm comm,
const char *filename,
HYPRE_Int *base_i_ptr,
HYPRE_Int *base_j_ptr,
hypre_ParCSRMatrix **matrix_ptr)
{
HYPRE_Int global_num_rows;
HYPRE_Int global_num_cols;
HYPRE_Int first_row_index;
HYPRE_Int first_col_diag;
HYPRE_Int last_col_diag;
hypre_ParCSRMatrix *matrix;
hypre_CSRMatrix *diag;
hypre_CSRMatrix *offd;
HYPRE_Int *col_map_offd;
HYPRE_Int *row_starts;
HYPRE_Int *col_starts;
HYPRE_Int num_rows;
HYPRE_Int base_i, base_j;
HYPRE_Complex *diag_data;
HYPRE_Int *diag_i;
HYPRE_Int *diag_j;
HYPRE_Complex *offd_data;
HYPRE_Int *offd_i;
HYPRE_Int *offd_j;
HYPRE_Int *aux_offd_j;
HYPRE_Int myid, num_procs, i, j, I, J;
char new_filename[255];
FILE *file;
HYPRE_Int num_cols_offd, num_nonzeros_diag, num_nonzeros_offd;
HYPRE_Int equal, i_col, num_cols;
HYPRE_Int diag_cnt, offd_cnt, row_cnt;
HYPRE_Complex data;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &myid);
hypre_sprintf(new_filename,"%s.%05d", filename, myid);
if ((file = fopen(new_filename, "r")) == NULL)
{
/* NOTE(review): says "output file" although this opens for reading,
   and the %s has no matching argument -- message text only */
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open output file %s\n");
return hypre_error_flag;
}
hypre_fscanf(file, "%d %d", &global_num_rows, &global_num_cols);
hypre_fscanf(file, "%d %d %d", &num_rows, &num_cols, &num_cols_offd);
hypre_fscanf(file, "%d %d", &num_nonzeros_diag, &num_nonzeros_offd);
row_starts = hypre_CTAlloc(HYPRE_Int,num_procs+1);
col_starts = hypre_CTAlloc(HYPRE_Int,num_procs+1);
for (i = 0; i <= num_procs; i++)
hypre_fscanf(file, "%d %d", &row_starts[i], &col_starts[i]);
/* the first starts entries give the file's index bases; shift the
   partitionings back to 0-based */
base_i = row_starts[0];
base_j = col_starts[0];
equal = 1;
for (i = 0; i <= num_procs; i++)
{
row_starts[i] -= base_i;
col_starts[i] -= base_j;
if (row_starts[i] != col_starts[i]) equal = 0;
}
/* identical partitionings share a single starts array */
if (equal)
{
hypre_TFree(col_starts);
col_starts = row_starts;
}
matrix = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
row_starts, col_starts, num_cols_offd,
num_nonzeros_diag, num_nonzeros_offd);
hypre_ParCSRMatrixInitialize(matrix);
diag = hypre_ParCSRMatrixDiag(matrix);
offd = hypre_ParCSRMatrixOffd(matrix);
diag_data = hypre_CSRMatrixData(diag);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
offd_i = hypre_CSRMatrixI(offd);
/* offd_data/offd_j stay uninitialized when the offd block is empty;
   the triplet loop only writes to them when an entry falls outside
   [first_col_diag, last_col_diag], which cannot happen in that case */
if (num_nonzeros_offd)
{
offd_data = hypre_CSRMatrixData(offd);
offd_j = hypre_CSRMatrixJ(offd);
}
first_row_index = hypre_ParCSRMatrixFirstRowIndex(matrix);
first_col_diag = hypre_ParCSRMatrixFirstColDiag(matrix);
last_col_diag = first_col_diag+num_cols-1;
diag_cnt = 0;
offd_cnt = 0;
row_cnt = 0;
for (i = 0; i < num_nonzeros_diag+num_nonzeros_offd; i++)
{
/* read values */
hypre_fscanf(file, "%d %d %le", &I, &J, &data);
/* convert to 0-based local row / 0-based global column */
I = I-base_i-first_row_index;
J -= base_j;
/* NOTE(review): row_cnt advances by one per row transition -- this
   presumably assumes triplets are row-ordered and every row has at
   least one entry (an empty row would leave its diag_i/offd_i
   entries unset); confirm against the writer's format */
if (I > row_cnt)
{
diag_i[I] = diag_cnt;
offd_i[I] = offd_cnt;
row_cnt++;
}
/* columns outside the local diag range belong to offd */
if (J < first_col_diag || J > last_col_diag)
{
offd_j[offd_cnt] = J;
offd_data[offd_cnt++] = data;
}
else
{
diag_j[diag_cnt] = J - first_col_diag;
diag_data[diag_cnt++] = data;
}
}
diag_i[num_rows] = diag_cnt;
offd_i[num_rows] = offd_cnt;
fclose(file);
/* generate col_map_offd */
if (num_nonzeros_offd)
{
/* sort a copy of the global offd columns and compress duplicates
   to build the local-to-global column map */
aux_offd_j = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd);
for (i=0; i < num_nonzeros_offd; i++)
aux_offd_j[i] = offd_j[i];
hypre_qsort0(aux_offd_j,0,num_nonzeros_offd-1);
col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix);
col_map_offd[0] = aux_offd_j[0];
offd_cnt = 0;
for (i=1; i < num_nonzeros_offd; i++)
{
if (aux_offd_j[i] > col_map_offd[offd_cnt])
col_map_offd[++offd_cnt] = aux_offd_j[i];
}
/* rewrite offd_j from global columns to local map indices */
for (i=0; i < num_nonzeros_offd; i++)
{
offd_j[i] = hypre_BinarySearch(col_map_offd, offd_j[i], num_cols_offd);
}
hypre_TFree(aux_offd_j);
}
/* move diagonal element in first position in each row */
for (i=0; i < num_rows; i++)
{
i_col = diag_i[i];
for (j=i_col; j < diag_i[i+1]; j++)
{
if (diag_j[j] == i)
{
/* swap the diagonal entry with the first entry of the row */
diag_j[j] = diag_j[i_col];
data = diag_data[j];
diag_data[j] = diag_data[i_col];
diag_data[i_col] = data;
diag_j[i_col] = i;
break;
}
}
}
*base_i_ptr = base_i;
*base_j_ptr = base_j;
*matrix_ptr = matrix;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixGetLocalRange
* returns the row numbers of the rows stored on this processor.
* "End" is actually the row number of the last row on this processor.
*--------------------------------------------------------------------------*/
/* Return the inclusive row and column ranges owned by this processor. */
HYPRE_Int
hypre_ParCSRMatrixGetLocalRange( hypre_ParCSRMatrix *matrix,
                                 HYPRE_Int *row_start,
                                 HYPRE_Int *row_end,
                                 HYPRE_Int *col_start,
                                 HYPRE_Int *col_end )
{
   HYPRE_Int proc_id;

   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_MPI_Comm_rank( hypre_ParCSRMatrixComm(matrix), &proc_id );

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* bounds are cached in the header and already inclusive */
   *row_start = hypre_ParCSRMatrixFirstRowIndex(matrix);
   *row_end   = hypre_ParCSRMatrixLastRowIndex(matrix);
   *col_start = hypre_ParCSRMatrixFirstColDiag(matrix);
   *col_end   = hypre_ParCSRMatrixLastColDiag(matrix);
#else
   {
      /* look up this processor's slice of the starts arrays */
      HYPRE_Int *rs = hypre_ParCSRMatrixRowStarts(matrix);
      HYPRE_Int *cs = hypre_ParCSRMatrixColStarts(matrix);

      *row_start = rs[proc_id];
      *row_end   = rs[proc_id + 1] - 1;
      *col_start = cs[proc_id];
      *col_end   = cs[proc_id + 1] - 1;
   }
#endif

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixGetRow
* Returns global column indices and/or values for a given row in the global
* matrix. Global row number is used, but the row must be stored locally or
* an error is returned. This implementation copies from the two matrices that
* store the local data, storing them in the hypre_ParCSRMatrix structure.
* Only a single row can be accessed via this function at any one time; the
* corresponding RestoreRow function must be called, to avoid bleeding memory,
* and to be able to look at another row.
* Either one of col_ind and values can be left null, and those values will
* not be returned.
* All indices are returned in 0-based indexing, no matter what is used under
* the hood. EXCEPTION: currently this only works if the local CSR matrices
* use 0-based indexing.
* This code, semantics, implementation, etc., are all based on PETSc's hypre_MPI_AIJ
* matrix code, adjusted for our data and software structures.
* AJC 4/99.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixGetRow( hypre_ParCSRMatrix *mat,
                          HYPRE_Int row,
                          HYPRE_Int *size,
                          HYPRE_Int **col_ind,
                          HYPRE_Complex **values )
{
   HYPRE_Int my_id;
   HYPRE_Int row_start, row_end;
   hypre_CSRMatrix *Aa;
   hypre_CSRMatrix *Ba;

   if (!mat)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   Aa = (hypre_CSRMatrix *) hypre_ParCSRMatrixDiag(mat);
   Ba = (hypre_CSRMatrix *) hypre_ParCSRMatrixOffd(mat);

   /* Only one row may be active at a time; the caller must use
      hypre_ParCSRMatrixRestoreRow before requesting another row. */
   if (hypre_ParCSRMatrixGetrowactive(mat)) return(-1);

   hypre_MPI_Comm_rank( hypre_ParCSRMatrixComm(mat), &my_id );

#ifdef HYPRE_NO_GLOBAL_PARTITION
   row_start = hypre_ParCSRMatrixFirstRowIndex(mat);
   row_end = hypre_ParCSRMatrixLastRowIndex(mat) + 1;
#else
   row_end = hypre_ParCSRMatrixRowStarts(mat)[ my_id + 1 ];
   row_start = hypre_ParCSRMatrixRowStarts(mat)[ my_id ];
#endif

   /* BUG FIX: validate the requested row *before* marking the matrix
      "getrow active".  Previously the active flag was set first, so an
      out-of-range row left the matrix permanently active and every
      subsequent GetRow call returned -1 until RestoreRow was called. */
   if (row < row_start || row >= row_end) return(-1);

   hypre_ParCSRMatrixGetrowactive(mat) = 1;

   /* If no row buffer exists yet and output was requested, allocate one
      large enough to hold the longest local row (diag + offd parts). */
   if (!hypre_ParCSRMatrixRowvalues(mat) && ( col_ind || values ))
   {
      HYPRE_Int max = 1,tmp;
      HYPRE_Int i;
      HYPRE_Int m = row_end-row_start;

      for ( i=0; i<m; i++ ) {
         tmp = hypre_CSRMatrixI(Aa)[i+1] - hypre_CSRMatrixI(Aa)[i] +
               hypre_CSRMatrixI(Ba)[i+1] - hypre_CSRMatrixI(Ba)[i];
         if (max < tmp) { max = tmp; }
      }

      hypre_ParCSRMatrixRowvalues(mat) = (HYPRE_Complex *) hypre_CTAlloc
         ( HYPRE_Complex, max );
      hypre_ParCSRMatrixRowindices(mat) = (HYPRE_Int *) hypre_CTAlloc
         ( HYPRE_Int, max );
   }

   /* Copy from the diag and offd sequential matrices into the row buffer,
      merging so the global column indices come out in increasing order
      (assumes the diag and offd rows are each already sorted). */
   {
      HYPRE_Complex *vworkA, *vworkB, *v_p;
      HYPRE_Int i, *cworkA, *cworkB;
      HYPRE_Int cstart = hypre_ParCSRMatrixFirstColDiag(mat);
      HYPRE_Int nztot, nzA, nzB, lrow=row-row_start;
      HYPRE_Int *cmap, *idx_p;

      nzA = hypre_CSRMatrixI(Aa)[lrow+1]-hypre_CSRMatrixI(Aa)[lrow];
      cworkA = &( hypre_CSRMatrixJ(Aa)[ hypre_CSRMatrixI(Aa)[lrow] ] );
      vworkA = &( hypre_CSRMatrixData(Aa)[ hypre_CSRMatrixI(Aa)[lrow] ] );

      nzB = hypre_CSRMatrixI(Ba)[lrow+1]-hypre_CSRMatrixI(Ba)[lrow];
      cworkB = &( hypre_CSRMatrixJ(Ba)[ hypre_CSRMatrixI(Ba)[lrow] ] );
      vworkB = &( hypre_CSRMatrixData(Ba)[ hypre_CSRMatrixI(Ba)[lrow] ] );

      nztot = nzA + nzB;
      cmap = hypre_ParCSRMatrixColMapOffd(mat);

      if (values || col_ind) {
         if (nztot) {
            /* imark = number of leading offd entries whose global column
               precedes the diag block; layout is [offd<cstart | diag | offd>] */
            HYPRE_Int imark = -1;
            if (values) {
               *values = v_p = hypre_ParCSRMatrixRowvalues(mat);
               for ( i=0; i<nzB; i++ ) {
                  if (cmap[cworkB[i]] < cstart) v_p[i] = vworkB[i];
                  else break;
               }
               imark = i;
               for ( i=0; i<nzA; i++ ) v_p[imark+i] = vworkA[i];
               for ( i=imark; i<nzB; i++ ) v_p[nzA+i] = vworkB[i];
            }
            if (col_ind) {
               *col_ind = idx_p = hypre_ParCSRMatrixRowindices(mat);
               if (imark > -1) {
                  for ( i=0; i<imark; i++ ) {
                     idx_p[i] = cmap[cworkB[i]];
                  }
               } else {
                  for ( i=0; i<nzB; i++ ) {
                     if (cmap[cworkB[i]] < cstart) idx_p[i] = cmap[cworkB[i]];
                     else break;
                  }
                  imark = i;
               }
               for ( i=0; i<nzA; i++ ) idx_p[imark+i] = cstart + cworkA[i];
               for ( i=imark; i<nzB; i++ ) idx_p[nzA+i] = cmap[cworkB[i]];
            }
         }
         else {
            if (col_ind) *col_ind = 0;
            if (values) *values = 0;
         }
      }
      *size = nztot;
   } /* End of copy */

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixRestoreRow
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixRestoreRow( hypre_ParCSRMatrix *matrix,
                              HYPRE_Int row,
                              HYPRE_Int *size,
                              HYPRE_Int **col_ind,
                              HYPRE_Complex **values )
{
   /* Releases the row acquired by hypre_ParCSRMatrixGetRow so another row
      can be fetched.  The row/size/col_ind/values arguments are accepted
      only for interface symmetry with GetRow and are not examined. */
   if (hypre_ParCSRMatrixGetrowactive(matrix))
   {
      hypre_ParCSRMatrixGetrowactive(matrix) = 0;
   }
   else
   {
      /* RestoreRow without a matching GetRow is an error. */
      hypre_error(HYPRE_ERROR_GENERIC);
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixToParCSRMatrix:
* generates a ParCSRMatrix distributed across the processors in comm
* from a CSRMatrix on proc 0 .
*
* This shouldn't be used with the HYPRE_NO_GLOBAL_PARTITION option
*
*--------------------------------------------------------------------------*/
hypre_ParCSRMatrix *
hypre_CSRMatrixToParCSRMatrix( MPI_Comm comm,
                               hypre_CSRMatrix *A,
                               HYPRE_Int *row_starts,
                               HYPRE_Int *col_starts )
{
   /* Distributes the sequential matrix A (significant on proc 0 only)
      across comm.  row_starts/col_starts need only be supplied on proc 0;
      their presence/layout is encoded in a small header and broadcast. */
   HYPRE_Int *global_data;
   HYPRE_Int global_size;
   HYPRE_Int global_num_rows;
   HYPRE_Int global_num_cols;
   HYPRE_Int *local_num_rows;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int *local_num_nonzeros=NULL;
   HYPRE_Int num_nonzeros;
   HYPRE_Complex *a_data;
   HYPRE_Int *a_i;
   HYPRE_Int *a_j;
   hypre_CSRMatrix *local_A;
   hypre_MPI_Request *requests;
   hypre_MPI_Status *status, status0;
   hypre_MPI_Datatype *csr_matrix_datatypes;
   hypre_ParCSRMatrix *par_matrix;
   HYPRE_Int first_col_diag;
   HYPRE_Int last_col_diag;
   HYPRE_Int i, j, ind;

   hypre_MPI_Comm_rank(comm, &my_id);
   hypre_MPI_Comm_size(comm, &num_procs);

   /* global_data layout:
      [0] global num rows, [1] global num cols, [2] total header length,
      [3] partition code (see below), [4..] row_starts and/or col_starts. */
   global_data = hypre_CTAlloc(HYPRE_Int, 2*num_procs+6);

   if (my_id == 0)
   {
      global_size = 3;
      if (row_starts)
      {
         if (col_starts)
         {
            if (col_starts != row_starts)
            {
               /* contains code for what to expect,
                  if 0: row_starts = col_starts, only row_starts given
                  if 1: only row_starts given, col_starts = NULL
                  if 2: both row_starts and col_starts given
                  if 3: only col_starts given, row_starts = NULL */
               global_data[3] = 2;
               global_size = 2*num_procs+6;
               for (i=0; i < num_procs+1; i++)
                  global_data[i+4] = row_starts[i];
               for (i=0; i < num_procs+1; i++)
                  global_data[i+num_procs+5] = col_starts[i];
            }
            else
            {
               global_data[3] = 0;
               global_size = num_procs+5;
               for (i=0; i < num_procs+1; i++)
                  global_data[i+4] = row_starts[i];
            }
         }
         else
         {
            global_data[3] = 1;
            global_size = num_procs+5;
            for (i=0; i < num_procs+1; i++)
               global_data[i+4] = row_starts[i];
         }
      }
      else
      {
         if (col_starts)
         {
            global_data[3] = 3;
            global_size = num_procs+5;
            for (i=0; i < num_procs+1; i++)
               global_data[i+4] = col_starts[i];
         }
      }
      global_data[0] = hypre_CSRMatrixNumRows(A);
      global_data[1] = hypre_CSRMatrixNumCols(A);
      global_data[2] = global_size;
      a_data = hypre_CSRMatrixData(A);
      a_i = hypre_CSRMatrixI(A);
      a_j = hypre_CSRMatrixJ(A);
   }

   /* First broadcast the fixed 3-entry prefix, then (if partitions were
      given) the variable-length remainder of the header. */
   hypre_MPI_Bcast(global_data,3,HYPRE_MPI_INT,0,comm);
   global_num_rows = global_data[0];
   global_num_cols = global_data[1];
   global_size = global_data[2];

   if (global_size > 3)
   {
      hypre_MPI_Bcast(&global_data[3],global_size-3,HYPRE_MPI_INT,0,comm);
      if (my_id > 0)
      {
         /* Non-root ranks reconstruct row_starts/col_starts from the header
            according to the partition code in global_data[3]. */
         if (global_data[3] < 3)
         {
            row_starts = hypre_CTAlloc(HYPRE_Int, num_procs+1);
            for (i=0; i< num_procs+1; i++)
            {
               row_starts[i] = global_data[i+4];
            }
            if (global_data[3] == 0)
               col_starts = row_starts;
            if (global_data[3] == 2)
            {
               col_starts = hypre_CTAlloc(HYPRE_Int, num_procs+1);
               for (i=0; i < num_procs+1; i++)
               {
                  col_starts[i] = global_data[i+num_procs+5];
               }
            }
         }
         else
         {
            col_starts = hypre_CTAlloc(HYPRE_Int, num_procs+1);
            for (i=0; i< num_procs+1; i++)
            {
               col_starts[i] = global_data[i+4];
            }
         }
      }
   }
   hypre_TFree(global_data);

   local_num_rows = hypre_CTAlloc(HYPRE_Int, num_procs);
   csr_matrix_datatypes = hypre_CTAlloc(hypre_MPI_Datatype, num_procs);

   /* Create the (possibly default-partitioned) ParCSR shell, then re-read
      the partitions actually chosen by Create. */
   par_matrix = hypre_ParCSRMatrixCreate(
      comm, global_num_rows, global_num_cols,row_starts,col_starts,0,0,0);
   row_starts = hypre_ParCSRMatrixRowStarts(par_matrix);
   col_starts = hypre_ParCSRMatrixColStarts(par_matrix);

   for (i=0; i < num_procs; i++)
      local_num_rows[i] = row_starts[i+1] - row_starts[i];

   if (my_id == 0)
   {
      /* Per-process nonzero counts, derived from A's row pointer. */
      local_num_nonzeros = hypre_CTAlloc(HYPRE_Int, num_procs);
      for (i=0; i < num_procs-1; i++)
         local_num_nonzeros[i] = a_i[row_starts[i+1]]
            - a_i[row_starts[i]];
      local_num_nonzeros[num_procs-1] = a_i[global_num_rows]
         - a_i[row_starts[num_procs-1]];
   }
   hypre_MPI_Scatter(local_num_nonzeros,1,HYPRE_MPI_INT,&num_nonzeros,1,
                     HYPRE_MPI_INT,0,comm);
   if (my_id == 0) num_nonzeros = local_num_nonzeros[0];

   local_A = hypre_CSRMatrixCreate(local_num_rows[my_id], global_num_cols,
                                   num_nonzeros);
   if (my_id == 0)
   {
      /* Root sends each process its CSR slice as one derived-datatype
         message (data, row pointer, column indices in a single Isend). */
      requests = hypre_CTAlloc (hypre_MPI_Request, num_procs-1);
      status = hypre_CTAlloc(hypre_MPI_Status, num_procs-1);
      j=0;
      for (i=1; i < num_procs; i++)
      {
         ind = a_i[row_starts[i]];
         hypre_BuildCSRMatrixMPIDataType(local_num_nonzeros[i],
                                         local_num_rows[i],
                                         &a_data[ind],
                                         &a_i[row_starts[i]],
                                         &a_j[ind],
                                         &csr_matrix_datatypes[i]);
         hypre_MPI_Isend(hypre_MPI_BOTTOM, 1, csr_matrix_datatypes[i], i, 0, comm,
                         &requests[j++]);
         hypre_MPI_Type_free(&csr_matrix_datatypes[i]);
      }
      /* Root's own slice aliases A's arrays; OwnsData=0 prevents a double
         free, and the pointers are NULLed before Destroy below. */
      hypre_CSRMatrixData(local_A) = a_data;
      hypre_CSRMatrixI(local_A) = a_i;
      hypre_CSRMatrixJ(local_A) = a_j;
      hypre_CSRMatrixOwnsData(local_A) = 0;
      hypre_MPI_Waitall(num_procs-1,requests,status);
      hypre_TFree(requests);
      hypre_TFree(status);
      hypre_TFree(local_num_nonzeros);
   }
   else
   {
      hypre_CSRMatrixInitialize(local_A);
      hypre_BuildCSRMatrixMPIDataType(num_nonzeros,
                                      local_num_rows[my_id],
                                      hypre_CSRMatrixData(local_A),
                                      hypre_CSRMatrixI(local_A),
                                      hypre_CSRMatrixJ(local_A),
                                      csr_matrix_datatypes);
      hypre_MPI_Recv(hypre_MPI_BOTTOM,1,csr_matrix_datatypes[0],0,0,comm,&status0);
      hypre_MPI_Type_free(csr_matrix_datatypes);
   }

   first_col_diag = col_starts[my_id];
   last_col_diag = col_starts[my_id+1]-1;

   /* Split the received local slice into diag/offd blocks of par_matrix. */
   GenerateDiagAndOffd(local_A, par_matrix, first_col_diag, last_col_diag);

   /* set pointers back to NULL before destroying */
   if (my_id == 0)
   {
      hypre_CSRMatrixData(local_A) = NULL;
      hypre_CSRMatrixI(local_A) = NULL;
      hypre_CSRMatrixJ(local_A) = NULL;
   }
   hypre_CSRMatrixDestroy(local_A);
   hypre_TFree(local_num_rows);
   hypre_TFree(csr_matrix_datatypes);

   return par_matrix;
}
HYPRE_Int
GenerateDiagAndOffd(hypre_CSRMatrix *A,
                    hypre_ParCSRMatrix *matrix,
                    HYPRE_Int first_col_diag,
                    HYPRE_Int last_col_diag)
{
   /* Splits the local sequential matrix A (stored with global column
      indices) into matrix's diag block (columns in
      [first_col_diag, last_col_diag], shifted to local indices) and offd
      block (all other columns, compressed through col_map_offd). */
   HYPRE_Int i, j;
   HYPRE_Int jo, jd;
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A);
   HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A);
   HYPRE_Complex *a_data = hypre_CSRMatrixData(A);
   HYPRE_Int *a_i = hypre_CSRMatrixI(A);
   HYPRE_Int *a_j = hypre_CSRMatrixJ(A);
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(matrix);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(matrix);
   HYPRE_Int *col_map_offd;
   HYPRE_Complex *diag_data, *offd_data;
   HYPRE_Int *diag_i, *offd_i;
   HYPRE_Int *diag_j, *offd_j;
   HYPRE_Int *marker;
   HYPRE_Int num_cols_diag, num_cols_offd;
   /* a_i may not start at 0; first_elmt normalizes j-array offsets. */
   HYPRE_Int first_elmt = a_i[0];
   HYPRE_Int num_nonzeros = a_i[num_rows]-first_elmt;
   HYPRE_Int counter;

   num_cols_diag = last_col_diag - first_col_diag +1;
   num_cols_offd = 0;

   if (num_cols - num_cols_diag)  /* some columns lie outside the diag range */
   {
      /* First Initialize allocates only the row-pointer arrays (nnz counts
         are still 0); a second Initialize further below allocates data/j
         once the entry counts are known. */
      hypre_CSRMatrixInitialize(diag);
      diag_i = hypre_CSRMatrixI(diag);

      hypre_CSRMatrixInitialize(offd);
      offd_i = hypre_CSRMatrixI(offd);

      /* marker[c] is first used as a flag (global column c appears in offd);
         it is later overwritten with c's compressed offd column index. */
      marker = hypre_CTAlloc(HYPRE_Int,num_cols);

      for (i=0; i < num_cols; i++)
         marker[i] = 0;

      jo = 0;
      jd = 0;
      /* Pass 1: count diag/offd entries per row and discover offd columns. */
      for (i=0; i < num_rows; i++)
      {
         offd_i[i] = jo;
         diag_i[i] = jd;

         for (j=a_i[i]-first_elmt; j < a_i[i+1]-first_elmt; j++)
            if (a_j[j] < first_col_diag || a_j[j] > last_col_diag)
            {
               if (!marker[a_j[j]])
               {
                  marker[a_j[j]] = 1;
                  num_cols_offd++;
               }
               jo++;
            }
            else
            {
               jd++;
            }
      }
      offd_i[num_rows] = jo;
      diag_i[num_rows] = jd;

      hypre_ParCSRMatrixColMapOffd(matrix) = hypre_CTAlloc(HYPRE_Int,num_cols_offd);
      col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix);

      /* Build the sorted global->compressed column map; marker now stores
         the compressed index of each offd column. */
      counter = 0;
      for (i=0; i < num_cols; i++)
         if (marker[i])
         {
            col_map_offd[counter] = i;
            marker[i] = counter;
            counter++;
         }

      hypre_CSRMatrixNumNonzeros(diag) = jd;
      hypre_CSRMatrixInitialize(diag);
      diag_data = hypre_CSRMatrixData(diag);
      diag_j = hypre_CSRMatrixJ(diag);

      hypre_CSRMatrixNumNonzeros(offd) = jo;
      hypre_CSRMatrixNumCols(offd) = num_cols_offd;
      hypre_CSRMatrixInitialize(offd);
      offd_data = hypre_CSRMatrixData(offd);
      offd_j = hypre_CSRMatrixJ(offd);

      /* Pass 2: copy values and remap column indices. */
      jo = 0;
      jd = 0;
      for (i=0; i < num_rows; i++)
      {
         for (j=a_i[i]-first_elmt; j < a_i[i+1]-first_elmt; j++)
            if (a_j[j] < first_col_diag || a_j[j] > last_col_diag)
            {
               offd_data[jo] = a_data[j];
               offd_j[jo++] = marker[a_j[j]];
            }
            else
            {
               diag_data[jd] = a_data[j];
               diag_j[jd++] = a_j[j]-first_col_diag;
            }
      }
      hypre_TFree(marker);
   }
   else
   {
      /* All columns fall in the diag range: copy A into diag verbatim and
         create an empty (all-zero row pointer) offd block. */
      hypre_CSRMatrixNumNonzeros(diag) = num_nonzeros;
      hypre_CSRMatrixInitialize(diag);
      diag_data = hypre_CSRMatrixData(diag);
      diag_i = hypre_CSRMatrixI(diag);
      diag_j = hypre_CSRMatrixJ(diag);

      for (i=0; i < num_nonzeros; i++)
      {
         diag_data[i] = a_data[i];
         diag_j[i] = a_j[i];
      }
      offd_i = hypre_CTAlloc(HYPRE_Int, num_rows+1);

      for (i=0; i < num_rows+1; i++)
      {
         diag_i[i] = a_i[i];
         offd_i[i] = 0;
      }

      hypre_CSRMatrixNumCols(offd) = 0;
      hypre_CSRMatrixI(offd) = offd_i;
   }

   return hypre_error_flag;
}
hypre_CSRMatrix *
hypre_MergeDiagAndOffd(hypre_ParCSRMatrix *par_matrix)
{
   /* Builds a sequential CSR matrix holding this process's local rows with
      *global* column indices: diag entries are shifted by first_col_diag,
      offd entries are mapped back through col_map_offd.  Rows are filled
      in parallel, one contiguous slice of rows per thread; each slice's
      output offset is computed from the diag/offd row pointers so no
      synchronization is needed. */
   hypre_CSRMatrix  *diag = hypre_ParCSRMatrixDiag(par_matrix);
   hypre_CSRMatrix  *offd = hypre_ParCSRMatrixOffd(par_matrix);
   hypre_CSRMatrix  *matrix;

   HYPRE_Int        num_cols = hypre_ParCSRMatrixGlobalNumCols(par_matrix);
   HYPRE_Int        first_col_diag = hypre_ParCSRMatrixFirstColDiag(par_matrix);
   HYPRE_Int        *col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
   HYPRE_Int        num_rows = hypre_CSRMatrixNumRows(diag);

   HYPRE_Int        *diag_i = hypre_CSRMatrixI(diag);
   HYPRE_Int        *diag_j = hypre_CSRMatrixJ(diag);
   HYPRE_Complex    *diag_data = hypre_CSRMatrixData(diag);
   HYPRE_Int        *offd_i = hypre_CSRMatrixI(offd);
   HYPRE_Int        *offd_j = hypre_CSRMatrixJ(offd);
   HYPRE_Complex    *offd_data = hypre_CSRMatrixData(offd);

   HYPRE_Int        *matrix_i;
   HYPRE_Int        *matrix_j;
   HYPRE_Complex    *matrix_data;
   HYPRE_Int        num_nonzeros, i, j;
   HYPRE_Int        count;
   HYPRE_Int        size, rest, num_threads, ii;

   num_nonzeros = diag_i[num_rows] + offd_i[num_rows];

   matrix = hypre_CSRMatrixCreate(num_rows,num_cols,num_nonzeros);
   hypre_CSRMatrixInitialize(matrix);
   matrix_i = hypre_CSRMatrixI(matrix);
   matrix_j = hypre_CSRMatrixJ(matrix);
   matrix_data = hypre_CSRMatrixData(matrix);

   num_threads = hypre_NumThreads();
   size = num_rows/num_threads;
   rest = num_rows - size*num_threads;  /* first `rest` threads get 1 extra row */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii, i, j, count) HYPRE_SMP_SCHEDULE
#endif
   for (ii=0; ii < num_threads; ii++)
   {
      HYPRE_Int ns, ne;
      if (ii < rest)
      {
         ns = ii*size+ii;
         ne = (ii+1)*size+ii+1;
      }
      else
      {
         ns = ii*size+rest;
         ne = (ii+1)*size+rest;
      }
      /* Output offset of this slice's first entry (fixed a stray ";;"). */
      count = diag_i[ns]+offd_i[ns];
      for (i=ns; i < ne; i++)
      {
         matrix_i[i] = count;
         /* diag part first, already sorted within the row */
         for (j=diag_i[i]; j < diag_i[i+1]; j++)
         {
            matrix_data[count] = diag_data[j];
            matrix_j[count++] = diag_j[j]+first_col_diag;
         }
         /* then offd part, translated to global column indices */
         for (j=offd_i[i]; j < offd_i[i+1]; j++)
         {
            matrix_data[count] = offd_data[j];
            matrix_j[count++] = col_map_offd[offd_j[j]];
         }
      }
   } /* end parallel region */

   matrix_i[num_rows] = num_nonzeros;

   return matrix;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixToCSRMatrixAll:
* generates a CSRMatrix from a ParCSRMatrix on all processors that have
* parts of the ParCSRMatrix
*--------------------------------------------------------------------------*/
hypre_CSRMatrix *
hypre_ParCSRMatrixToCSRMatrixAll(hypre_ParCSRMatrix *par_matrix)
{
   /* Gathers the distributed matrix as a full sequential CSR matrix onto
      every process that owns at least one row; processes owning no rows
      get NULL back.
      BUG FIX (global-partition branch): the loop copying this process's
      own column/data entries and the final Waitall were duplicated
      verbatim; the redundant second copy and Waitall were removed. */
   MPI_Comm comm = hypre_ParCSRMatrixComm(par_matrix);
   hypre_CSRMatrix *matrix;
   hypre_CSRMatrix *local_matrix;
   HYPRE_Int num_rows = hypre_ParCSRMatrixGlobalNumRows(par_matrix);
   HYPRE_Int num_cols = hypre_ParCSRMatrixGlobalNumCols(par_matrix);
#ifndef HYPRE_NO_GLOBAL_PARTITION
   HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(par_matrix);
#endif
   HYPRE_Int *matrix_i;
   HYPRE_Int *matrix_j;
   HYPRE_Complex *matrix_data;
   HYPRE_Int *local_matrix_i;
   HYPRE_Int *local_matrix_j;
   HYPRE_Complex *local_matrix_data;
   HYPRE_Int i, j;
   HYPRE_Int local_num_rows;
   HYPRE_Int local_num_nonzeros;
   HYPRE_Int num_nonzeros;
   HYPRE_Int num_data;
   HYPRE_Int num_requests;
   HYPRE_Int vec_len, offset;
   HYPRE_Int start_index;
   HYPRE_Int proc_id;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int num_types;
   HYPRE_Int *used_procs;

   hypre_MPI_Request *requests;
   hypre_MPI_Status *status;

#ifdef HYPRE_NO_GLOBAL_PARTITION
   HYPRE_Int *new_vec_starts;

   HYPRE_Int num_contacts;
   HYPRE_Int contact_proc_list[1];
   HYPRE_Int contact_send_buf[1];
   HYPRE_Int contact_send_buf_starts[2];
   HYPRE_Int max_response_size;
   HYPRE_Int *response_recv_buf=NULL;
   HYPRE_Int *response_recv_buf_starts = NULL;
   hypre_DataExchangeResponse response_obj;
   hypre_ProcListElements send_proc_obj;

   HYPRE_Int *send_info = NULL;
   hypre_MPI_Status status1;
   HYPRE_Int count, tag1 = 11112, tag2 = 22223, tag3 = 33334;
   HYPRE_Int start;
#endif

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

#ifdef HYPRE_NO_GLOBAL_PARTITION

   local_num_rows = hypre_ParCSRMatrixLastRowIndex(par_matrix) -
      hypre_ParCSRMatrixFirstRowIndex(par_matrix) + 1;

   local_matrix = hypre_MergeDiagAndOffd(par_matrix); /* creates matrix */
   local_matrix_i = hypre_CSRMatrixI(local_matrix);
   local_matrix_j = hypre_CSRMatrixJ(local_matrix);
   local_matrix_data = hypre_CSRMatrixData(local_matrix);

   /* determine procs that have vector data and store their ids in used_procs */
   /* we need to do an exchange data for this. If I own row then I will contact
      processor 0 with the endpoint of my local range */
   if (local_num_rows > 0)
   {
      num_contacts = 1;
      contact_proc_list[0] = 0;
      contact_send_buf[0] = hypre_ParCSRMatrixLastRowIndex(par_matrix);
      contact_send_buf_starts[0] = 0;
      contact_send_buf_starts[1] = 1;
   }
   else
   {
      num_contacts = 0;
      contact_send_buf_starts[0] = 0;
      contact_send_buf_starts[1] = 0;
   }

   /* build the response object; send_proc_obj collects, on proc 0, the ids
      and range endpoints of every process that owns rows */
   send_proc_obj.length = 0;
   send_proc_obj.storage_length = 10;
   send_proc_obj.id = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length);
   send_proc_obj.vec_starts =
      hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1);
   send_proc_obj.vec_starts[0] = 0;
   send_proc_obj.element_storage_length = 10;
   send_proc_obj.elements =
      hypre_CTAlloc(HYPRE_Int, send_proc_obj.element_storage_length);

   max_response_size = 0; /* each response is null */
   response_obj.fill_response = hypre_FillResponseParToCSRMatrix;
   response_obj.data1 = NULL;
   response_obj.data2 = &send_proc_obj; /*this is where we keep info from contacts*/

   hypre_DataExchangeList(num_contacts,
                          contact_proc_list, contact_send_buf,
                          contact_send_buf_starts, sizeof(HYPRE_Int),
                          sizeof(HYPRE_Int), &response_obj,
                          max_response_size, 1,
                          comm, (void**) &response_recv_buf,
                          &response_recv_buf_starts);

   /* now processor 0 has a list of ranges for processors that have rows -
      it builds the list of participating processors and an array of vec
      starts, and sends both to every row-owning process */
   if (my_id)
   {
      if (local_num_rows)
      {
         /* look for a message from processor 0 */
         hypre_MPI_Probe(0, tag1, comm, &status1);
         hypre_MPI_Get_count(&status1, HYPRE_MPI_INT, &count);

         send_info = hypre_CTAlloc(HYPRE_Int, count);
         hypre_MPI_Recv(send_info, count, HYPRE_MPI_INT, 0, tag1, comm, &status1);

         /* unpack: [0]=num_types, [1..num_types]=used_procs,
            [num_types+1..count-1]=new_vec_starts */
         num_types = send_info[0];
         used_procs = hypre_CTAlloc(HYPRE_Int, num_types);
         new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1);

         for (i=1; i<= num_types; i++)
         {
            used_procs[i-1] = send_info[i];
         }
         for (i=num_types+1; i< count; i++)
         {
            new_vec_starts[i-num_types-1] = send_info[i] ;
         }
      }
      else /* clean up and exit */
      {
         hypre_TFree(send_proc_obj.vec_starts);
         hypre_TFree(send_proc_obj.id);
         hypre_TFree(send_proc_obj.elements);
         if(response_recv_buf) hypre_TFree(response_recv_buf);
         if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts);

         if (hypre_CSRMatrixOwnsData(local_matrix))
            hypre_CSRMatrixDestroy(local_matrix);
         else
            hypre_TFree(local_matrix);

         return NULL;
      }
   }
   else /* my_id ==0 */
   {
      num_types = send_proc_obj.length;
      used_procs = hypre_CTAlloc(HYPRE_Int, num_types);
      new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1);

      new_vec_starts[0] = 0;
      for (i=0; i< num_types; i++)
      {
         used_procs[i] = send_proc_obj.id[i];
         new_vec_starts[i+1] = send_proc_obj.elements[i]+1;
      }
      hypre_qsort0(used_procs, 0, num_types-1);
      hypre_qsort0(new_vec_starts, 0, num_types);

      /* pack [num_types | used_procs | new_vec_starts] into one message */
      count = 2*num_types+2;
      send_info = hypre_CTAlloc(HYPRE_Int, count);
      send_info[0] = num_types;
      for (i=1; i<= num_types; i++)
      {
         send_info[i] = used_procs[i-1];
      }
      for (i=num_types+1; i< count; i++)
      {
         send_info[i] = new_vec_starts[i-num_types-1];
      }
      requests = hypre_CTAlloc(hypre_MPI_Request, num_types);
      status = hypre_CTAlloc(hypre_MPI_Status, num_types);

      /* don't send to myself - these are sorted so my id would be first*/
      start = 0;
      if (num_types && used_procs[0] == 0)
      {
         start = 1;
      }

      for (i=start; i < num_types; i++)
      {
         hypre_MPI_Isend(send_info, count, HYPRE_MPI_INT, used_procs[i], tag1,
                         comm, &requests[i-start]);
      }
      hypre_MPI_Waitall(num_types-start, requests, status);

      hypre_TFree(status);
      hypre_TFree(requests);
   }

   /* clean up */
   hypre_TFree(send_proc_obj.vec_starts);
   hypre_TFree(send_proc_obj.id);
   hypre_TFree(send_proc_obj.elements);
   hypre_TFree(send_info);
   if(response_recv_buf) hypre_TFree(response_recv_buf);
   if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts);

   /* now proc 0 can exit if it has no rows */
   if (!local_num_rows)
   {
      if (hypre_CSRMatrixOwnsData(local_matrix))
         hypre_CSRMatrixDestroy(local_matrix);
      else
         hypre_TFree(local_matrix);

      hypre_TFree(new_vec_starts);
      hypre_TFree(used_procs);
      return NULL;
   }

   /* everyone left has rows and knows: new_vec_starts, num_types, and used_procs */

   /* this matrix should be rather small */
   matrix_i = hypre_CTAlloc(HYPRE_Int, num_rows+1);

   num_requests = 4*num_types;
   requests = hypre_CTAlloc(hypre_MPI_Request, num_requests);
   status = hypre_CTAlloc(hypre_MPI_Status, num_requests);

   /* exchange contents of local_matrix_i - here we are sending to ourself also*/
   j = 0;
   for (i = 0; i < num_types; i++)
   {
      proc_id = used_procs[i];
      vec_len = new_vec_starts[i+1] - new_vec_starts[i];
      hypre_MPI_Irecv(&matrix_i[new_vec_starts[i]+1], vec_len, HYPRE_MPI_INT,
                      proc_id, tag2, comm, &requests[j++]);
   }
   for (i = 0; i < num_types; i++)
   {
      proc_id = used_procs[i];
      hypre_MPI_Isend(&local_matrix_i[1], local_num_rows, HYPRE_MPI_INT,
                      proc_id, tag2, comm, &requests[j++]);
   }
   hypre_MPI_Waitall(j, requests, status);

   /* generate matrix_i from received data: each block of row counts is
      shifted by the running total of preceding blocks */
   offset = matrix_i[new_vec_starts[1]];
   for (i=1; i < num_types; i++)
   {
      for (j = new_vec_starts[i]; j < new_vec_starts[i+1]; j++)
         matrix_i[j+1] += offset;
      offset = matrix_i[new_vec_starts[i+1]];
   }

   num_nonzeros = matrix_i[num_rows];

   matrix = hypre_CSRMatrixCreate(num_rows, num_cols, num_nonzeros);
   hypre_CSRMatrixI(matrix) = matrix_i;
   hypre_CSRMatrixInitialize(matrix);
   matrix_j = hypre_CSRMatrixJ(matrix);
   matrix_data = hypre_CSRMatrixData(matrix);

   /* exchange the remaining data, i.e. column info and actual values */
   j = 0;
   for (i = 0; i < num_types; i++)
   {
      proc_id = used_procs[i];
      start_index = matrix_i[new_vec_starts[i]];
      num_data = matrix_i[new_vec_starts[i+1]] - start_index;
      hypre_MPI_Irecv(&matrix_data[start_index], num_data, HYPRE_MPI_COMPLEX,
                      used_procs[i], tag1, comm, &requests[j++]);
      hypre_MPI_Irecv(&matrix_j[start_index], num_data, HYPRE_MPI_INT,
                      used_procs[i], tag3, comm, &requests[j++]);
   }
   local_num_nonzeros = local_matrix_i[local_num_rows];
   for (i=0; i < num_types; i++)
   {
      hypre_MPI_Isend(local_matrix_data, local_num_nonzeros, HYPRE_MPI_COMPLEX,
                      used_procs[i], tag1, comm, &requests[j++]);
      hypre_MPI_Isend(local_matrix_j, local_num_nonzeros, HYPRE_MPI_INT,
                      used_procs[i], tag3, comm, &requests[j++]);
   }

   hypre_MPI_Waitall(num_requests, requests, status);

   hypre_TFree(new_vec_starts);

#else

   local_num_rows = row_starts[my_id+1] - row_starts[my_id];

   /* if my_id contains no data, return NULL */
   if (!local_num_rows)
      return NULL;

   local_matrix = hypre_MergeDiagAndOffd(par_matrix);
   local_matrix_i = hypre_CSRMatrixI(local_matrix);
   local_matrix_j = hypre_CSRMatrixJ(local_matrix);
   local_matrix_data = hypre_CSRMatrixData(local_matrix);

   matrix_i = hypre_CTAlloc(HYPRE_Int, num_rows+1);

   /* determine procs that have vector data and store their ids in used_procs;
      this rank is deliberately excluded (its rows are copied locally) */
   num_types = 0;
   for (i=0; i < num_procs; i++)
      if (row_starts[i+1]-row_starts[i] && i-my_id)
         num_types++;
   num_requests = 4*num_types;

   used_procs = hypre_CTAlloc(HYPRE_Int, num_types);
   j = 0;
   for (i=0; i < num_procs; i++)
      if (row_starts[i+1]-row_starts[i] && i-my_id)
         used_procs[j++] = i;

   requests = hypre_CTAlloc(hypre_MPI_Request, num_requests);
   status = hypre_CTAlloc(hypre_MPI_Status, num_requests);

   /* exchange contents of local_matrix_i */
   j = 0;
   for (i = 0; i < num_types; i++)
   {
      proc_id = used_procs[i];
      vec_len = row_starts[proc_id+1] - row_starts[proc_id];
      hypre_MPI_Irecv(&matrix_i[row_starts[proc_id]+1], vec_len, HYPRE_MPI_INT,
                      proc_id, 0, comm, &requests[j++]);
   }
   for (i = 0; i < num_types; i++)
   {
      proc_id = used_procs[i];
      hypre_MPI_Isend(&local_matrix_i[1], local_num_rows, HYPRE_MPI_INT,
                      proc_id, 0, comm, &requests[j++]);
   }

   /* copy this rank's own row counts directly */
   vec_len = row_starts[my_id+1] - row_starts[my_id];
   for (i=1; i <= vec_len; i++)
      matrix_i[row_starts[my_id]+i] = local_matrix_i[i];

   hypre_MPI_Waitall(j, requests, status);

   /* generate matrix_i from received data */
   offset = matrix_i[row_starts[1]];
   for (i=1; i < num_procs; i++)
   {
      for (j = row_starts[i]; j < row_starts[i+1]; j++)
         matrix_i[j+1] += offset;
      offset = matrix_i[row_starts[i+1]];
   }

   num_nonzeros = matrix_i[num_rows];

   matrix = hypre_CSRMatrixCreate(num_rows, num_cols, num_nonzeros);
   hypre_CSRMatrixI(matrix) = matrix_i;
   hypre_CSRMatrixInitialize(matrix);
   matrix_j = hypre_CSRMatrixJ(matrix);
   matrix_data = hypre_CSRMatrixData(matrix);

   /* exchange the remaining data, i.e. column info and actual values */
   j = 0;
   for (i = 0; i < num_types; i++)
   {
      proc_id = used_procs[i];
      start_index = matrix_i[row_starts[proc_id]];
      num_data = matrix_i[row_starts[proc_id+1]] - start_index;
      hypre_MPI_Irecv(&matrix_data[start_index], num_data, HYPRE_MPI_COMPLEX,
                      used_procs[i], 0, comm, &requests[j++]);
      hypre_MPI_Irecv(&matrix_j[start_index], num_data, HYPRE_MPI_INT,
                      used_procs[i], 0, comm, &requests[j++]);
   }
   local_num_nonzeros = local_matrix_i[local_num_rows];
   for (i=0; i < num_types; i++)
   {
      hypre_MPI_Isend(local_matrix_data, local_num_nonzeros, HYPRE_MPI_COMPLEX,
                      used_procs[i], 0, comm, &requests[j++]);
      hypre_MPI_Isend(local_matrix_j, local_num_nonzeros, HYPRE_MPI_INT,
                      used_procs[i], 0, comm, &requests[j++]);
   }

   /* copy this rank's own column indices and values directly (previously
      this copy and the Waitall below were executed twice) */
   start_index = matrix_i[row_starts[my_id]];
   for (i=0; i < local_num_nonzeros; i++)
   {
      matrix_j[start_index+i] = local_matrix_j[i];
      matrix_data[start_index+i] = local_matrix_data[i];
   }
   hypre_MPI_Waitall(num_requests, requests, status);

#endif

   if (hypre_CSRMatrixOwnsData(local_matrix))
      hypre_CSRMatrixDestroy(local_matrix);
   else
      hypre_TFree(local_matrix);

   if (num_requests)
   {
      hypre_TFree(requests);
      hypre_TFree(status);
      hypre_TFree(used_procs);
   }

   return matrix;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixCopy,
* copies A to B,
* if copy_data = 0, only the structure of A is copied to B
* the routine does not check whether the dimensions of A and B are compatible
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixCopy( hypre_ParCSRMatrix *A,
                        hypre_ParCSRMatrix *B,
                        HYPRE_Int copy_data )
{
   /* Copies A into B (structure always; values only when copy_data != 0).
      B must already exist; dimensions are not checked. */
   hypre_CSRMatrix *A_diag;
   hypre_CSRMatrix *A_offd;
   HYPRE_Int *col_map_offd_A;
   hypre_CSRMatrix *B_diag;
   hypre_CSRMatrix *B_offd;
   HYPRE_Int *col_map_offd_B;
   HYPRE_Int num_cols_offd;
   HYPRE_Int i;

   if (!A)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   if (!B)
   {
      /* BUG FIX: a NULL B was reported as an error in argument 1. */
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   A_diag = hypre_ParCSRMatrixDiag(A);
   A_offd = hypre_ParCSRMatrixOffd(A);
   col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);

   B_diag = hypre_ParCSRMatrixDiag(B);
   B_offd = hypre_ParCSRMatrixOffd(B);
   col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B);

   num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   hypre_CSRMatrixCopy(A_diag, B_diag, copy_data);
   hypre_CSRMatrixCopy(A_offd, B_offd, copy_data);

   /* Lazily allocate B's offd column map if it does not exist yet. */
   if (num_cols_offd && col_map_offd_B == NULL)
   {
      col_map_offd_B = hypre_CTAlloc(HYPRE_Int,num_cols_offd);
      hypre_ParCSRMatrixColMapOffd(B) = col_map_offd_B;
   }

   for (i = 0; i < num_cols_offd; i++)
      col_map_offd_B[i] = col_map_offd_A[i];

   return hypre_error_flag;
}
/*--------------------------------------------------------------------
* hypre_FillResponseParToCSRMatrix
* Fill-response callback used by the data exchange to record, on the
* contacted processor, which processors will be sending data
*--------------------------------------------------------------------*/
HYPRE_Int
hypre_FillResponseParToCSRMatrix( void *p_recv_contact_buf,
                                  HYPRE_Int contact_size,
                                  HYPRE_Int contact_proc,
                                  void *ro,
                                  MPI_Comm comm,
                                  void **p_send_response_buf,
                                  HYPRE_Int *response_message_size )
{
   /* Records the contacting processor's id and its payload in the
      hypre_ProcListElements object carried in response_obj->data2.
      No response payload is returned (confirmation only). */
   HYPRE_Int                   rank;
   HYPRE_Int                   k;
   HYPRE_Int                   pos, slot, new_elength;
   HYPRE_Int                  *contact_data = (HYPRE_Int *) p_recv_contact_buf;
   hypre_DataExchangeResponse *resp = (hypre_DataExchangeResponse *) ro;
   hypre_ProcListElements     *proc_list =
      (hypre_ProcListElements *) resp->data2;

   hypre_MPI_Comm_rank(comm, &rank);

   /* Grow the id/vec_starts arrays when they are full (10 slots at a time). */
   if (proc_list->length == proc_list->storage_length)
   {
      proc_list->storage_length += 10;
      proc_list->id = hypre_TReAlloc(proc_list->id, HYPRE_Int,
                                     proc_list->storage_length);
      proc_list->vec_starts = hypre_TReAlloc(proc_list->vec_starts, HYPRE_Int,
                                             proc_list->storage_length + 1);
   }

   slot = proc_list->length;                 /* next free contact slot */
   pos = proc_list->vec_starts[slot];        /* elements stored so far */
   proc_list->id[slot] = contact_proc;

   /* Grow the element store if this contact's payload would overflow it. */
   if (proc_list->element_storage_length < pos + contact_size)
   {
      new_elength = hypre_max(contact_size, 10) + pos;
      proc_list->elements = hypre_TReAlloc(proc_list->elements,
                                           HYPRE_Int, new_elength);
      proc_list->element_storage_length = new_elength;
   }

   /* Append the received payload. */
   for (k = 0; k < contact_size; k++)
   {
      proc_list->elements[pos++] = contact_data[k];
   }
   proc_list->vec_starts[slot + 1] = pos;
   proc_list->length++;

   /* No message to return (confirmation only). */
   *response_message_size = 0;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixCompleteClone
* Creates and returns a new copy of the argument, A.
* Data is not copied, only structural information is reproduced.
* The following variables are not copied because they will be constructed
* later if needed: CommPkg, CommPkgT, rowindices, rowvalues
*--------------------------------------------------------------------------*/
/* This differs from Hypre_ParCSRMatrixClone in parcsr_ls/par_gsmg.c, because
that Clone function makes a matrix with different global parameters. */
hypre_ParCSRMatrix * hypre_ParCSRMatrixCompleteClone( hypre_ParCSRMatrix * A )
{
   /* Structural clone of A: same shape and column maps, no value copy.
      CommPkg/CommPkgT/rowindices/rowvalues are left NULL to be rebuilt
      on demand.  B does NOT own row_starts/col_starts (shared with A). */
   hypre_ParCSRMatrix * B = hypre_CTAlloc(hypre_ParCSRMatrix, 1);
   HYPRE_Int i, ncols_offd;

   hypre_ParCSRMatrixComm( B ) = hypre_ParCSRMatrixComm( A );
   hypre_ParCSRMatrixGlobalNumRows( B ) = hypre_ParCSRMatrixGlobalNumRows( A );
   hypre_ParCSRMatrixGlobalNumCols( B ) = hypre_ParCSRMatrixGlobalNumCols( A );
   hypre_ParCSRMatrixFirstRowIndex( B ) = hypre_ParCSRMatrixFirstRowIndex( A );
   hypre_ParCSRMatrixFirstColDiag( B ) = hypre_ParCSRMatrixFirstColDiag( A );
   hypre_ParCSRMatrixLastRowIndex( B ) = hypre_ParCSRMatrixLastRowIndex( A );
   hypre_ParCSRMatrixLastColDiag( B ) = hypre_ParCSRMatrixLastColDiag( A );
   hypre_ParCSRMatrixDiag( B ) = hypre_CSRMatrixClone( hypre_ParCSRMatrixDiag( A ) );
   hypre_ParCSRMatrixOffd( B ) = hypre_CSRMatrixClone( hypre_ParCSRMatrixOffd( A ) );
   hypre_ParCSRMatrixRowStarts( B ) = hypre_ParCSRMatrixRowStarts( A );
   hypre_ParCSRMatrixColStarts( B ) = hypre_ParCSRMatrixColStarts( A );
   /* note that B doesn't own row_starts & col_starts; this isn't a full copy */
   hypre_ParCSRMatrixCommPkg( B ) = NULL;
   hypre_ParCSRMatrixCommPkgT( B ) = NULL;
   hypre_ParCSRMatrixOwnsData( B ) = 1;
   hypre_ParCSRMatrixOwnsRowStarts( B ) = 0;
   hypre_ParCSRMatrixOwnsColStarts( B ) = 0;
   hypre_ParCSRMatrixNumNonzeros( B ) = hypre_ParCSRMatrixNumNonzeros( A );
   /* NOTE(review): this assigns the *integer* NumNonzeros of A to the
      double DNumNonzeros of B; presumably DNumNonzeros(A) was intended --
      confirm against other clone routines before changing. */
   hypre_ParCSRMatrixDNumNonzeros( B ) = hypre_ParCSRMatrixNumNonzeros( A );
   hypre_ParCSRMatrixRowindices( B ) = NULL;
   hypre_ParCSRMatrixRowvalues( B ) = NULL;
   hypre_ParCSRMatrixGetrowactive( B ) = 0;

   /* Deep-copy the offd column map so B is independent of A's. */
   ncols_offd = hypre_CSRMatrixNumCols( hypre_ParCSRMatrixOffd( B ) );
   hypre_ParCSRMatrixColMapOffd( B ) = hypre_CTAlloc( HYPRE_Int, ncols_offd );
   for ( i=0; i<ncols_offd; ++i )
      hypre_ParCSRMatrixColMapOffd( B )[i] = hypre_ParCSRMatrixColMapOffd( A )[i];

   return B;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixUnion
* Creates and returns a new matrix whose elements are the union of A and B.
* Data is not copied, only structural information is created.
* A and B must have the same communicator, numbers and distributions of rows
* and columns (they can differ in which row-column pairs are nonzero, thus
* in which columns are in a offd block)
*--------------------------------------------------------------------------*/
hypre_ParCSRMatrix * hypre_ParCSRMatrixUnion( hypre_ParCSRMatrix * A,
                                              hypre_ParCSRMatrix * B )
{
   /* Returns a new matrix whose sparsity pattern is the union of A's and
      B's.  Only structure is created (no values).  A and B must share the
      same communicator and row/column distributions. */
   hypre_ParCSRMatrix * C;
   HYPRE_Int * col_map_offd_C = NULL;
   HYPRE_Int num_procs, my_id;
   MPI_Comm comm = hypre_ParCSRMatrixComm( A );

   hypre_MPI_Comm_rank(comm,&my_id);
   hypre_MPI_Comm_size(comm,&num_procs);

   C = hypre_CTAlloc( hypre_ParCSRMatrix, 1 );
   hypre_ParCSRMatrixComm( C ) = hypre_ParCSRMatrixComm( A );
   hypre_ParCSRMatrixGlobalNumRows( C ) = hypre_ParCSRMatrixGlobalNumRows( A );
   hypre_ParCSRMatrixGlobalNumCols( C ) = hypre_ParCSRMatrixGlobalNumCols( A );
   hypre_ParCSRMatrixFirstRowIndex( C ) = hypre_ParCSRMatrixFirstRowIndex( A );
   hypre_assert( hypre_ParCSRMatrixFirstRowIndex( B )
                 == hypre_ParCSRMatrixFirstRowIndex( A ) );
   /* C shares A's partition arrays and therefore does not own them. */
   hypre_ParCSRMatrixRowStarts( C ) = hypre_ParCSRMatrixRowStarts( A );
   hypre_ParCSRMatrixOwnsRowStarts( C ) = 0;
   hypre_ParCSRMatrixColStarts( C ) = hypre_ParCSRMatrixColStarts( A );
   hypre_ParCSRMatrixOwnsColStarts( C ) = 0;
   /* BUG FIX: the original asserted this same *pointer* comparison inside a
      loop over p = 0..num_procs with p unused — a dead loop.  A single
      assert checks the identical condition.  NOTE(review): an element-wise
      comparison may have been intended, but the partition arrays' length
      differs under HYPRE_NO_GLOBAL_PARTITION, so it is not added here. */
   hypre_assert( hypre_ParCSRMatrixColStarts(A)
                 == hypre_ParCSRMatrixColStarts(B) );
   hypre_ParCSRMatrixFirstColDiag( C ) = hypre_ParCSRMatrixFirstColDiag( A );
   hypre_ParCSRMatrixLastRowIndex( C ) = hypre_ParCSRMatrixLastRowIndex( A );
   hypre_ParCSRMatrixLastColDiag( C ) = hypre_ParCSRMatrixLastColDiag( A );
   hypre_ParCSRMatrixDiag( C ) =
      hypre_CSRMatrixUnion( hypre_ParCSRMatrixDiag(A), hypre_ParCSRMatrixDiag(B),
                            0, 0, 0 );
   hypre_ParCSRMatrixOffd( C ) =
      hypre_CSRMatrixUnion( hypre_ParCSRMatrixOffd(A), hypre_ParCSRMatrixOffd(B),
                            hypre_ParCSRMatrixColMapOffd(A),
                            hypre_ParCSRMatrixColMapOffd(B), &col_map_offd_C );
   hypre_ParCSRMatrixColMapOffd( C ) = col_map_offd_C;
   hypre_ParCSRMatrixCommPkg( C ) = NULL;
   hypre_ParCSRMatrixCommPkgT( C ) = NULL;
   hypre_ParCSRMatrixOwnsData( C ) = 1;
   /* SetNumNonzeros, SetDNumNonzeros are global, need hypre_MPI_Allreduce.
      I suspect, but don't know, that other parts of hypre do not assume that
      the correct values have been set.
      hypre_ParCSRMatrixSetNumNonzeros( C );
      hypre_ParCSRMatrixSetDNumNonzeros( C );*/
   hypre_ParCSRMatrixNumNonzeros( C ) = 0;
   hypre_ParCSRMatrixDNumNonzeros( C ) = 0.0;
   hypre_ParCSRMatrixRowindices( C ) = NULL;
   hypre_ParCSRMatrixRowvalues( C ) = NULL;
   hypre_ParCSRMatrixGetrowactive( C ) = 0;

   return C;
}
|
BuildSsdIndex.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <unordered_set>
#include <string>
#include <memory>
#include <vector>
#include <set>
#include <float.h>
#include "inc/SSDServing/IndexBuildManager/CommonDefines.h"
#include "inc/SSDServing/VectorSearch/Options.h"
#include "inc/SSDServing/VectorSearch/SearchDefault.h"
#include "inc/Core/Common/QueryResultSet.h"
#include "inc/Helper/VectorSetReader.h"
#include "inc/SSDServing/VectorSearch/TimeUtils.h"
#include <chrono>
namespace SPTAG {
namespace SSDServing {
namespace VectorSearch {
namespace Local
{
const std::uint16_t c_pageSize = 4096;
struct EdgeCompare
{
bool operator()(const Edge& a, int b) const
{
return a.node < b;
};
bool operator()(int a, const Edge& b) const
{
return a < b.node;
};
bool operator()(const Edge& a, const Edge& b) const
{
if (a.node == b.node)
{
if (a.distance == b.distance)
{
return a.tonode < b.tonode;
}
return a.distance < b.distance;
}
return a.node < b.node;
};
} g_edgeComparer;
// Edge-selection buffer that can spill to a temporary file and be worked
// on in batches. m_selections holds the window [m_start, m_end) of a
// logical array of m_totalsize edges; SaveBatch/LoadBatch page that
// window out to / in from m_tmpfile so that very large selections never
// need to be fully resident in memory.
struct Selection {
std::string m_tmpfile;          // backing file used for batching
size_t m_totalsize;             // logical number of Edge slots overall
size_t m_start;                 // first logical index currently in memory
size_t m_end;                   // one past the last logical index in memory
std::vector<Edge> m_selections; // in-memory window [m_start, m_end)
// Starts fully resident ([0, totalsize)); deletes any stale temp file.
Selection(size_t totalsize, std::string tmpdir) : m_tmpfile(tmpdir + FolderSep + "selection_tmp"), m_totalsize(totalsize), m_start(0), m_end(totalsize) { remove(m_tmpfile.c_str()); m_selections.resize(totalsize); }
// Writes the resident window to the temp file at its logical byte offset
// (m_start * sizeof(Edge)) and releases the in-memory buffer; the window
// becomes empty ([0, 0)). exit(1) on any IO failure.
void SaveBatch()
{
auto f_out = f_createIO();
// std::ios::in is added only when the file already exists so previously
// saved batches are preserved instead of truncated.
if (f_out == nullptr || !f_out->Initialize(m_tmpfile.c_str(), std::ios::out | std::ios::binary | (fileexists(m_tmpfile.c_str())? std::ios::in : 0))) {
LOG(Helper::LogLevel::LL_Error, "Cannot open %s to save selection for batching!\n", m_tmpfile.c_str());
exit(1);
}
if (f_out->WriteBinary(sizeof(Edge) * (m_end - m_start), (const char*)m_selections.data(), sizeof(Edge) * m_start) != sizeof(Edge) * (m_end - m_start)) {
LOG(Helper::LogLevel::LL_Error, "Cannot write to %s!\n", m_tmpfile.c_str());
exit(1);
}
// Swap with an empty vector to actually release capacity (clear() keeps it).
std::vector<Edge> batch_selection;
m_selections.swap(batch_selection);
m_start = m_end = 0;
}
// Loads logical range [start, end) from the temp file into memory,
// replacing the current window. exit(1) on any IO failure.
void LoadBatch(size_t start, size_t end)
{
auto f_in = f_createIO();
if (f_in == nullptr || !f_in->Initialize(m_tmpfile.c_str(), std::ios::in | std::ios::binary)) {
LOG(Helper::LogLevel::LL_Error, "Cannot open %s to load selection batch!\n", m_tmpfile.c_str());
exit(1);
}
size_t readsize = end - start;
m_selections.resize(readsize);
if (f_in->ReadBinary(readsize * sizeof(Edge), (char*)m_selections.data(), start * sizeof(Edge)) != readsize * sizeof(Edge)) {
LOG(Helper::LogLevel::LL_Error, "Cannot read from %s! start:%zu size:%zu\n", m_tmpfile.c_str(), start, readsize);
exit(1);
}
m_start = start;
m_end = end;
}
// Logical index of the first resident edge with .node >= node (binary
// search; assumes the window is sorted by g_edgeComparer's ordering).
size_t lower_bound(SizeType node)
{
auto ptr = std::lower_bound(m_selections.begin(), m_selections.end(), node, g_edgeComparer);
return m_start + (ptr - m_selections.begin());
}
// Access by LOGICAL index. NOTE(review): an out-of-window offset is only
// logged — the subsequent subscript is then out of range; callers must
// ensure the right batch is loaded.
Edge& operator[](size_t offset)
{
if (offset < m_start || offset >= m_end) {
LOG(Helper::LogLevel::LL_Error, "Error read offset in selections:%zu\n", offset);
}
return m_selections[offset - m_start];
}
};
// Reads the head-vector ID translation file (a flat sequence of 64-bit
// ids) and inserts each id, narrowed to int, into p_set. Fail-fast: an
// empty filename or an unopenable file terminates the process, exactly
// as before.
void LoadHeadVectorIDSet(const std::string& p_filename, std::unordered_set<int>& p_set)
{
if (p_filename.empty())
{
LOG(Helper::LogLevel::LL_Error, "Not found VectorIDTranslate!\n");
exit(1);
}
auto reader = SPTAG::f_createIO();
if (reader == nullptr || !reader->Initialize(p_filename.c_str(), std::ios::binary | std::ios::in)) {
LOG(Helper::LogLevel::LL_Error, "failed open VectorIDTranslate: %s\n", p_filename.c_str());
exit(1);
}
// Consume 8-byte ids until a short read signals end-of-file.
long long vid;
while (reader->ReadBinary(sizeof(vid), reinterpret_cast<char*>(&vid)) == sizeof(vid))
{
p_set.insert(static_cast<int>(vid));
}
LOG(Helper::LogLevel::LL_Info, "Loaded %u Vector IDs\n", static_cast<uint32_t>(p_set.size()));
}
// Assigns every non-empty posting list a starting page number
// (p_postPageNum) and an in-page byte offset (p_postPageOffset), and
// records the order in which lists should be written to the index file
// (p_postingOrderInIndex). Lists occupy p_spacePerVector * size bytes;
// their sub-page remainders are packed best-fit (largest remainder that
// still fits in the current page first) so lists share pages where
// possible.
void SelectPostingOffset(size_t p_spacePerVector,
const std::vector<int>& p_postingListSizes,
std::unique_ptr<int[]>& p_postPageNum,
std::unique_ptr<std::uint16_t[]>& p_postPageOffset,
std::vector<int>& p_postingOrderInIndex)
{
p_postPageNum.reset(new int[p_postingListSizes.size()]);
p_postPageOffset.reset(new std::uint16_t[p_postingListSizes.size()]);
// A list id together with its size modulo the page size ("rest").
struct PageModWithID
{
int id;
std::uint16_t rest;
};
// Orders by rest DESCENDING, ties broken by ascending id, so that
// lower_bound below finds the largest rest that fits the remaining space.
struct PageModeWithIDCmp
{
bool operator()(const PageModWithID& a, const PageModWithID& b) const
{
return a.rest == b.rest ? a.id < b.id : a.rest > b.rest;
}
};
std::set<PageModWithID, PageModeWithIDCmp> listRestSize;
p_postingOrderInIndex.clear();
p_postingOrderInIndex.reserve(p_postingListSizes.size());
PageModWithID listInfo;
// Seed the set with each non-empty list's sub-page remainder.
for (size_t i = 0; i < p_postingListSizes.size(); ++i)
{
if (p_postingListSizes[i] == 0)
{
continue;
}
listInfo.id = static_cast<int>(i);
listInfo.rest = static_cast<std::uint16_t>((p_spacePerVector * p_postingListSizes[i]) % c_pageSize);
listRestSize.insert(listInfo);
}
// id = -1 makes the probe key sort before real entries of equal rest.
listInfo.id = -1;
int currPageNum = 0;
std::uint16_t currOffset = 0;
while (!listRestSize.empty())
{
// Probe for the largest remainder that fits in what is left of the page.
listInfo.rest = c_pageSize - currOffset;
auto iter = listRestSize.lower_bound(listInfo);
if (iter == listRestSize.end())
{
// Nothing fits: start a fresh page.
++currPageNum;
currOffset = 0;
}
else
{
p_postPageNum[iter->id] = currPageNum;
p_postPageOffset[iter->id] = currOffset;
p_postingOrderInIndex.push_back(iter->id);
currOffset += iter->rest;
if (currOffset > c_pageSize)
{
LOG(Helper::LogLevel::LL_Error, "Crossing extra pages\n");
exit(1);
}
if (currOffset == c_pageSize)
{
++currPageNum;
currOffset = 0;
}
// Skip past the list's whole pages; its remainder was placed above.
currPageNum += static_cast<int>((p_spacePerVector * p_postingListSizes[iter->id]) / c_pageSize);
listRestSize.erase(iter);
}
}
LOG(Helper::LogLevel::LL_Info, "TotalPageNumbers: %d, IndexSize: %llu\n", currPageNum, static_cast<uint64_t>(currPageNum)* c_pageSize + currOffset);
}
// Writes one SSD index file:
//   header: 4 ints (list count, doc count, vector byte size, content
//           start page), then per-list metadata (page#, page offset,
//           element count, page count), padded to a page boundary;
//   body:   posting lists at the exact page positions chosen by
//           SelectPostingOffset, each entry = vector id (int) + raw
//           vector bytes, zero-padded between lists and at the end.
// Any IO failure or layout mismatch terminates the process.
void OutputSSDIndexFile(const std::string& p_outputFile,
size_t p_spacePerVector,
const std::vector<int>& p_postingListSizes,
Selection& p_postingSelections,
const std::unique_ptr<int[]>& p_postPageNum,
const std::unique_ptr<std::uint16_t[]>& p_postPageOffset,
const std::vector<int>& p_postingOrderInIndex,
std::shared_ptr<VectorSet> p_fullVectors,
size_t p_postingListOffset)
{
LOG(Helper::LogLevel::LL_Info, "Start output...\n");
auto t1 = std::chrono::high_resolution_clock::now();
auto ptr = SPTAG::f_createIO();
int retry = 3;
// Retry opening the output file a few times before giving up.
// NOTE(review): after a successful open in this loop the file is
// Initialize()d once more below — confirm Initialize is idempotent /
// harmless to call twice for this IO implementation.
while (retry > 0 && (ptr == nullptr || !ptr->Initialize(p_outputFile.c_str(), std::ios::binary | std::ios::out)))
{
LOG(Helper::LogLevel::LL_Error, "Failed open file %s\n", p_outputFile.c_str());
retry--;
}
if (ptr == nullptr || !ptr->Initialize(p_outputFile.c_str(), std::ios::binary | std::ios::out)) {
LOG(Helper::LogLevel::LL_Error, "Failed open file %s\n", p_outputFile.c_str());
exit(1);
}
// Header size: 4 leading ints + 12 bytes of metadata per posting list.
std::uint64_t listOffset = sizeof(int) * 4;
listOffset += (sizeof(int) + sizeof(std::uint16_t) + sizeof(int) + sizeof(std::uint16_t)) * p_postingListSizes.size();
std::unique_ptr<char[]> paddingVals(new char[c_pageSize]);
memset(paddingVals.get(), 0, sizeof(char) * c_pageSize);
// Round the header up to a full page so list content is page-aligned.
std::uint64_t paddingSize = c_pageSize - (listOffset % c_pageSize);
if (paddingSize == c_pageSize)
{
paddingSize = 0;
}
else
{
listOffset += paddingSize;
}
// Number of lists.
int i32Val = static_cast<int>(p_postingListSizes.size());
if (ptr->WriteBinary(sizeof(i32Val), reinterpret_cast<char*>(&i32Val)) != sizeof(i32Val)) {
LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndex File!");
exit(1);
}
// Number of all documents.
i32Val = static_cast<int>(p_fullVectors->Count());
if (ptr->WriteBinary(sizeof(i32Val), reinterpret_cast<char*>(&i32Val)) != sizeof(i32Val)) {
LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndex File!");
exit(1);
}
// Bytes of each vector.
i32Val = static_cast<int>(p_fullVectors->Dimension());
if (ptr->WriteBinary(sizeof(i32Val), reinterpret_cast<char*>(&i32Val)) != sizeof(i32Val)) {
LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndex File!");
exit(1);
}
// Page offset of list content section.
i32Val = static_cast<int>(listOffset / c_pageSize);
if (ptr->WriteBinary(sizeof(i32Val), reinterpret_cast<char*>(&i32Val)) != sizeof(i32Val)) {
LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndex File!");
exit(1);
}
// Per-list metadata: start page, in-page offset, element count, and the
// number of pages the list touches (rounded up).
for (int i = 0; i < p_postingListSizes.size(); ++i)
{
int pageNum = 0;
std::uint16_t pageOffset = 0;
int listEleCount = 0;
std::uint16_t listPageCount = 0;
if (p_postingListSizes[i] > 0)
{
pageNum = p_postPageNum[i];
pageOffset = static_cast<std::uint16_t>(p_postPageOffset[i]);
listEleCount = static_cast<int>(p_postingListSizes[i]);
listPageCount = static_cast<std::uint16_t>((p_spacePerVector * p_postingListSizes[i]) / c_pageSize);
if (0 != ((p_spacePerVector * p_postingListSizes[i]) % c_pageSize))
{
++listPageCount;
}
}
if (ptr->WriteBinary(sizeof(pageNum), reinterpret_cast<char*>(&pageNum)) != sizeof(pageNum)) {
LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndex File!");
exit(1);
}
if (ptr->WriteBinary(sizeof(pageOffset), reinterpret_cast<char*>(&pageOffset)) != sizeof(pageOffset)) {
LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndex File!");
exit(1);
}
if (ptr->WriteBinary(sizeof(listEleCount), reinterpret_cast<char*>(&listEleCount)) != sizeof(listEleCount)) {
LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndex File!");
exit(1);
}
if (ptr->WriteBinary(sizeof(listPageCount), reinterpret_cast<char*>(&listPageCount)) != sizeof(listPageCount)) {
LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndex File!");
exit(1);
}
}
// Pad the header to the page boundary computed above, then verify the
// file position matches the planned content offset.
if (paddingSize > 0)
{
if (ptr->WriteBinary(paddingSize, reinterpret_cast<char*>(paddingVals.get())) != paddingSize) {
LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndex File!");
exit(1);
}
}
if (static_cast<uint64_t>(ptr->TellP()) != listOffset)
{
LOG(Helper::LogLevel::LL_Info, "List offset not match!\n");
exit(1);
}
LOG(Helper::LogLevel::LL_Info, "SubIndex Size: %llu bytes, %llu MBytes\n", listOffset, listOffset >> 20);
// From here listOffset tracks the byte position WITHIN the content
// section (restarting at 0), matching SelectPostingOffset's plan.
listOffset = 0;
std::uint64_t paddedSize = 0;
for (auto id : p_postingOrderInIndex)
{
std::uint64_t targetOffset = static_cast<uint64_t>(p_postPageNum[id])* c_pageSize + p_postPageOffset[id];
if (targetOffset < listOffset)
{
LOG(Helper::LogLevel::LL_Info, "List offset not match, targetOffset < listOffset!\n");
exit(1);
}
// Zero-fill any gap up to the list's planned position (never more than
// one page by construction).
if (targetOffset > listOffset)
{
if (targetOffset - listOffset > c_pageSize)
{
LOG(Helper::LogLevel::LL_Error, "Padding size greater than page size!\n");
exit(1);
}
if (ptr->WriteBinary(targetOffset - listOffset, reinterpret_cast<char*>(paddingVals.get())) != targetOffset - listOffset) {
LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndex File!");
exit(1);
}
paddedSize += targetOffset - listOffset;
listOffset = targetOffset;
}
// p_postingListOffset maps the file-local list id back to the global
// node id used in the (sorted) selections.
std::size_t selectIdx = p_postingSelections.lower_bound(id + (int)p_postingListOffset);
for (int j = 0; j < p_postingListSizes[id]; ++j)
{
if (p_postingSelections[selectIdx].node != id + (int)p_postingListOffset)
{
LOG(Helper::LogLevel::LL_Error, "Selection ID NOT MATCH! node:%d offset:%zu\n", id + (int)p_postingListOffset, selectIdx);
exit(1);
}
// One posting entry: the target vector id followed by its raw bytes.
i32Val = p_postingSelections[selectIdx++].tonode;
if (ptr->WriteBinary(sizeof(i32Val), reinterpret_cast<char*>(&i32Val)) != sizeof(i32Val)) {
LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndex File!");
exit(1);
}
if (ptr->WriteBinary(p_fullVectors->PerVectorDataSize(), reinterpret_cast<char*>(p_fullVectors->GetVector(i32Val))) != p_fullVectors->PerVectorDataSize()) {
LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndex File!");
exit(1);
}
listOffset += p_spacePerVector;
}
}
// Pad the final partial page so the file ends on a page boundary.
paddingSize = c_pageSize - (listOffset % c_pageSize);
if (paddingSize == c_pageSize)
{
paddingSize = 0;
}
else
{
listOffset += paddingSize;
paddedSize += paddingSize;
}
if (paddingSize > 0)
{
if (ptr->WriteBinary(paddingSize, reinterpret_cast<char*>(paddingVals.get())) != paddingSize) {
LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndex File!");
exit(1);
}
}
LOG(Helper::LogLevel::LL_Info, "Padded Size: %llu, final total size: %llu.\n", paddedSize, listOffset);
LOG(Helper::LogLevel::LL_Info, "Output done...\n");
auto t2 = std::chrono::high_resolution_clock::now();
LOG(Helper::LogLevel::LL_Info, "Time to write results:%.2lf sec.\n", ((double)std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count()) + ((double)std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count()) / 1000);
}
}
// Builds the SSD-resident posting-list index for all "tail" vectors
// (those not chosen as head vectors):
//   1. load the head-vector id set and the full vector file;
//   2. per batch, run ApproximateRNG on the head index to pick up to
//      m_replicaCount head postings per tail vector (with a sampled
//      recall check of the head index first);
//   3. globally sort the selections and cut posting lists that exceed
//      the page-derived size limit;
//   4. plan page layout and emit one or more SSD index files.
template<typename ValueType>
void BuildSsdIndex(Options& p_opts)
{
using namespace Local;
TimeUtils::StopW sw;
std::string outputFile = COMMON_OPTS.m_ssdIndex;
if (outputFile.empty())
{
LOG(Helper::LogLevel::LL_Error, "Output file can't be empty!\n");
exit(1);
}
int numThreads = p_opts.m_iNumberOfThreads;
int candidateNum = p_opts.m_internalResultNum;
std::unordered_set<int> headVectorIDS;
LoadHeadVectorIDSet(COMMON_OPTS.m_headIDFile, headVectorIDS);
std::shared_ptr<Helper::ReaderOptions> vectorOptions(new Helper::ReaderOptions(COMMON_OPTS.m_valueType, COMMON_OPTS.m_dim, COMMON_OPTS.m_vectorType, COMMON_OPTS.m_vectorDelimiter));
auto vectorReader = Helper::VectorSetReader::CreateInstance(vectorOptions);
if (ErrorCode::Success != vectorReader->LoadFile(COMMON_OPTS.m_vectorPath))
{
LOG(Helper::LogLevel::LL_Error, "Failed to read vector file.\n");
exit(1);
}
SizeType fullCount = 0;
size_t vectorInfoSize = 0;
{
// Scope-limited: only the count and per-entry byte size (raw vector
// plus the int id stored alongside it) are needed up front.
auto fullVectors = vectorReader->GetVectorSet();
fullCount = fullVectors->Count();
vectorInfoSize = fullVectors->PerVectorDataSize() + sizeof(int);
}
// One Edge slot per (vector, replica) pair.
Selection selections(static_cast<size_t>(fullCount)* p_opts.m_replicaCount, p_opts.m_tmpdir);
LOG(Helper::LogLevel::LL_Info, "Full vector count:%d Edge bytes:%llu selection size:%zu, capacity size:%zu\n", fullCount, sizeof(Edge), selections.m_selections.size(), selections.m_selections.capacity());
std::vector<std::atomic_int> replicaCount(fullCount);
std::vector<std::atomic_int> postingListSize(headVectorIDS.size());
for (auto& pls : postingListSize) pls = 0;
std::unordered_set<int> emptySet;
SizeType batchSize = (fullCount + p_opts.m_batches - 1) / p_opts.m_batches;
// In batched mode the (empty) selection window is spilled so each batch
// can load just its own slice.
if (p_opts.m_batches > 1) selections.SaveBatch();
{
SearchDefault<ValueType> searcher;
LOG(Helper::LogLevel::LL_Info, "Start setup index...\n");
ByteArray myByteArray;
searcher.Setup(p_opts, myByteArray);
LOG(Helper::LogLevel::LL_Info, "Setup index finish, start setup hint...\n");
searcher.SetHint(numThreads, candidateNum, false, p_opts);
TimeUtils::StopW rngw;
LOG(Helper::LogLevel::LL_Info, "Preparation done, start candidate searching.\n");
SizeType sampleSize = p_opts.m_samples;
SizeType sampleK = candidateNum;
float sampleE = 1e-6f;
std::vector<SizeType> samples(sampleSize, 0);
std::vector<SizeType> recalls(sampleSize, 0);
for (int i = 0; i < p_opts.m_batches; i++) {
SizeType start = i * batchSize;
SizeType end = min(start + batchSize, fullCount);
auto fullVectors = vectorReader->GetVectorSet(start, end);
if (COMMON_OPTS.m_distCalcMethod == DistCalcMethod::Cosine) fullVectors->Normalize(p_opts.m_iNumberOfThreads);
if (p_opts.m_batches > 1) {
// Load this batch's selection slice; emptySet holds head-vector ids
// re-based to batch-local indices (those are skipped by the RNG).
selections.LoadBatch(static_cast<size_t>(start)* p_opts.m_replicaCount, static_cast<size_t>(end)* p_opts.m_replicaCount);
emptySet.clear();
for (auto vid : headVectorIDS) {
if (vid >= start && vid < end) emptySet.insert(vid - start);
}
}
else {
emptySet = headVectorIDS;
}
// Sample up to sampleSize non-head vectors from this batch to measure
// the head index's recall against brute-force ground truth.
int sampleNum = 0;
for (int j = start; j < end && sampleNum < sampleSize; j++)
{
if (headVectorIDS.count(j) == 0) samples[sampleNum++] = j - start;
}
#pragma omp parallel for schedule(dynamic)
for (int j = 0; j < sampleNum; j++)
{
COMMON::QueryResultSet<void> sampleANN(fullVectors->GetVector(samples[j]), sampleK);
searcher.HeadIndex()->SearchIndex(sampleANN);
// Brute-force the same query over every head-index sample.
COMMON::QueryResultSet<void> sampleTruth(fullVectors->GetVector(samples[j]), sampleK);
for (SizeType y = 0; y < searcher.HeadIndex()->GetNumSamples(); y++)
{
float dist = searcher.HeadIndex()->ComputeDistance(sampleTruth.GetTarget(), searcher.HeadIndex()->GetSample(y));
sampleTruth.AddPoint(y, dist);
}
sampleTruth.SortResult();
// Recall: greedily match ANN results to truth results by distance
// (within sampleE), each ANN result used at most once.
recalls[j] = 0;
std::vector<bool> visited(sampleK, false);
for (SizeType y = 0; y < sampleK; y++)
{
for (SizeType z = 0; z < sampleK; z++)
{
if (visited[z]) continue;
if (fabs(sampleANN.GetResult(z)->Dist - sampleTruth.GetResult(y)->Dist) < sampleE)
{
recalls[j]++;
visited[z] = true;
break;
}
}
}
}
float acc = 0;
for (int j = 0; j < sampleNum; j++) acc += float(recalls[j]);
acc = acc / sampleNum / sampleK;
LOG(Helper::LogLevel::LL_Info, "Batch %d vector(%d,%d) loaded with %d vectors (%zu) HeadIndex acc @%d:%f.\n", i, start, end, fullVectors->Count(), selections.m_selections.size(), sampleK, acc);
// Pick up to m_replicaCount head postings per tail vector in this batch.
searcher.HeadIndex()->ApproximateRNG(fullVectors, emptySet, candidateNum, selections.m_selections.data(), p_opts.m_replicaCount, numThreads, p_opts.m_gpuSSDNumTrees, p_opts.m_gpuSSDLeafSize, p_opts.m_rngFactor, p_opts.m_numGPUs);
// Tally posting-list sizes and per-vector replica counts; node ==
// INT_MAX marks an unused replica slot.
for (SizeType j = start; j < end; j++) {
replicaCount[j] = 0;
size_t vecOffset = j * (size_t)p_opts.m_replicaCount;
if (headVectorIDS.count(j) == 0) {
for (int resNum = 0; resNum < p_opts.m_replicaCount && selections[vecOffset + resNum].node != INT_MAX; resNum++) {
++postingListSize[selections[vecOffset + resNum].node];
selections[vecOffset + resNum].tonode = j;
//selections[vecOffset + resNum].order = (char)resNum;
++replicaCount[j];
}
}
}
if (p_opts.m_batches > 1) selections.SaveBatch();
}
double rngElapsedMinutes = rngw.getElapsedMin();
LOG(Helper::LogLevel::LL_Info, "Searching replicas ended. Search Time: %.2lf mins\n", rngElapsedMinutes);
}
auto t1 = std::chrono::high_resolution_clock::now();
// Bring the full selection array back into memory and sort it globally
// (by node, then distance) so each posting list is contiguous.
if (p_opts.m_batches > 1) selections.LoadBatch(0, static_cast<size_t>(fullCount)* p_opts.m_replicaCount);
// Sort results either in CPU or GPU
VectorIndex::SortSelections(&selections.m_selections);
auto t2 = std::chrono::high_resolution_clock::now();
LOG(Helper::LogLevel::LL_Info, "Time to sort selections:%.2lf sec.\n", ((double)std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count()) + ((double)std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count()) / 1000);
t1 = std::chrono::high_resolution_clock::now();
// Posting cut: cap each list at however many entries fit in the
// configured number of pages.
int postingSizeLimit = INT_MAX;
if (p_opts.m_postingPageLimit > 0)
{
postingSizeLimit = static_cast<int>(p_opts.m_postingPageLimit * c_pageSize / vectorInfoSize);
}
LOG(Helper::LogLevel::LL_Info, "Posting size limit: %d\n", postingSizeLimit);
{
std::vector<int> replicaCountDist(p_opts.m_replicaCount + 1, 0);
for (int i = 0; i < replicaCount.size(); ++i)
{
if (headVectorIDS.count(i) > 0)
{
continue;
}
++replicaCountDist[replicaCount[i]];
}
LOG(Helper::LogLevel::LL_Info, "Before Posting Cut:\n");
for (int i = 0; i < replicaCountDist.size(); ++i)
{
LOG(Helper::LogLevel::LL_Info, "Replica Count Dist: %d, %d\n", i, replicaCountDist[i]);
}
}
// Drop the tail (sorted furthest-first entries beyond the limit) of each
// oversized posting list and decrement the affected vectors' replica
// counts.
#pragma omp parallel for schedule(dynamic)
for (int i = 0; i < postingListSize.size(); ++i)
{
if (postingListSize[i] <= postingSizeLimit)
{
continue;
}
std::size_t selectIdx = std::lower_bound(selections.m_selections.begin(), selections.m_selections.end(), i, g_edgeComparer) - selections.m_selections.begin();
/*
int deletenum = postingListSize[i] - postingSizeLimit;
for (char remove = p_opts.m_replicaCount - 1; deletenum > 0 && remove > 0; remove--)
{
for (int dropID = postingListSize[i] - 1; deletenum > 0 && dropID >= 0; --dropID)
{
if (selections.m_selections[selectIdx + dropID].order == remove) {
selections.m_selections[selectIdx + dropID].order = -1;
--replicaCount[selections.m_selections[selectIdx + dropID].tonode];
deletenum--;
}
}
}
for (int iid = 0; iid < postingSizeLimit + deletenum; iid++) {
if (selections.m_selections[selectIdx + iid].order < 0) {
for (int ij = iid + 1; ij < postingListSize[i]; ij++) {
if (selections.m_selections[selectIdx + ij].order >= 0) {
std::swap(selections.m_selections[selectIdx + iid], selections.m_selections[selectIdx + ij]);
break;
}
}
}
}
*/
for (size_t dropID = postingSizeLimit; dropID < postingListSize[i]; ++dropID)
{
int tonode = selections.m_selections[selectIdx + dropID].tonode;
--replicaCount[tonode];
}
postingListSize[i] = postingSizeLimit;
}
// Optionally dump ids of vectors left with fewer than 2 replicas after
// the cut, plus the post-cut replica distribution.
if (p_opts.m_outputEmptyReplicaID)
{
std::vector<int> replicaCountDist(p_opts.m_replicaCount + 1, 0);
auto ptr = SPTAG::f_createIO();
if (ptr == nullptr || !ptr->Initialize("EmptyReplicaID.bin", std::ios::binary | std::ios::out)) {
LOG(Helper::LogLevel::LL_Error, "Fail to create EmptyReplicaID.bin!\n");
exit(1);
}
for (int i = 0; i < replicaCount.size(); ++i)
{
if (headVectorIDS.count(i) > 0)
{
continue;
}
++replicaCountDist[replicaCount[i]];
if (replicaCount[i] < 2)
{
long long vid = i;
if (ptr->WriteBinary(sizeof(vid), reinterpret_cast<char*>(&vid)) != sizeof(vid)) {
LOG(Helper::LogLevel::LL_Error, "Failt to write EmptyReplicaID.bin!");
exit(1);
}
}
}
LOG(Helper::LogLevel::LL_Info, "After Posting Cut:\n");
for (int i = 0; i < replicaCountDist.size(); ++i)
{
LOG(Helper::LogLevel::LL_Info, "Replica Count Dist: %d, %d\n", i, replicaCountDist[i]);
}
}
t2 = std::chrono::high_resolution_clock::now();
LOG(SPTAG::Helper::LogLevel::LL_Info, "Time to perform posting cut:%.2lf sec.\n", ((double)std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count()) + ((double)std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count()) / 1000);
// Split the posting lists evenly over m_ssdIndexFileNum output files;
// selectionsBatchOffset[i] is the selection index where file i's lists
// begin.
size_t postingFileSize = (postingListSize.size() + COMMON_OPTS.m_ssdIndexFileNum - 1) / COMMON_OPTS.m_ssdIndexFileNum;
std::vector<size_t> selectionsBatchOffset(COMMON_OPTS.m_ssdIndexFileNum + 1, 0);
for (int i = 0; i < COMMON_OPTS.m_ssdIndexFileNum; i++) {
size_t curPostingListEnd = min(postingListSize.size(), (i + 1) * postingFileSize);
selectionsBatchOffset[i + 1] = std::lower_bound(selections.m_selections.begin(), selections.m_selections.end(), (SizeType)curPostingListEnd, g_edgeComparer) - selections.m_selections.begin();
}
if (COMMON_OPTS.m_ssdIndexFileNum > 1) selections.SaveBatch();
auto fullVectors = vectorReader->GetVectorSet();
if (COMMON_OPTS.m_distCalcMethod == DistCalcMethod::Cosine) fullVectors->Normalize(p_opts.m_iNumberOfThreads);
for (int i = 0; i < COMMON_OPTS.m_ssdIndexFileNum; i++) {
size_t curPostingListOffSet = i * postingFileSize;
size_t curPostingListEnd = min(postingListSize.size(), (i + 1) * postingFileSize);
std::vector<int> curPostingListSizes(
postingListSize.begin() + curPostingListOffSet,
postingListSize.begin() + curPostingListEnd);
std::unique_ptr<int[]> postPageNum;
std::unique_ptr<std::uint16_t[]> postPageOffset;
std::vector<int> postingOrderInIndex;
SelectPostingOffset(vectorInfoSize, curPostingListSizes, postPageNum, postPageOffset, postingOrderInIndex);
if (COMMON_OPTS.m_ssdIndexFileNum > 1) selections.LoadBatch(selectionsBatchOffset[i], selectionsBatchOffset[i + 1]);
// First file keeps the plain output name; extra files get "_<i>".
OutputSSDIndexFile((i == 0)? outputFile : outputFile + "_" + std::to_string(i),
vectorInfoSize,
curPostingListSizes,
selections,
postPageNum,
postPageOffset,
postingOrderInIndex,
fullVectors,
curPostingListOffSet);
}
double elapsedMinutes = sw.getElapsedMin();
LOG(Helper::LogLevel::LL_Info, "Total used time: %.2lf minutes (about %.2lf hours).\n", elapsedMinutes, elapsedMinutes / 60.0);
}
}
}
}
|
bt_single.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - BT
This benchmark is an OpenMP C version of the NPB BT code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: R. Van der Wijngaart
T. Harris
M. Yarrow
OpenMP C version: S. Satoh
--------------------------------------------------------------------*/
//#include "npb-C.h"
/*
NAS Parallel Benchmarks 2.3 OpenMP C Versions
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#if defined(_OPENMP)
#include <omp.h>
#endif /* _OPENMP */
typedef int boolean;
typedef struct { double real; double imag; } dcomplex;
#define TRUE 1
#define FALSE 0
#define max(a,b) (((a) > (b)) ? (a) : (b))
#define min(a,b) (((a) < (b)) ? (a) : (b))
#define pow2(a) ((a)*(a))
#define get_real(c) c.real
#define get_imag(c) c.imag
#define cadd(c,a,b) (c.real = a.real + b.real, c.imag = a.imag + b.imag)
#define csub(c,a,b) (c.real = a.real - b.real, c.imag = a.imag - b.imag)
#define cmul(c,a,b) (c.real = a.real * b.real - a.imag * b.imag, \
c.imag = a.real * b.imag + a.imag * b.real)
#define crmul(c,a,b) (c.real = a.real * b, c.imag = a.imag * b)
extern double randlc(double *, double);
extern void vranlc(int, double *, double, double *);
extern void timer_clear(int);
extern void timer_start(int);
extern void timer_stop(int);
extern double timer_read(int);
extern void c_print_results(char *name, char cclass, int n1, int n2,
int n3, int niter, int nthreads, double t,
double mops, char *optype, int passed_verification,
char *npbversion, char *compiletime, char *cc,
char *clink, char *c_lib, char *c_inc,
char *cflags, char *clinkflags, char *rand);
/* global variables */
//#include "header.h"
/*--------------------------------------------------------------------
c---------------------------------------------------------------------
c
c header.h
c
c---------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c The following include file is generated automatically by the
c "setparams" utility. It defines
c maxcells: the square root of the maximum number of processors
c problem_size: 12, 64, 102, 162 (for class T, A, B, C)
c dt_default: default time step for this problem size if no
c config file
c niter_default: default number of iterations for this problem size
--------------------------------------------------------------------*/
//#include "npbparams.h"
/******************/
/* default values */
/******************/
#ifndef CLASS
#define CLASS 'W'
#endif
#if CLASS == 'S'
#define PROBLEM_SIZE 12
#define NITER_DEFAULT 60
#define DT_DEFAULT 0.010
#endif
#if CLASS == 'W'
#define PROBLEM_SIZE 24
#define NITER_DEFAULT 200
#define DT_DEFAULT 0.0008
#endif
#if CLASS == 'A'
#define PROBLEM_SIZE 64
#define NITER_DEFAULT 200
#define DT_DEFAULT 0.0008
#endif
#if CLASS == 'B'
#define PROBLEM_SIZE 102
#define NITER_DEFAULT 200
#define DT_DEFAULT 0.0003
#endif
#if CLASS == 'C'
#define PROBLEM_SIZE 162
#define NITER_DEFAULT 200
#define DT_DEFAULT 0.0001
#endif
#define CONVERTDOUBLE FALSE
#define COMPILETIME "27 Oct 2014"
#define NPBVERSION "2.3"
#define CS1 "gcc"
#define CS2 "$(CC)"
#define CS3 "(none)"
#define CS4 "-I../common"
#define CS5 "-fopenmp -O2"
#define CS6 "-lm -fopenmp"
#define CS7 "randdp"
//--------end class definition -----------
#define AA 0
#define BB 1
#define CC 2
#define BLOCK_SIZE 5
/* COMMON block: global */
static int grid_points[3]; /* grid_ponts(1:3) */
/* COMMON block: constants */
static double tx1, tx2, tx3, ty1, ty2, ty3, tz1, tz2, tz3;
static double dx1, dx2, dx3, dx4, dx5;
static double dy1, dy2, dy3, dy4, dy5;
static double dz1, dz2, dz3, dz4, dz5;
static double dssp, dt;
static double ce[5][13]; /* ce(5,13) */
static double dxmax, dymax, dzmax;
static double xxcon1, xxcon2, xxcon3, xxcon4, xxcon5;
static double dx1tx1, dx2tx1, dx3tx1, dx4tx1, dx5tx1;
static double yycon1, yycon2, yycon3, yycon4, yycon5;
static double dy1ty1, dy2ty1, dy3ty1, dy4ty1, dy5ty1;
static double zzcon1, zzcon2, zzcon3, zzcon4, zzcon5;
static double dz1tz1, dz2tz1, dz3tz1, dz4tz1, dz5tz1;
static double dnxm1, dnym1, dnzm1, c1c2, c1c5, c3c4, c1345;
static double conz1, c1, c2, c3, c4, c5, c4dssp, c5dssp, dtdssp;
static double dttx1, dttx2, dtty1, dtty2, dttz1, dttz2;
static double c2dttx1, c2dtty1, c2dttz1, comz1, comz4, comz5, comz6;
static double c3c4tx3, c3c4ty3, c3c4tz3, c2iv, con43, con16;
#define IMAX PROBLEM_SIZE
#define JMAX PROBLEM_SIZE
#define KMAX PROBLEM_SIZE
/*
c to improve cache performance, grid dimensions padded by 1
c for even number sizes only.
*/
/* COMMON block: fields */
static double us[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double vs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double ws[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double qs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double rho_i[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double square[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double forcing[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][5+1];
static double u[(IMAX+1)/2*2+1][(JMAX+1)/2*2+1][(KMAX+1)/2*2+1][5];
static double rhs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][5];
static double lhs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][3][5][5];
/* COMMON block: work_1d */
static double cuf[PROBLEM_SIZE];
static double q[PROBLEM_SIZE];
static double ue[PROBLEM_SIZE][5];
static double buf[PROBLEM_SIZE][5];
//Liao, the program may be wrong!!
#pragma omp threadprivate(cuf, q, ue, buf)
/*
c to improve cache performance, grid dimensions (first two for these
c to arrays) padded by 1 for even number sizes only.
*/
/* COMMON block: work_lhs */
static double fjac[IMAX/2*2+1][JMAX/2*2+1][KMAX-1+1][5][5];
/* fjac(5, 5, 0:IMAX/2*2, 0:JMAX/2*2, 0:KMAX-1) */
static double njac[IMAX/2*2+1][JMAX/2*2+1][KMAX-1+1][5][5];
/* njac(5, 5, 0:IMAX/2*2, 0:JMAX/2*2, 0:KMAX-1) */
static double tmp1, tmp2, tmp3;
/* function declarations */
static void add(void);
static void adi(void);
static void error_norm(double rms[5]);
static void rhs_norm(double rms[5]);
static void exact_rhs(void);
static void exact_solution(double xi, double eta, double zeta,
double dtemp[5]);
static void initialize(void);
static void lhsinit(void);
static void lhsx(void);
static void lhsy(void);
static void lhsz(void);
static void compute_rhs(void);
static void set_constants(void);
static void verify(int no_time_steps, char *cclass, boolean *verified);
static void x_solve(void);
static void x_backsubstitute(void);
static void x_solve_cell(void);
static void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]);
static void matmul_sub(double ablock[5][5], double bblock[5][5],
double cblock[5][5]);
static void binvcrhs(double lhs[5][5], double c[5][5], double r[5]);
static void binvrhs(double lhs[5][5], double r[5]);
static void y_solve(void);
static void y_backsubstitute(void);
static void y_solve_cell(void);
static void z_solve(void);
static void z_backsubstitute(void);
static void z_solve_cell(void);
/*--------------------------------------------------------------------
program BT
c-------------------------------------------------------------------*/
/* Driver: read configuration (inputbt.data or compiled defaults), run a
   warm-up ADI step, time niter ADI iterations, verify the result and
   print the benchmark report.

   Fixes vs. original:
   - the end-of-line skip loops used `while (fgetc(fp) != '\n');`, which
     spins forever if the file ends without a newline; they now also stop
     at EOF;
   - fscanf conversion results are checked, and a malformed input file
     falls back to the compiled defaults instead of leaving niter / dt /
     grid_points uninitialized;
   - explicit `return 0;` from int main. */
int main(int argc, char **argv) {

  int niter, step, n3;
  int nthreads = 1;
  double navg, mflops;

  double tmax;
  boolean verified;
  char cclass;
  FILE *fp;

/*--------------------------------------------------------------------
c      Root node reads input file (if it exists) else takes
c      defaults from parameters
c-------------------------------------------------------------------*/
  printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
         " - BT Benchmark\n\n");

  fp = fopen("inputbt.data", "r");
  if (fp != NULL) {
    int c;          /* current character while skipping to end of line */
    int parsed = 1; /* nonzero while every fscanf conversion succeeds */
    printf(" Reading from input file inputbt.data");
    parsed = parsed && fscanf(fp, "%d", &niter) == 1;
    while ((c = fgetc(fp)) != '\n' && c != EOF);
    parsed = parsed && fscanf(fp, "%lg", &dt) == 1;
    while ((c = fgetc(fp)) != '\n' && c != EOF);
    parsed = parsed && fscanf(fp, "%d%d%d",
           &grid_points[0], &grid_points[1], &grid_points[2]) == 3;
    fclose(fp);
    if (!parsed) {
      /* Malformed/truncated input file: use compiled defaults rather
         than running with indeterminate parameters. */
      printf(" Malformed inputbt.data. Using compiled defaults\n");
      niter = NITER_DEFAULT;
      dt    = DT_DEFAULT;
      grid_points[0] = PROBLEM_SIZE;
      grid_points[1] = PROBLEM_SIZE;
      grid_points[2] = PROBLEM_SIZE;
    }
  } else {
    printf(" No input file inputbt.data. Using compiled defaults\n");

    niter = NITER_DEFAULT;
    dt    = DT_DEFAULT;
    grid_points[0] = PROBLEM_SIZE;
    grid_points[1] = PROBLEM_SIZE;
    grid_points[2] = PROBLEM_SIZE;
  }

  printf(" Size: %3dx%3dx%3d\n",
         grid_points[0], grid_points[1], grid_points[2]);
  printf(" Iterations: %3d   dt: %10.6f\n", niter, dt);

  if (grid_points[0] > IMAX ||
      grid_points[1] > JMAX ||
      grid_points[2] > KMAX) {
    printf(" %dx%dx%d\n", grid_points[0], grid_points[1], grid_points[2]);
    printf(" Problem size too big for compiled array sizes\n");
    exit(1);
  }

  set_constants();

  /* Warm-up: one full ADI step to touch all code paths/pages, then
     reinitialize the fields before the timed run. */
#pragma omp parallel
{
  initialize();

  lhsinit();

  exact_rhs();

/*--------------------------------------------------------------------
c      do one time step to touch all code, and reinitialize
c-------------------------------------------------------------------*/
  adi();

  initialize();
} /* end parallel */

  timer_clear(1);
  timer_start(1);

  /* Timed region: niter ADI steps. Each thread runs its own step loop;
     worksharing happens inside adi()'s `omp for` loops. */
#pragma omp parallel firstprivate(niter) private(step)
{
  for (step = 1; step <= niter; step++) {
    if (step%20 == 0 || step == 1) {
#pragma omp master
      printf(" Time step %4d\n", step);
    }
    adi();
  }

#if defined(_OPENMP)
#pragma omp master
  nthreads = omp_get_num_threads();
#endif /* _OPENMP */
} /* end parallel */

  timer_stop(1);
  tmax = timer_read(1);

  verify(niter, &cclass, &verified);

  n3 = grid_points[0]*grid_points[1]*grid_points[2];
  navg = (grid_points[0]+grid_points[1]+grid_points[2])/3.0;
  /* Guard against a zero elapsed time before computing MFLOPS. */
  if ( fabs(tmax-0.0)>1.0e-5 ) {
    mflops = 1.0e-6*(double)niter*
      (3478.8*(double)n3-17655.7*pow2(navg)+28023.7*navg) / tmax;
  } else {
    mflops = 0.0;
  }
  c_print_results("BT", cclass, grid_points[0],
                  grid_points[1], grid_points[2], niter, nthreads,
                  tmax, mflops, "          floating point",
                  verified, NPBVERSION,COMPILETIME, CS1, CS2, CS3, CS4, CS5,
                  CS6, "(none)");

  return 0;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void add(void) {
  /* u <- u + rhs at every interior grid point (the boundary layer,
     index 0 and grid_points[d]-1, is left untouched). */
  int ii, jj, kk, mm;
#pragma omp for private(jj,kk,mm)
  for (ii = 1; ii < grid_points[0]-1; ii++) {
    for (jj = 1; jj < grid_points[1]-1; jj++) {
      for (kk = 1; kk < grid_points[2]-1; kk++) {
        for (mm = 0; mm < 5; mm++) {
          u[ii][jj][kk][mm] += rhs[ii][jj][kk][mm];
        }
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void adi(void) {
  /* One ADI time step: build the right hand side, apply the three
     factored solves (x, y, z sweeps in that order), then accumulate
     the resulting update into the solution vector u via add(). */
  compute_rhs();
  x_solve();
  y_solve();
  z_solve();
  add();
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void error_norm(double rms[5]) {
  /* Fill rms[0..4] with the RMS norm of (computed solution u minus
     the exact solution), one entry per solution component.
     Normalization divides by the interior extent of each dimension. */
  int i, j, k, m, d;
  double xi, eta, zeta, exact_u[5], diff;

  for (m = 0; m < 5; m++) rms[m] = 0.0;

  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      for (k = 0; k < grid_points[2]; k++) {
        zeta = (double)k * dnzm1;
        exact_solution(xi, eta, zeta, exact_u);
        for (m = 0; m < 5; m++) {
          diff = u[i][j][k][m] - exact_u[m];
          rms[m] += diff*diff;
        }
      }
    }
  }

  for (m = 0; m < 5; m++) {
    for (d = 0; d <= 2; d++) {
      rms[m] /= (double)(grid_points[d]-2);
    }
    rms[m] = sqrt(rms[m]);
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void rhs_norm(double rms[5]) {
  /* Fill rms[0..4] with the RMS norm of the right hand side over the
     interior grid points, normalized like error_norm(). */
  int i, j, k, d, m;
  double term;

  for (m = 0; m < 5; m++) rms[m] = 0.0;

  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        for (m = 0; m < 5; m++) {
          term = rhs[i][j][k][m];
          rms[m] += term*term;
        }
      }
    }
  }

  for (m = 0; m < 5; m++) {
    for (d = 0; d <= 2; d++) {
      rms[m] /= (double)(grid_points[d]-2);
    }
    rms[m] = sqrt(rms[m]);
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void exact_rhs(void) {
  /*--------------------------------------------------------------------
  c compute the right hand side based on exact solution
  c-------------------------------------------------------------------*/
  /* Builds `forcing` from the analytic solution: for each of the three
     coordinate directions it evaluates the exact solution along a
     "pencil" of points, forms flux differences plus fourth-order
     dissipation, and finally negates the whole array.
     NOTE(review): ue/buf/cuf/q are file-scope scratch arrays reused per
     pencil — presumably declared threadprivate elsewhere so the
     worksharing loops below are race-free; confirm against the header. */
  double dtemp[5], xi, eta, zeta, dtpp;
  int m, i, j, k, ip1, im1, jp1, jm1, km1, kp1;
  /*--------------------------------------------------------------------
  c initialize
  c-------------------------------------------------------------------*/
#pragma omp for private(j,k,m)
  for (i = 0; i < grid_points[0]; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 0; k < grid_points[2]; k++) {
        for (m = 0; m < 5; m++) {
          forcing[i][j][k][m] = 0.0;
        }
      }
    }
  }
  /*--------------------------------------------------------------------
  c xi-direction flux differences
  c-------------------------------------------------------------------*/
#pragma omp for private(k,i,m)
  for (j = 1; j < grid_points[1]-1; j++) {
    eta = (double)j * dnym1;
    for (k = 1; k < grid_points[2]-1; k++) {
      zeta = (double)k * dnzm1;
      /* first pass along the pencil: ue = exact solution,
         buf[m] = dtemp[m]/dtemp[0] (m>=1), cuf = buf[1]^2,
         q = 0.5 * (buf.ue) dot-product over components 1..3 */
      for (i = 0; i < grid_points[0]; i++) {
        xi = (double)i * dnxm1;
        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++) {
          ue[i][m] = dtemp[m];
        }
        dtpp = 1.0 / dtemp[0];
        for (m = 1; m <= 4; m++) {
          buf[i][m] = dtpp * dtemp[m];
        }
        cuf[i] = buf[i][1] * buf[i][1];
        buf[i][0] = cuf[i] + buf[i][2] * buf[i][2] +
          buf[i][3] * buf[i][3];
        q[i] = 0.5*(buf[i][1]*ue[i][1] + buf[i][2]*ue[i][2] +
                    buf[i][3]*ue[i][3]);
      }
      /* second pass: central flux differences at interior points */
      for (i = 1; i < grid_points[0]-1; i++) {
        im1 = i-1;
        ip1 = i+1;
        forcing[i][j][k][0] = forcing[i][j][k][0] -
          tx2*(ue[ip1][1]-ue[im1][1])+
          dx1tx1*(ue[ip1][0]-2.0*ue[i][0]+ue[im1][0]);
        forcing[i][j][k][1] = forcing[i][j][k][1] -
          tx2 * ((ue[ip1][1]*buf[ip1][1]+c2*(ue[ip1][4]-q[ip1]))-
                 (ue[im1][1]*buf[im1][1]+c2*(ue[im1][4]-q[im1])))+
          xxcon1*(buf[ip1][1]-2.0*buf[i][1]+buf[im1][1])+
          dx2tx1*( ue[ip1][1]-2.0* ue[i][1]+ ue[im1][1]);
        forcing[i][j][k][2] = forcing[i][j][k][2] -
          tx2 * (ue[ip1][2]*buf[ip1][1]-ue[im1][2]*buf[im1][1])+
          xxcon2*(buf[ip1][2]-2.0*buf[i][2]+buf[im1][2])+
          dx3tx1*( ue[ip1][2]-2.0* ue[i][2]+ ue[im1][2]);
        forcing[i][j][k][3] = forcing[i][j][k][3] -
          tx2*(ue[ip1][3]*buf[ip1][1]-ue[im1][3]*buf[im1][1])+
          xxcon2*(buf[ip1][3]-2.0*buf[i][3]+buf[im1][3])+
          dx4tx1*( ue[ip1][3]-2.0* ue[i][3]+ ue[im1][3]);
        forcing[i][j][k][4] = forcing[i][j][k][4] -
          tx2*(buf[ip1][1]*(c1*ue[ip1][4]-c2*q[ip1])-
               buf[im1][1]*(c1*ue[im1][4]-c2*q[im1]))+
          0.5*xxcon3*(buf[ip1][0]-2.0*buf[i][0]+buf[im1][0])+
          xxcon4*(cuf[ip1]-2.0*cuf[i]+cuf[im1])+
          xxcon5*(buf[ip1][4]-2.0*buf[i][4]+buf[im1][4])+
          dx5tx1*( ue[ip1][4]-2.0* ue[i][4]+ ue[im1][4]);
      }
      /*--------------------------------------------------------------------
      c Fourth-order dissipation
      c-------------------------------------------------------------------*/
      /* one-sided stencils at i=1,2 near the low boundary ... */
      for (m = 0; m < 5; m++) {
        i = 1;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (5.0*ue[i][m] - 4.0*ue[i+1][m] +ue[i+2][m]);
        i = 2;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (-4.0*ue[i-1][m] + 6.0*ue[i][m] -
           4.0*ue[i+1][m] + ue[i+2][m]);
      }
      /* ... the full 5-point stencil in the interior ... */
      for (m = 0; m < 5; m++) {
        for (i = 1*3; i <= grid_points[0]-3*1-1; i++) {
          forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*
            (ue[i-2][m] - 4.0*ue[i-1][m] +
             6.0*ue[i][m] - 4.0*ue[i+1][m] + ue[i+2][m]);
        }
      }
      /* ... and one-sided stencils at the high boundary */
      for (m = 0; m < 5; m++) {
        i = grid_points[0]-3;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (ue[i-2][m] - 4.0*ue[i-1][m] +
           6.0*ue[i][m] - 4.0*ue[i+1][m]);
        i = grid_points[0]-2;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (ue[i-2][m] - 4.0*ue[i-1][m] + 5.0*ue[i][m]);
      }
    }
  }
  /*--------------------------------------------------------------------
  c eta-direction flux differences
  c-------------------------------------------------------------------*/
#pragma omp for private(k,j,m)
  for (i = 1; i < grid_points[0]-1; i++) {
    xi = (double)i * dnxm1;
    for (k = 1; k < grid_points[2]-1; k++) {
      zeta = (double)k * dnzm1;
      for (j = 0; j < grid_points[1]; j++) {
        eta = (double)j * dnym1;
        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++) {
          ue[j][m] = dtemp[m];
        }
        dtpp = 1.0/dtemp[0];
        for (m = 1; m <= 4; m++) {
          buf[j][m] = dtpp * dtemp[m];
        }
        cuf[j] = buf[j][2] * buf[j][2];
        buf[j][0] = cuf[j] + buf[j][1] * buf[j][1] +
          buf[j][3] * buf[j][3];
        q[j] = 0.5*(buf[j][1]*ue[j][1] + buf[j][2]*ue[j][2] +
                    buf[j][3]*ue[j][3]);
      }
      for (j = 1; j < grid_points[1]-1; j++) {
        jm1 = j-1;
        jp1 = j+1;
        forcing[i][j][k][0] = forcing[i][j][k][0] -
          ty2*( ue[jp1][2]-ue[jm1][2] )+
          dy1ty1*(ue[jp1][0]-2.0*ue[j][0]+ue[jm1][0]);
        forcing[i][j][k][1] = forcing[i][j][k][1] -
          ty2*(ue[jp1][1]*buf[jp1][2]-ue[jm1][1]*buf[jm1][2])+
          yycon2*(buf[jp1][1]-2.0*buf[j][1]+buf[jm1][1])+
          dy2ty1*( ue[jp1][1]-2.0* ue[j][1]+ ue[jm1][1]);
        forcing[i][j][k][2] = forcing[i][j][k][2] -
          ty2*((ue[jp1][2]*buf[jp1][2]+c2*(ue[jp1][4]-q[jp1]))-
               (ue[jm1][2]*buf[jm1][2]+c2*(ue[jm1][4]-q[jm1])))+
          yycon1*(buf[jp1][2]-2.0*buf[j][2]+buf[jm1][2])+
          dy3ty1*( ue[jp1][2]-2.0*ue[j][2] +ue[jm1][2]);
        forcing[i][j][k][3] = forcing[i][j][k][3] -
          ty2*(ue[jp1][3]*buf[jp1][2]-ue[jm1][3]*buf[jm1][2])+
          yycon2*(buf[jp1][3]-2.0*buf[j][3]+buf[jm1][3])+
          dy4ty1*( ue[jp1][3]-2.0*ue[j][3]+ ue[jm1][3]);
        forcing[i][j][k][4] = forcing[i][j][k][4] -
          ty2*(buf[jp1][2]*(c1*ue[jp1][4]-c2*q[jp1])-
               buf[jm1][2]*(c1*ue[jm1][4]-c2*q[jm1]))+
          0.5*yycon3*(buf[jp1][0]-2.0*buf[j][0]+
                      buf[jm1][0])+
          yycon4*(cuf[jp1]-2.0*cuf[j]+cuf[jm1])+
          yycon5*(buf[jp1][4]-2.0*buf[j][4]+buf[jm1][4])+
          dy5ty1*(ue[jp1][4]-2.0*ue[j][4]+ue[jm1][4]);
      }
      /*--------------------------------------------------------------------
      c Fourth-order dissipation
      c-------------------------------------------------------------------*/
      for (m = 0; m < 5; m++) {
        j = 1;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (5.0*ue[j][m] - 4.0*ue[j+1][m] +ue[j+2][m]);
        j = 2;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (-4.0*ue[j-1][m] + 6.0*ue[j][m] -
           4.0*ue[j+1][m] + ue[j+2][m]);
      }
      for (m = 0; m < 5; m++) {
        for (j = 1*3; j <= grid_points[1]-3*1-1; j++) {
          forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*
            (ue[j-2][m] - 4.0*ue[j-1][m] +
             6.0*ue[j][m] - 4.0*ue[j+1][m] + ue[j+2][m]);
        }
      }
      for (m = 0; m < 5; m++) {
        j = grid_points[1]-3;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (ue[j-2][m] - 4.0*ue[j-1][m] +
           6.0*ue[j][m] - 4.0*ue[j+1][m]);
        j = grid_points[1]-2;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (ue[j-2][m] - 4.0*ue[j-1][m] + 5.0*ue[j][m]);
      }
    }
  }
  /*--------------------------------------------------------------------
  c zeta-direction flux differences
  c-------------------------------------------------------------------*/
#pragma omp for private(j,k,m)
  for (i = 1; i < grid_points[0]-1; i++) {
    xi = (double)i * dnxm1;
    for (j = 1; j < grid_points[1]-1; j++) {
      eta = (double)j * dnym1;
      for (k = 0; k < grid_points[2]; k++) {
        zeta = (double)k * dnzm1;
        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++) {
          ue[k][m] = dtemp[m];
        }
        dtpp = 1.0/dtemp[0];
        for (m = 1; m <= 4; m++) {
          buf[k][m] = dtpp * dtemp[m];
        }
        cuf[k] = buf[k][3] * buf[k][3];
        buf[k][0] = cuf[k] + buf[k][1] * buf[k][1] +
          buf[k][2] * buf[k][2];
        q[k] = 0.5*(buf[k][1]*ue[k][1] + buf[k][2]*ue[k][2] +
                    buf[k][3]*ue[k][3]);
      }
      for (k = 1; k < grid_points[2]-1; k++) {
        km1 = k-1;
        kp1 = k+1;
        forcing[i][j][k][0] = forcing[i][j][k][0] -
          tz2*( ue[kp1][3]-ue[km1][3] )+
          dz1tz1*(ue[kp1][0]-2.0*ue[k][0]+ue[km1][0]);
        forcing[i][j][k][1] = forcing[i][j][k][1] -
          tz2 * (ue[kp1][1]*buf[kp1][3]-ue[km1][1]*buf[km1][3])+
          zzcon2*(buf[kp1][1]-2.0*buf[k][1]+buf[km1][1])+
          dz2tz1*( ue[kp1][1]-2.0* ue[k][1]+ ue[km1][1]);
        forcing[i][j][k][2] = forcing[i][j][k][2] -
          tz2 * (ue[kp1][2]*buf[kp1][3]-ue[km1][2]*buf[km1][3])+
          zzcon2*(buf[kp1][2]-2.0*buf[k][2]+buf[km1][2])+
          dz3tz1*(ue[kp1][2]-2.0*ue[k][2]+ue[km1][2]);
        forcing[i][j][k][3] = forcing[i][j][k][3] -
          tz2 * ((ue[kp1][3]*buf[kp1][3]+c2*(ue[kp1][4]-q[kp1]))-
                 (ue[km1][3]*buf[km1][3]+c2*(ue[km1][4]-q[km1])))+
          zzcon1*(buf[kp1][3]-2.0*buf[k][3]+buf[km1][3])+
          dz4tz1*( ue[kp1][3]-2.0*ue[k][3] +ue[km1][3]);
        forcing[i][j][k][4] = forcing[i][j][k][4] -
          tz2 * (buf[kp1][3]*(c1*ue[kp1][4]-c2*q[kp1])-
                 buf[km1][3]*(c1*ue[km1][4]-c2*q[km1]))+
          0.5*zzcon3*(buf[kp1][0]-2.0*buf[k][0]
                      +buf[km1][0])+
          zzcon4*(cuf[kp1]-2.0*cuf[k]+cuf[km1])+
          zzcon5*(buf[kp1][4]-2.0*buf[k][4]+buf[km1][4])+
          dz5tz1*( ue[kp1][4]-2.0*ue[k][4]+ ue[km1][4]);
      }
      /*--------------------------------------------------------------------
      c Fourth-order dissipation
      c-------------------------------------------------------------------*/
      for (m = 0; m < 5; m++) {
        k = 1;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (5.0*ue[k][m] - 4.0*ue[k+1][m] +ue[k+2][m]);
        k = 2;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (-4.0*ue[k-1][m] + 6.0*ue[k][m] -
           4.0*ue[k+1][m] + ue[k+2][m]);
      }
      for (m = 0; m < 5; m++) {
        for (k = 1*3; k <= grid_points[2]-3*1-1; k++) {
          forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*
            (ue[k-2][m] - 4.0*ue[k-1][m] +
             6.0*ue[k][m] - 4.0*ue[k+1][m] + ue[k+2][m]);
        }
      }
      for (m = 0; m < 5; m++) {
        k = grid_points[2]-3;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (ue[k-2][m] - 4.0*ue[k-1][m] +
           6.0*ue[k][m] - 4.0*ue[k+1][m]);
        k = grid_points[2]-2;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (ue[k-2][m] - 4.0*ue[k-1][m] + 5.0*ue[k][m]);
      }
    }
  }
  /*--------------------------------------------------------------------
  c now change the sign of the forcing function
  c-------------------------------------------------------------------*/
#pragma omp for private(j,k,m)
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        for (m = 0; m < 5; m++) {
          forcing[i][j][k][m] = -1.0 * forcing[i][j][k][m];
        }
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void exact_solution(double xi, double eta, double zeta,
double dtemp[5]) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c this function returns the exact solution at point xi, eta, zeta
c-------------------------------------------------------------------*/
int m;
for (m = 0; m < 5; m++) {
dtemp[m] = ce[m][0] +
xi*(ce[m][1] + xi*(ce[m][4] + xi*(ce[m][7]
+ xi*ce[m][10]))) +
eta*(ce[m][2] + eta*(ce[m][5] + eta*(ce[m][8]
+ eta*ce[m][11])))+
zeta*(ce[m][3] + zeta*(ce[m][6] + zeta*(ce[m][9] +
zeta*ce[m][12])));
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void initialize(void) {
  /*--------------------------------------------------------------------
  c This subroutine initializes the field variable u using
  c tri-linear transfinite interpolation of the boundary values
  c-------------------------------------------------------------------*/
  int i, j, k, m, ix, iy, iz;
  double xi, eta, zeta, Pface[2][3][5], Pxi, Peta, Pzeta, temp[5];
  /*--------------------------------------------------------------------
  c Later (in compute_rhs) we compute 1/u for every element. A few of
  c the corner elements are not used, but it convenient (and faster)
  c to compute the whole thing with a simple loop. Make sure those
  c values are nonzero by initializing the whole thing here.
  c-------------------------------------------------------------------*/
  /* NOTE(review): all three loop bounds below are IMAX — this assumes
     IMAX == JMAX == KMAX (true in standard NPB BT builds); confirm. */
#pragma omp for private(j,k,m)
  for (i = 0; i < IMAX; i++) {
    for (j = 0; j < IMAX; j++) {
      for (k = 0; k < IMAX; k++) {
        for (m = 0; m < 5; m++) {
          u[i][j][k][m] = 1.0;
        }
      }
    }
  }
  /*--------------------------------------------------------------------
  c first store the "interpolated" values everywhere on the grid
  c-------------------------------------------------------------------*/
#pragma omp for private(j,k,ix,iy,iz,m)
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      for (k = 0; k < grid_points[2]; k++) {
        zeta = (double)k * dnzm1;
        /* exact solution on the two opposing faces of each direction */
        for (ix = 0; ix < 2; ix++) {
          exact_solution((double)ix, eta, zeta,
                         &(Pface[ix][0][0]));
        }
        for (iy = 0; iy < 2; iy++) {
          exact_solution(xi, (double)iy , zeta,
                         &Pface[iy][1][0]);
        }
        for (iz = 0; iz < 2; iz++) {
          exact_solution(xi, eta, (double)iz,
                         &Pface[iz][2][0]);
        }
        /* tri-linear transfinite interpolation (inclusion-exclusion
           over the three pairs of face values) */
        for (m = 0; m < 5; m++) {
          Pxi = xi * Pface[1][0][m] +
            (1.0-xi) * Pface[0][0][m];
          Peta = eta * Pface[1][1][m] +
            (1.0-eta) * Pface[0][1][m];
          Pzeta = zeta * Pface[1][2][m] +
            (1.0-zeta) * Pface[0][2][m];
          u[i][j][k][m] = Pxi + Peta + Pzeta -
            Pxi*Peta - Pxi*Pzeta - Peta*Pzeta +
            Pxi*Peta*Pzeta;
        }
      }
    }
  }
  /*--------------------------------------------------------------------
  c now store the exact values on the boundaries
  c-------------------------------------------------------------------*/
  /*--------------------------------------------------------------------
  c west face
  c-------------------------------------------------------------------*/
  i = 0;
  xi = 0.0;
#pragma omp for private(k,m) nowait
  for (j = 0; j < grid_points[1]; j++) {
    eta = (double)j * dnym1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[i][j][k][m] = temp[m];
      }
    }
  }
  /*--------------------------------------------------------------------
  c east face
  c-------------------------------------------------------------------*/
  i = grid_points[0]-1;
  xi = 1.0;
#pragma omp for private(k,m)
  for (j = 0; j < grid_points[1]; j++) {
    eta = (double)j * dnym1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[i][j][k][m] = temp[m];
      }
    }
  }
  /*--------------------------------------------------------------------
  c south face
  c-------------------------------------------------------------------*/
  j = 0;
  eta = 0.0;
#pragma omp for private(k,m) nowait
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[i][j][k][m] = temp[m];
      }
    }
  }
  /*--------------------------------------------------------------------
  c north face
  c-------------------------------------------------------------------*/
  j = grid_points[1]-1;
  eta = 1.0;
#pragma omp for private(k,m)
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[i][j][k][m] = temp[m];
      }
    }
  }
  /*--------------------------------------------------------------------
  c bottom face
  c-------------------------------------------------------------------*/
  k = 0;
  zeta = 0.0;
#pragma omp for private(j,m) nowait
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i *dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[i][j][k][m] = temp[m];
      }
    }
  }
  /*--------------------------------------------------------------------
  c top face
  c-------------------------------------------------------------------*/
  k = grid_points[2]-1;
  zeta = 1.0;
#pragma omp for private(j,m)
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[i][j][k][m] = temp[m];
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsinit(void) {
  /* Initialize the block-tridiagonal left hand side: clear all three
     5x5 sub-blocks at every grid point, then set the middle block of
     each point to the identity matrix. */
  int i, j, k, m, n;

  /* zero the whole left hand side for starters */
#pragma omp for private(j,k,m,n)
  for (i = 0; i < grid_points[0]; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 0; k < grid_points[2]; k++) {
        for (m = 0; m < 5; m++) {
          for (n = 0; n < 5; n++) {
            lhs[i][j][k][0][m][n] =
              lhs[i][j][k][1][m][n] =
                lhs[i][j][k][2][m][n] = 0.0;
          }
        }
      }
    }
  }

  /* next, set all diagonal values to 1. This is overkill, but convenient */
#pragma omp for private(j,k,m)
  for (i = 0; i < grid_points[0]; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 0; k < grid_points[2]; k++) {
        for (m = 0; m < 5; m++) {
          lhs[i][j][k][1][m][m] = 1.0;
        }
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsx(void) {
  /*--------------------------------------------------------------------
  c This function computes the left hand side in the xi-direction
  c-------------------------------------------------------------------*/
  /* Two phases per (j,k) line: (1) evaluate the flux Jacobian `fjac`
     and viscous Jacobian `njac` at every i along the line; (2) combine
     them into the three 5x5 LHS blocks, where AA couples to i-1,
     BB is the diagonal block at i, and CC couples to i+1.
     NOTE(review): tmp1/tmp2/tmp3 are file-scope temporaries reused
     across loops — presumably threadprivate; confirm against the
     header before restructuring. */
  int i, j, k;
  /*--------------------------------------------------------------------
  c determine a (labeled f) and n jacobians
  c-------------------------------------------------------------------*/
#pragma omp for private(k,i)
  for (j = 1; j < grid_points[1]-1; j++) {
    for (k = 1; k < grid_points[2]-1; k++) {
      for (i = 0; i < grid_points[0]; i++) {
        /* tmp1 = 1/rho, tmp2 = 1/rho^2, tmp3 = 1/rho^3 */
        tmp1 = 1.0 / u[i][j][k][0];
        tmp2 = tmp1 * tmp1;
        tmp3 = tmp1 * tmp2;
        fjac[ i][ j][ k][0][0] = 0.0;
        fjac[ i][ j][ k][0][1] = 1.0;
        fjac[ i][ j][ k][0][2] = 0.0;
        fjac[ i][ j][ k][0][3] = 0.0;
        fjac[ i][ j][ k][0][4] = 0.0;
        fjac[ i][ j][ k][1][0] = -(u[i][j][k][1] * tmp2 *
                                   u[i][j][k][1])
          + c2 * 0.50 * (u[i][j][k][1] * u[i][j][k][1]
                         + u[i][j][k][2] * u[i][j][k][2]
                         + u[i][j][k][3] * u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][1][1] = ( 2.0 - c2 )
          * ( u[i][j][k][1] / u[i][j][k][0] );
        fjac[i][j][k][1][2] = - c2 * ( u[i][j][k][2] * tmp1 );
        fjac[i][j][k][1][3] = - c2 * ( u[i][j][k][3] * tmp1 );
        fjac[i][j][k][1][4] = c2;
        fjac[i][j][k][2][0] = - ( u[i][j][k][1]*u[i][j][k][2] ) * tmp2;
        fjac[i][j][k][2][1] = u[i][j][k][2] * tmp1;
        fjac[i][j][k][2][2] = u[i][j][k][1] * tmp1;
        fjac[i][j][k][2][3] = 0.0;
        fjac[i][j][k][2][4] = 0.0;
        fjac[i][j][k][3][0] = - ( u[i][j][k][1]*u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][3][1] = u[i][j][k][3] * tmp1;
        fjac[i][j][k][3][2] = 0.0;
        fjac[i][j][k][3][3] = u[i][j][k][1] * tmp1;
        fjac[i][j][k][3][4] = 0.0;
        fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
                                       + u[i][j][k][2] * u[i][j][k][2]
                                       + u[i][j][k][3] * u[i][j][k][3] ) * tmp2
                                - c1 * ( u[i][j][k][4] * tmp1 ) )
          * ( u[i][j][k][1] * tmp1 );
        fjac[i][j][k][4][1] = c1 * u[i][j][k][4] * tmp1
          - 0.50 * c2
          * ( 3.0*u[i][j][k][1]*u[i][j][k][1]
              + u[i][j][k][2]*u[i][j][k][2]
              + u[i][j][k][3]*u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][1] )
          * tmp2;
        fjac[i][j][k][4][3] = - c2 * ( u[i][j][k][3]*u[i][j][k][1] )
          * tmp2;
        fjac[i][j][k][4][4] = c1 * ( u[i][j][k][1] * tmp1 );
        njac[i][j][k][0][0] = 0.0;
        njac[i][j][k][0][1] = 0.0;
        njac[i][j][k][0][2] = 0.0;
        njac[i][j][k][0][3] = 0.0;
        njac[i][j][k][0][4] = 0.0;
        njac[i][j][k][1][0] = - con43 * c3c4 * tmp2 * u[i][j][k][1];
        njac[i][j][k][1][1] = con43 * c3c4 * tmp1;
        njac[i][j][k][1][2] = 0.0;
        njac[i][j][k][1][3] = 0.0;
        njac[i][j][k][1][4] = 0.0;
        njac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2];
        njac[i][j][k][2][1] = 0.0;
        njac[i][j][k][2][2] = c3c4 * tmp1;
        njac[i][j][k][2][3] = 0.0;
        njac[i][j][k][2][4] = 0.0;
        njac[i][j][k][3][0] = - c3c4 * tmp2 * u[i][j][k][3];
        njac[i][j][k][3][1] = 0.0;
        njac[i][j][k][3][2] = 0.0;
        njac[i][j][k][3][3] = c3c4 * tmp1;
        njac[i][j][k][3][4] = 0.0;
        njac[i][j][k][4][0] = - ( con43 * c3c4
                                  - c1345 ) * tmp3 * (pow2(u[i][j][k][1]))
          - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))
          - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))
          - c1345 * tmp2 * u[i][j][k][4];
        njac[i][j][k][4][1] = ( con43 * c3c4
                                - c1345 ) * tmp2 * u[i][j][k][1];
        njac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2];
        njac[i][j][k][4][3] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][3];
        njac[i][j][k][4][4] = ( c1345 ) * tmp1;
      }
      /*--------------------------------------------------------------------
      c now jacobians set, so form left hand side in x direction
      c-------------------------------------------------------------------*/
      for (i = 1; i < grid_points[0]-1; i++) {
        /* tmp1/tmp2 are repurposed here as dt-scaled factors */
        tmp1 = dt * tx1;
        tmp2 = dt * tx2;
        /* AA block: coupling to the i-1 neighbor */
        lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i-1][j][k][0][0]
          - tmp1 * njac[i-1][j][k][0][0]
          - tmp1 * dx1;
        lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i-1][j][k][0][1]
          - tmp1 * njac[i-1][j][k][0][1];
        lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i-1][j][k][0][2]
          - tmp1 * njac[i-1][j][k][0][2];
        lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i-1][j][k][0][3]
          - tmp1 * njac[i-1][j][k][0][3];
        lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i-1][j][k][0][4]
          - tmp1 * njac[i-1][j][k][0][4];
        lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i-1][j][k][1][0]
          - tmp1 * njac[i-1][j][k][1][0];
        lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i-1][j][k][1][1]
          - tmp1 * njac[i-1][j][k][1][1]
          - tmp1 * dx2;
        lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i-1][j][k][1][2]
          - tmp1 * njac[i-1][j][k][1][2];
        lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i-1][j][k][1][3]
          - tmp1 * njac[i-1][j][k][1][3];
        lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i-1][j][k][1][4]
          - tmp1 * njac[i-1][j][k][1][4];
        lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i-1][j][k][2][0]
          - tmp1 * njac[i-1][j][k][2][0];
        lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i-1][j][k][2][1]
          - tmp1 * njac[i-1][j][k][2][1];
        lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i-1][j][k][2][2]
          - tmp1 * njac[i-1][j][k][2][2]
          - tmp1 * dx3;
        lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i-1][j][k][2][3]
          - tmp1 * njac[i-1][j][k][2][3];
        lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i-1][j][k][2][4]
          - tmp1 * njac[i-1][j][k][2][4];
        lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i-1][j][k][3][0]
          - tmp1 * njac[i-1][j][k][3][0];
        lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i-1][j][k][3][1]
          - tmp1 * njac[i-1][j][k][3][1];
        lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i-1][j][k][3][2]
          - tmp1 * njac[i-1][j][k][3][2];
        lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i-1][j][k][3][3]
          - tmp1 * njac[i-1][j][k][3][3]
          - tmp1 * dx4;
        lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i-1][j][k][3][4]
          - tmp1 * njac[i-1][j][k][3][4];
        lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i-1][j][k][4][0]
          - tmp1 * njac[i-1][j][k][4][0];
        lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i-1][j][k][4][1]
          - tmp1 * njac[i-1][j][k][4][1];
        lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i-1][j][k][4][2]
          - tmp1 * njac[i-1][j][k][4][2];
        lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i-1][j][k][4][3]
          - tmp1 * njac[i-1][j][k][4][3];
        lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i-1][j][k][4][4]
          - tmp1 * njac[i-1][j][k][4][4]
          - tmp1 * dx5;
        /* BB block: identity + viscous terms + dissipation at i */
        lhs[i][j][k][BB][0][0] = 1.0
          + tmp1 * 2.0 * njac[i][j][k][0][0]
          + tmp1 * 2.0 * dx1;
        lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
        lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
        lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
        lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];
        lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
        lhs[i][j][k][BB][1][1] = 1.0
          + tmp1 * 2.0 * njac[i][j][k][1][1]
          + tmp1 * 2.0 * dx2;
        lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
        lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
        lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];
        lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
        lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
        lhs[i][j][k][BB][2][2] = 1.0
          + tmp1 * 2.0 * njac[i][j][k][2][2]
          + tmp1 * 2.0 * dx3;
        lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
        lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];
        lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
        lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
        lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
        lhs[i][j][k][BB][3][3] = 1.0
          + tmp1 * 2.0 * njac[i][j][k][3][3]
          + tmp1 * 2.0 * dx4;
        lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];
        lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
        lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
        lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
        lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
        lhs[i][j][k][BB][4][4] = 1.0
          + tmp1 * 2.0 * njac[i][j][k][4][4]
          + tmp1 * 2.0 * dx5;
        /* CC block: coupling to the i+1 neighbor */
        lhs[i][j][k][CC][0][0] = tmp2 * fjac[i+1][j][k][0][0]
          - tmp1 * njac[i+1][j][k][0][0]
          - tmp1 * dx1;
        lhs[i][j][k][CC][0][1] = tmp2 * fjac[i+1][j][k][0][1]
          - tmp1 * njac[i+1][j][k][0][1];
        lhs[i][j][k][CC][0][2] = tmp2 * fjac[i+1][j][k][0][2]
          - tmp1 * njac[i+1][j][k][0][2];
        lhs[i][j][k][CC][0][3] = tmp2 * fjac[i+1][j][k][0][3]
          - tmp1 * njac[i+1][j][k][0][3];
        lhs[i][j][k][CC][0][4] = tmp2 * fjac[i+1][j][k][0][4]
          - tmp1 * njac[i+1][j][k][0][4];
        lhs[i][j][k][CC][1][0] = tmp2 * fjac[i+1][j][k][1][0]
          - tmp1 * njac[i+1][j][k][1][0];
        lhs[i][j][k][CC][1][1] = tmp2 * fjac[i+1][j][k][1][1]
          - tmp1 * njac[i+1][j][k][1][1]
          - tmp1 * dx2;
        lhs[i][j][k][CC][1][2] = tmp2 * fjac[i+1][j][k][1][2]
          - tmp1 * njac[i+1][j][k][1][2];
        lhs[i][j][k][CC][1][3] = tmp2 * fjac[i+1][j][k][1][3]
          - tmp1 * njac[i+1][j][k][1][3];
        lhs[i][j][k][CC][1][4] = tmp2 * fjac[i+1][j][k][1][4]
          - tmp1 * njac[i+1][j][k][1][4];
        lhs[i][j][k][CC][2][0] = tmp2 * fjac[i+1][j][k][2][0]
          - tmp1 * njac[i+1][j][k][2][0];
        lhs[i][j][k][CC][2][1] = tmp2 * fjac[i+1][j][k][2][1]
          - tmp1 * njac[i+1][j][k][2][1];
        lhs[i][j][k][CC][2][2] = tmp2 * fjac[i+1][j][k][2][2]
          - tmp1 * njac[i+1][j][k][2][2]
          - tmp1 * dx3;
        lhs[i][j][k][CC][2][3] = tmp2 * fjac[i+1][j][k][2][3]
          - tmp1 * njac[i+1][j][k][2][3];
        lhs[i][j][k][CC][2][4] = tmp2 * fjac[i+1][j][k][2][4]
          - tmp1 * njac[i+1][j][k][2][4];
        lhs[i][j][k][CC][3][0] = tmp2 * fjac[i+1][j][k][3][0]
          - tmp1 * njac[i+1][j][k][3][0];
        lhs[i][j][k][CC][3][1] = tmp2 * fjac[i+1][j][k][3][1]
          - tmp1 * njac[i+1][j][k][3][1];
        lhs[i][j][k][CC][3][2] = tmp2 * fjac[i+1][j][k][3][2]
          - tmp1 * njac[i+1][j][k][3][2];
        lhs[i][j][k][CC][3][3] = tmp2 * fjac[i+1][j][k][3][3]
          - tmp1 * njac[i+1][j][k][3][3]
          - tmp1 * dx4;
        lhs[i][j][k][CC][3][4] = tmp2 * fjac[i+1][j][k][3][4]
          - tmp1 * njac[i+1][j][k][3][4];
        lhs[i][j][k][CC][4][0] = tmp2 * fjac[i+1][j][k][4][0]
          - tmp1 * njac[i+1][j][k][4][0];
        lhs[i][j][k][CC][4][1] = tmp2 * fjac[i+1][j][k][4][1]
          - tmp1 * njac[i+1][j][k][4][1];
        lhs[i][j][k][CC][4][2] = tmp2 * fjac[i+1][j][k][4][2]
          - tmp1 * njac[i+1][j][k][4][2];
        lhs[i][j][k][CC][4][3] = tmp2 * fjac[i+1][j][k][4][3]
          - tmp1 * njac[i+1][j][k][4][3];
        lhs[i][j][k][CC][4][4] = tmp2 * fjac[i+1][j][k][4][4]
          - tmp1 * njac[i+1][j][k][4][4]
          - tmp1 * dx5;
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsy(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c This function computes the left hand side for the three y-factors
c-------------------------------------------------------------------*/
/* Builds the block-tridiagonal left-hand side lhs[i][j][k][AA|BB|CC]
   (5x5 blocks) for the eta (y) line solves.  Pass 1 evaluates the
   flux Jacobian fjac and the viscous Jacobian njac at every cell;
   pass 2 combines them with dt*ty1/dt*ty2 and the dissipation
   constants dy1..dy5 into the sub-diagonal (AA, couples to j-1),
   diagonal (BB, at j) and super-diagonal (CC, couples to j+1)
   blocks. */
int i, j, k;
/*--------------------------------------------------------------------
c Compute the indices for storing the tri-diagonal matrix;
c determine a (labeled f) and n jacobians for cell c
c-------------------------------------------------------------------*/
/* NOTE(review): tmp1/tmp2/tmp3 are file-scope scratch variables used
   inside an omp worksharing loop -- presumably declared threadprivate
   elsewhere; confirm against the globals header. */
/* j spans the FULL extent (0..grid_points[1]-1) because pass 2 below
   reads fjac/njac at both j-1 and j+1. */
#pragma omp for private(j,k)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
/* tmp1 = 1/rho, tmp2 = 1/rho^2, tmp3 = 1/rho^3 */
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
/* fjac: Jacobian of the inviscid eta-direction flux, row by row */
fjac[ i][ j][ k][0][0] = 0.0;
fjac[ i][ j][ k][0][1] = 0.0;
fjac[ i][ j][ k][0][2] = 1.0;
fjac[ i][ j][ k][0][3] = 0.0;
fjac[ i][ j][ k][0][4] = 0.0;
fjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][2] )
* tmp2;
fjac[i][j][k][1][1] = u[i][j][k][2] * tmp1;
fjac[i][j][k][1][2] = u[i][j][k][1] * tmp1;
fjac[i][j][k][1][3] = 0.0;
fjac[i][j][k][1][4] = 0.0;
fjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][2]*tmp2)
+ 0.50 * c2 * ( ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
* tmp2 );
fjac[i][j][k][2][1] = - c2 * u[i][j][k][1] * tmp1;
fjac[i][j][k][2][2] = ( 2.0 - c2 )
* u[i][j][k][2] * tmp1;
fjac[i][j][k][2][3] = - c2 * u[i][j][k][3] * tmp1;
fjac[i][j][k][2][4] = c2;
fjac[i][j][k][3][0] = - ( u[i][j][k][2]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][3][1] = 0.0;
fjac[i][j][k][3][2] = u[i][j][k][3] * tmp1;
fjac[i][j][k][3][3] = u[i][j][k][2] * tmp1;
fjac[i][j][k][3][4] = 0.0;
fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
* tmp2
- c1 * u[i][j][k][4] * tmp1 )
* u[i][j][k][2] * tmp1;
fjac[i][j][k][4][1] = - c2 * u[i][j][k][1]*u[i][j][k][2]
* tmp2;
fjac[i][j][k][4][2] = c1 * u[i][j][k][4] * tmp1
- 0.50 * c2
* ( ( u[i][j][k][1]*u[i][j][k][1]
+ 3.0 * u[i][j][k][2]*u[i][j][k][2]
+ u[i][j][k][3]*u[i][j][k][3] )
* tmp2 );
fjac[i][j][k][4][3] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][4][4] = c1 * u[i][j][k][2] * tmp1;
/* njac: Jacobian of the viscous eta-direction flux; the v-momentum
   row (index 2) carries the con43 factor in this direction */
njac[i][j][k][0][0] = 0.0;
njac[i][j][k][0][1] = 0.0;
njac[i][j][k][0][2] = 0.0;
njac[i][j][k][0][3] = 0.0;
njac[i][j][k][0][4] = 0.0;
njac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1];
njac[i][j][k][1][1] = c3c4 * tmp1;
njac[i][j][k][1][2] = 0.0;
njac[i][j][k][1][3] = 0.0;
njac[i][j][k][1][4] = 0.0;
njac[i][j][k][2][0] = - con43 * c3c4 * tmp2 * u[i][j][k][2];
njac[i][j][k][2][1] = 0.0;
njac[i][j][k][2][2] = con43 * c3c4 * tmp1;
njac[i][j][k][2][3] = 0.0;
njac[i][j][k][2][4] = 0.0;
njac[i][j][k][3][0] = - c3c4 * tmp2 * u[i][j][k][3];
njac[i][j][k][3][1] = 0.0;
njac[i][j][k][3][2] = 0.0;
njac[i][j][k][3][3] = c3c4 * tmp1;
njac[i][j][k][3][4] = 0.0;
njac[i][j][k][4][0] = - ( c3c4
- c1345 ) * tmp3 * (pow2(u[i][j][k][1]))
- ( con43 * c3c4
- c1345 ) * tmp3 * (pow2(u[i][j][k][2]))
- ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))
- c1345 * tmp2 * u[i][j][k][4];
njac[i][j][k][4][1] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][1];
njac[i][j][k][4][2] = ( con43 * c3c4
- c1345 ) * tmp2 * u[i][j][k][2];
njac[i][j][k][4][3] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][3];
njac[i][j][k][4][4] = ( c1345 ) * tmp1;
}
}
}
/*--------------------------------------------------------------------
c now joacobians set, so form left hand side in y direction
c-------------------------------------------------------------------*/
/* Pass 2: interior cells only.  The implied barrier of the previous
   omp for guarantees all fjac/njac entries (including the j-1/j+1
   neighbors read here) are complete before this loop starts. */
#pragma omp for private(j,k)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
/* tmp1 scales the viscous terms, tmp2 the convective terms */
tmp1 = dt * ty1;
tmp2 = dt * ty2;
/* AA: sub-diagonal block, built from the Jacobians at j-1 */
lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j-1][k][0][0]
- tmp1 * njac[i][j-1][k][0][0]
- tmp1 * dy1;
lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j-1][k][0][1]
- tmp1 * njac[i][j-1][k][0][1];
lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j-1][k][0][2]
- tmp1 * njac[i][j-1][k][0][2];
lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j-1][k][0][3]
- tmp1 * njac[i][j-1][k][0][3];
lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j-1][k][0][4]
- tmp1 * njac[i][j-1][k][0][4];
lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j-1][k][1][0]
- tmp1 * njac[i][j-1][k][1][0];
lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j-1][k][1][1]
- tmp1 * njac[i][j-1][k][1][1]
- tmp1 * dy2;
lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j-1][k][1][2]
- tmp1 * njac[i][j-1][k][1][2];
lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j-1][k][1][3]
- tmp1 * njac[i][j-1][k][1][3];
lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i][j-1][k][1][4]
- tmp1 * njac[i][j-1][k][1][4];
lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j-1][k][2][0]
- tmp1 * njac[i][j-1][k][2][0];
lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j-1][k][2][1]
- tmp1 * njac[i][j-1][k][2][1];
lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j-1][k][2][2]
- tmp1 * njac[i][j-1][k][2][2]
- tmp1 * dy3;
lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j-1][k][2][3]
- tmp1 * njac[i][j-1][k][2][3];
lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j-1][k][2][4]
- tmp1 * njac[i][j-1][k][2][4];
lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i][j-1][k][3][0]
- tmp1 * njac[i][j-1][k][3][0];
lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j-1][k][3][1]
- tmp1 * njac[i][j-1][k][3][1];
lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j-1][k][3][2]
- tmp1 * njac[i][j-1][k][3][2];
lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j-1][k][3][3]
- tmp1 * njac[i][j-1][k][3][3]
- tmp1 * dy4;
lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j-1][k][3][4]
- tmp1 * njac[i][j-1][k][3][4];
lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j-1][k][4][0]
- tmp1 * njac[i][j-1][k][4][0];
lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j-1][k][4][1]
- tmp1 * njac[i][j-1][k][4][1];
lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j-1][k][4][2]
- tmp1 * njac[i][j-1][k][4][2];
lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j-1][k][4][3]
- tmp1 * njac[i][j-1][k][4][3];
lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j-1][k][4][4]
- tmp1 * njac[i][j-1][k][4][4]
- tmp1 * dy5;
/* BB: diagonal block = I + 2*tmp1*njac(j) + 2*tmp1*dy (fjac at j
   cancels out of the centered difference, so only njac appears) */
lhs[i][j][k][BB][0][0] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][0][0]
+ tmp1 * 2.0 * dy1;
lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];
lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
lhs[i][j][k][BB][1][1] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][1][1]
+ tmp1 * 2.0 * dy2;
lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];
lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
lhs[i][j][k][BB][2][2] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][2][2]
+ tmp1 * 2.0 * dy3;
lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];
lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
lhs[i][j][k][BB][3][3] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][3][3]
+ tmp1 * 2.0 * dy4;
lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];
lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
lhs[i][j][k][BB][4][4] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][4][4]
+ tmp1 * 2.0 * dy5;
/* CC: super-diagonal block, from the Jacobians at j+1 (sign of the
   convective term flips relative to AA) */
lhs[i][j][k][CC][0][0] = tmp2 * fjac[i][j+1][k][0][0]
- tmp1 * njac[i][j+1][k][0][0]
- tmp1 * dy1;
lhs[i][j][k][CC][0][1] = tmp2 * fjac[i][j+1][k][0][1]
- tmp1 * njac[i][j+1][k][0][1];
lhs[i][j][k][CC][0][2] = tmp2 * fjac[i][j+1][k][0][2]
- tmp1 * njac[i][j+1][k][0][2];
lhs[i][j][k][CC][0][3] = tmp2 * fjac[i][j+1][k][0][3]
- tmp1 * njac[i][j+1][k][0][3];
lhs[i][j][k][CC][0][4] = tmp2 * fjac[i][j+1][k][0][4]
- tmp1 * njac[i][j+1][k][0][4];
lhs[i][j][k][CC][1][0] = tmp2 * fjac[i][j+1][k][1][0]
- tmp1 * njac[i][j+1][k][1][0];
lhs[i][j][k][CC][1][1] = tmp2 * fjac[i][j+1][k][1][1]
- tmp1 * njac[i][j+1][k][1][1]
- tmp1 * dy2;
lhs[i][j][k][CC][1][2] = tmp2 * fjac[i][j+1][k][1][2]
- tmp1 * njac[i][j+1][k][1][2];
lhs[i][j][k][CC][1][3] = tmp2 * fjac[i][j+1][k][1][3]
- tmp1 * njac[i][j+1][k][1][3];
lhs[i][j][k][CC][1][4] = tmp2 * fjac[i][j+1][k][1][4]
- tmp1 * njac[i][j+1][k][1][4];
lhs[i][j][k][CC][2][0] = tmp2 * fjac[i][j+1][k][2][0]
- tmp1 * njac[i][j+1][k][2][0];
lhs[i][j][k][CC][2][1] = tmp2 * fjac[i][j+1][k][2][1]
- tmp1 * njac[i][j+1][k][2][1];
lhs[i][j][k][CC][2][2] = tmp2 * fjac[i][j+1][k][2][2]
- tmp1 * njac[i][j+1][k][2][2]
- tmp1 * dy3;
lhs[i][j][k][CC][2][3] = tmp2 * fjac[i][j+1][k][2][3]
- tmp1 * njac[i][j+1][k][2][3];
lhs[i][j][k][CC][2][4] = tmp2 * fjac[i][j+1][k][2][4]
- tmp1 * njac[i][j+1][k][2][4];
lhs[i][j][k][CC][3][0] = tmp2 * fjac[i][j+1][k][3][0]
- tmp1 * njac[i][j+1][k][3][0];
lhs[i][j][k][CC][3][1] = tmp2 * fjac[i][j+1][k][3][1]
- tmp1 * njac[i][j+1][k][3][1];
lhs[i][j][k][CC][3][2] = tmp2 * fjac[i][j+1][k][3][2]
- tmp1 * njac[i][j+1][k][3][2];
lhs[i][j][k][CC][3][3] = tmp2 * fjac[i][j+1][k][3][3]
- tmp1 * njac[i][j+1][k][3][3]
- tmp1 * dy4;
lhs[i][j][k][CC][3][4] = tmp2 * fjac[i][j+1][k][3][4]
- tmp1 * njac[i][j+1][k][3][4];
lhs[i][j][k][CC][4][0] = tmp2 * fjac[i][j+1][k][4][0]
- tmp1 * njac[i][j+1][k][4][0];
lhs[i][j][k][CC][4][1] = tmp2 * fjac[i][j+1][k][4][1]
- tmp1 * njac[i][j+1][k][4][1];
lhs[i][j][k][CC][4][2] = tmp2 * fjac[i][j+1][k][4][2]
- tmp1 * njac[i][j+1][k][4][2];
lhs[i][j][k][CC][4][3] = tmp2 * fjac[i][j+1][k][4][3]
- tmp1 * njac[i][j+1][k][4][3];
lhs[i][j][k][CC][4][4] = tmp2 * fjac[i][j+1][k][4][4]
- tmp1 * njac[i][j+1][k][4][4]
- tmp1 * dy5;
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsz(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c This function computes the left hand side for the three z-factors
c-------------------------------------------------------------------*/
/* Builds the block-tridiagonal left-hand side lhs[i][j][k][AA|BB|CC]
   (5x5 blocks) for the zeta (z) line solves.  Structure mirrors
   lhsy(): pass 1 evaluates fjac/njac per cell, pass 2 combines them
   with dt*tz1/dt*tz2 and dz1..dz5 into the AA (k-1), BB (k) and
   CC (k+1) blocks. */
int i, j, k;
/*--------------------------------------------------------------------
c Compute the indices for storing the block-diagonal matrix;
c determine c (labeled f) and s jacobians
c---------------------------------------------------------------------*/
/* k spans the FULL extent (0..grid_points[2]-1) because pass 2 below
   reads fjac/njac at both k-1 and k+1.  tmp1/tmp2/tmp3 are file-scope
   scratch variables -- presumably threadprivate; confirm. */
#pragma omp for private(j,k)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 0; k < grid_points[2]; k++) {
/* tmp1 = 1/rho, tmp2 = 1/rho^2, tmp3 = 1/rho^3 */
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
/* fjac: Jacobian of the inviscid zeta-direction flux */
fjac[i][j][k][0][0] = 0.0;
fjac[i][j][k][0][1] = 0.0;
fjac[i][j][k][0][2] = 0.0;
fjac[i][j][k][0][3] = 1.0;
fjac[i][j][k][0][4] = 0.0;
fjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][1][1] = u[i][j][k][3] * tmp1;
fjac[i][j][k][1][2] = 0.0;
fjac[i][j][k][1][3] = u[i][j][k][1] * tmp1;
fjac[i][j][k][1][4] = 0.0;
fjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][2][1] = 0.0;
fjac[i][j][k][2][2] = u[i][j][k][3] * tmp1;
fjac[i][j][k][2][3] = u[i][j][k][2] * tmp1;
fjac[i][j][k][2][4] = 0.0;
fjac[i][j][k][3][0] = - (u[i][j][k][3]*u[i][j][k][3] * tmp2 )
+ 0.50 * c2 * ( ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] ) * tmp2 );
fjac[i][j][k][3][1] = - c2 * u[i][j][k][1] * tmp1;
fjac[i][j][k][3][2] = - c2 * u[i][j][k][2] * tmp1;
fjac[i][j][k][3][3] = ( 2.0 - c2 )
* u[i][j][k][3] * tmp1;
fjac[i][j][k][3][4] = c2;
fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
* tmp2
- c1 * ( u[i][j][k][4] * tmp1 ) )
* ( u[i][j][k][3] * tmp1 );
fjac[i][j][k][4][1] = - c2 * ( u[i][j][k][1]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][4][3] = c1 * ( u[i][j][k][4] * tmp1 )
- 0.50 * c2
* ( ( u[i][j][k][1]*u[i][j][k][1]
+ u[i][j][k][2]*u[i][j][k][2]
+ 3.0*u[i][j][k][3]*u[i][j][k][3] )
* tmp2 );
fjac[i][j][k][4][4] = c1 * u[i][j][k][3] * tmp1;
/* njac: viscous Jacobian; the w-momentum row (index 3) carries the
   con43 factor in this direction */
njac[i][j][k][0][0] = 0.0;
njac[i][j][k][0][1] = 0.0;
njac[i][j][k][0][2] = 0.0;
njac[i][j][k][0][3] = 0.0;
njac[i][j][k][0][4] = 0.0;
njac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1];
njac[i][j][k][1][1] = c3c4 * tmp1;
njac[i][j][k][1][2] = 0.0;
njac[i][j][k][1][3] = 0.0;
njac[i][j][k][1][4] = 0.0;
njac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2];
njac[i][j][k][2][1] = 0.0;
njac[i][j][k][2][2] = c3c4 * tmp1;
njac[i][j][k][2][3] = 0.0;
njac[i][j][k][2][4] = 0.0;
njac[i][j][k][3][0] = - con43 * c3c4 * tmp2 * u[i][j][k][3];
njac[i][j][k][3][1] = 0.0;
njac[i][j][k][3][2] = 0.0;
/* NOTE(review): `c3 * c4` here where the sibling lhsy() uses the
   precomputed c3c4; identical value (c3c4 = c3*c4 and c4 == 1.0 in
   set_constants), but inconsistent style -- consider unifying. */
njac[i][j][k][3][3] = con43 * c3 * c4 * tmp1;
njac[i][j][k][3][4] = 0.0;
njac[i][j][k][4][0] = - ( c3c4
- c1345 ) * tmp3 * (pow2(u[i][j][k][1]))
- ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))
- ( con43 * c3c4
- c1345 ) * tmp3 * (pow2(u[i][j][k][3]))
- c1345 * tmp2 * u[i][j][k][4];
njac[i][j][k][4][1] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][1];
njac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2];
njac[i][j][k][4][3] = ( con43 * c3c4
- c1345 ) * tmp2 * u[i][j][k][3];
njac[i][j][k][4][4] = ( c1345 )* tmp1;
}
}
}
/*--------------------------------------------------------------------
c now jacobians set, so form left hand side in z direction
c-------------------------------------------------------------------*/
/* Pass 2: interior cells.  The implied barrier of the previous omp
   for guarantees the k-1/k+1 neighbor Jacobians are complete. */
#pragma omp for private(j,k)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
/* tmp1 scales the viscous terms, tmp2 the convective terms */
tmp1 = dt * tz1;
tmp2 = dt * tz2;
/* AA: sub-diagonal block, from the Jacobians at k-1 */
lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j][k-1][0][0]
- tmp1 * njac[i][j][k-1][0][0]
- tmp1 * dz1;
lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j][k-1][0][1]
- tmp1 * njac[i][j][k-1][0][1];
lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j][k-1][0][2]
- tmp1 * njac[i][j][k-1][0][2];
lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j][k-1][0][3]
- tmp1 * njac[i][j][k-1][0][3];
lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j][k-1][0][4]
- tmp1 * njac[i][j][k-1][0][4];
lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j][k-1][1][0]
- tmp1 * njac[i][j][k-1][1][0];
lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j][k-1][1][1]
- tmp1 * njac[i][j][k-1][1][1]
- tmp1 * dz2;
lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j][k-1][1][2]
- tmp1 * njac[i][j][k-1][1][2];
lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j][k-1][1][3]
- tmp1 * njac[i][j][k-1][1][3];
lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i][j][k-1][1][4]
- tmp1 * njac[i][j][k-1][1][4];
lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j][k-1][2][0]
- tmp1 * njac[i][j][k-1][2][0];
lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j][k-1][2][1]
- tmp1 * njac[i][j][k-1][2][1];
lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j][k-1][2][2]
- tmp1 * njac[i][j][k-1][2][2]
- tmp1 * dz3;
lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j][k-1][2][3]
- tmp1 * njac[i][j][k-1][2][3];
lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j][k-1][2][4]
- tmp1 * njac[i][j][k-1][2][4];
lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i][j][k-1][3][0]
- tmp1 * njac[i][j][k-1][3][0];
lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j][k-1][3][1]
- tmp1 * njac[i][j][k-1][3][1];
lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j][k-1][3][2]
- tmp1 * njac[i][j][k-1][3][2];
lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j][k-1][3][3]
- tmp1 * njac[i][j][k-1][3][3]
- tmp1 * dz4;
lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j][k-1][3][4]
- tmp1 * njac[i][j][k-1][3][4];
lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j][k-1][4][0]
- tmp1 * njac[i][j][k-1][4][0];
lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j][k-1][4][1]
- tmp1 * njac[i][j][k-1][4][1];
lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j][k-1][4][2]
- tmp1 * njac[i][j][k-1][4][2];
lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j][k-1][4][3]
- tmp1 * njac[i][j][k-1][4][3];
lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j][k-1][4][4]
- tmp1 * njac[i][j][k-1][4][4]
- tmp1 * dz5;
/* BB: diagonal block = I + 2*tmp1*njac(k) + 2*tmp1*dz */
lhs[i][j][k][BB][0][0] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][0][0]
+ tmp1 * 2.0 * dz1;
lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];
lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
lhs[i][j][k][BB][1][1] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][1][1]
+ tmp1 * 2.0 * dz2;
lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];
lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
lhs[i][j][k][BB][2][2] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][2][2]
+ tmp1 * 2.0 * dz3;
lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];
lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
lhs[i][j][k][BB][3][3] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][3][3]
+ tmp1 * 2.0 * dz4;
lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];
lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
lhs[i][j][k][BB][4][4] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][4][4]
+ tmp1 * 2.0 * dz5;
/* CC: super-diagonal block, from the Jacobians at k+1 */
lhs[i][j][k][CC][0][0] = tmp2 * fjac[i][j][k+1][0][0]
- tmp1 * njac[i][j][k+1][0][0]
- tmp1 * dz1;
lhs[i][j][k][CC][0][1] = tmp2 * fjac[i][j][k+1][0][1]
- tmp1 * njac[i][j][k+1][0][1];
lhs[i][j][k][CC][0][2] = tmp2 * fjac[i][j][k+1][0][2]
- tmp1 * njac[i][j][k+1][0][2];
lhs[i][j][k][CC][0][3] = tmp2 * fjac[i][j][k+1][0][3]
- tmp1 * njac[i][j][k+1][0][3];
lhs[i][j][k][CC][0][4] = tmp2 * fjac[i][j][k+1][0][4]
- tmp1 * njac[i][j][k+1][0][4];
lhs[i][j][k][CC][1][0] = tmp2 * fjac[i][j][k+1][1][0]
- tmp1 * njac[i][j][k+1][1][0];
lhs[i][j][k][CC][1][1] = tmp2 * fjac[i][j][k+1][1][1]
- tmp1 * njac[i][j][k+1][1][1]
- tmp1 * dz2;
lhs[i][j][k][CC][1][2] = tmp2 * fjac[i][j][k+1][1][2]
- tmp1 * njac[i][j][k+1][1][2];
lhs[i][j][k][CC][1][3] = tmp2 * fjac[i][j][k+1][1][3]
- tmp1 * njac[i][j][k+1][1][3];
lhs[i][j][k][CC][1][4] = tmp2 * fjac[i][j][k+1][1][4]
- tmp1 * njac[i][j][k+1][1][4];
lhs[i][j][k][CC][2][0] = tmp2 * fjac[i][j][k+1][2][0]
- tmp1 * njac[i][j][k+1][2][0];
lhs[i][j][k][CC][2][1] = tmp2 * fjac[i][j][k+1][2][1]
- tmp1 * njac[i][j][k+1][2][1];
lhs[i][j][k][CC][2][2] = tmp2 * fjac[i][j][k+1][2][2]
- tmp1 * njac[i][j][k+1][2][2]
- tmp1 * dz3;
lhs[i][j][k][CC][2][3] = tmp2 * fjac[i][j][k+1][2][3]
- tmp1 * njac[i][j][k+1][2][3];
lhs[i][j][k][CC][2][4] = tmp2 * fjac[i][j][k+1][2][4]
- tmp1 * njac[i][j][k+1][2][4];
lhs[i][j][k][CC][3][0] = tmp2 * fjac[i][j][k+1][3][0]
- tmp1 * njac[i][j][k+1][3][0];
lhs[i][j][k][CC][3][1] = tmp2 * fjac[i][j][k+1][3][1]
- tmp1 * njac[i][j][k+1][3][1];
lhs[i][j][k][CC][3][2] = tmp2 * fjac[i][j][k+1][3][2]
- tmp1 * njac[i][j][k+1][3][2];
lhs[i][j][k][CC][3][3] = tmp2 * fjac[i][j][k+1][3][3]
- tmp1 * njac[i][j][k+1][3][3]
- tmp1 * dz4;
lhs[i][j][k][CC][3][4] = tmp2 * fjac[i][j][k+1][3][4]
- tmp1 * njac[i][j][k+1][3][4];
lhs[i][j][k][CC][4][0] = tmp2 * fjac[i][j][k+1][4][0]
- tmp1 * njac[i][j][k+1][4][0];
lhs[i][j][k][CC][4][1] = tmp2 * fjac[i][j][k+1][4][1]
- tmp1 * njac[i][j][k+1][4][1];
lhs[i][j][k][CC][4][2] = tmp2 * fjac[i][j][k+1][4][2]
- tmp1 * njac[i][j][k+1][4][2];
lhs[i][j][k][CC][4][3] = tmp2 * fjac[i][j][k+1][4][3]
- tmp1 * njac[i][j][k+1][4][3];
lhs[i][j][k][CC][4][4] = tmp2 * fjac[i][j][k+1][4][4]
- tmp1 * njac[i][j][k+1][4][4]
- tmp1 * dz5;
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void compute_rhs(void) {
/* Assembles the right-hand side of the BT system:
   1. precompute per-cell derived quantities (1/rho, velocities,
      kinetic-energy terms),
   2. seed rhs with the exact forcing term,
   3. add second-order central-difference fluxes in xi, eta and zeta,
   4. add fourth-order artificial dissipation in each direction
      (special 3-point/4-point stencils at the two cells nearest each
      boundary),
   5. scale everything by dt.
   Runs inside an enclosing omp parallel region; each `omp for` below
   shares one loop nest among the threads. */
int i, j, k, m;
double rho_inv, uijk, up1, um1, vijk, vp1, vm1, wijk, wp1, wm1;
/*--------------------------------------------------------------------
c compute the reciprocal of density, and the kinetic energy,
c and the speed of sound.
c-------------------------------------------------------------------*/
/* nowait is safe: the next loop reads only forcing[], none of the
   arrays written here. */
#pragma omp for private(j,k) nowait
for (i = 0; i < grid_points[0]; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 0; k < grid_points[2]; k++) {
rho_inv = 1.0/u[i][j][k][0];
rho_i[i][j][k] = rho_inv;
us[i][j][k] = u[i][j][k][1] * rho_inv;
vs[i][j][k] = u[i][j][k][2] * rho_inv;
ws[i][j][k] = u[i][j][k][3] * rho_inv;
square[i][j][k] = 0.5 * (u[i][j][k][1]*u[i][j][k][1] +
u[i][j][k][2]*u[i][j][k][2] +
u[i][j][k][3]*u[i][j][k][3] ) * rho_inv;
qs[i][j][k] = square[i][j][k] * rho_inv;
}
}
}
/*--------------------------------------------------------------------
c copy the exact forcing term to the right hand side; because
c this forcing term is known, we can store it on the whole grid
c including the boundary
c-------------------------------------------------------------------*/
/* No nowait here: the implied barrier also guarantees rho_i/us/vs/
   ws/square/qs from the loop above are complete before the flux
   loops read them. */
#pragma omp for private(j,k,m)
for (i = 0; i < grid_points[0]; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 0; k < grid_points[2]; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = forcing[i][j][k][m];
}
}
}
}
/*--------------------------------------------------------------------
c compute xi-direction fluxes
c-------------------------------------------------------------------*/
/* Central differences in i for each of the 5 equations: diffusive
   terms (dx*tx1, xxcon*) plus convective terms (tx2). */
#pragma omp for private(j,k)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
uijk = us[i][j][k];
up1 = us[i+1][j][k];
um1 = us[i-1][j][k];
rhs[i][j][k][0] = rhs[i][j][k][0] + dx1tx1 *
(u[i+1][j][k][0] - 2.0*u[i][j][k][0] +
u[i-1][j][k][0]) -
tx2 * (u[i+1][j][k][1] - u[i-1][j][k][1]);
rhs[i][j][k][1] = rhs[i][j][k][1] + dx2tx1 *
(u[i+1][j][k][1] - 2.0*u[i][j][k][1] +
u[i-1][j][k][1]) +
xxcon2*con43 * (up1 - 2.0*uijk + um1) -
tx2 * (u[i+1][j][k][1]*up1 -
u[i-1][j][k][1]*um1 +
(u[i+1][j][k][4]- square[i+1][j][k]-
u[i-1][j][k][4]+ square[i-1][j][k])*
c2);
rhs[i][j][k][2] = rhs[i][j][k][2] + dx3tx1 *
(u[i+1][j][k][2] - 2.0*u[i][j][k][2] +
u[i-1][j][k][2]) +
xxcon2 * (vs[i+1][j][k] - 2.0*vs[i][j][k] +
vs[i-1][j][k]) -
tx2 * (u[i+1][j][k][2]*up1 -
u[i-1][j][k][2]*um1);
rhs[i][j][k][3] = rhs[i][j][k][3] + dx4tx1 *
(u[i+1][j][k][3] - 2.0*u[i][j][k][3] +
u[i-1][j][k][3]) +
xxcon2 * (ws[i+1][j][k] - 2.0*ws[i][j][k] +
ws[i-1][j][k]) -
tx2 * (u[i+1][j][k][3]*up1 -
u[i-1][j][k][3]*um1);
rhs[i][j][k][4] = rhs[i][j][k][4] + dx5tx1 *
(u[i+1][j][k][4] - 2.0*u[i][j][k][4] +
u[i-1][j][k][4]) +
xxcon3 * (qs[i+1][j][k] - 2.0*qs[i][j][k] +
qs[i-1][j][k]) +
xxcon4 * (up1*up1 - 2.0*uijk*uijk +
um1*um1) +
xxcon5 * (u[i+1][j][k][4]*rho_i[i+1][j][k] -
2.0*u[i][j][k][4]*rho_i[i][j][k] +
u[i-1][j][k][4]*rho_i[i-1][j][k]) -
tx2 * ( (c1*u[i+1][j][k][4] -
c2*square[i+1][j][k])*up1 -
(c1*u[i-1][j][k][4] -
c2*square[i-1][j][k])*um1 );
}
}
}
/*--------------------------------------------------------------------
c add fourth order xi-direction dissipation
c-------------------------------------------------------------------*/
/* One-sided dissipation stencils near the boundaries (i=1, i=2 and
   the mirrored pair at the far end) plus the full 5-point stencil in
   the interior.  The nowait clauses are safe because each loop
   writes a disjoint set of i-planes of rhs and only reads u; the
   final loop (no nowait) restores the barrier before the eta pass. */
i = 1;
#pragma omp for private(k,m) nowait
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m]- dssp *
( 5.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] +
u[i+2][j][k][m]);
}
}
}
i = 2;
#pragma omp for private(k,m) nowait
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
(-4.0*u[i-1][j][k][m] + 6.0*u[i][j][k][m] -
4.0*u[i+1][j][k][m] + u[i+2][j][k][m]);
}
}
}
#pragma omp for private(j,k,m) nowait
for (i = 3; i < grid_points[0]-3; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] +
6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] +
u[i+2][j][k][m] );
}
}
}
}
i = grid_points[0]-3;
#pragma omp for private(k,m) nowait
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] +
6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] );
}
}
}
i = grid_points[0]-2;
#pragma omp for private(k,m)
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i-2][j][k][m] - 4.*u[i-1][j][k][m] +
5.0*u[i][j][k][m] );
}
}
}
/*--------------------------------------------------------------------
c compute eta-direction fluxes
c-------------------------------------------------------------------*/
/* Same structure as the xi pass, differencing in j with the ty/yy
   constants; con43 now multiplies the v-momentum diffusive term. */
#pragma omp for private(j,k)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
vijk = vs[i][j][k];
vp1 = vs[i][j+1][k];
vm1 = vs[i][j-1][k];
rhs[i][j][k][0] = rhs[i][j][k][0] + dy1ty1 *
(u[i][j+1][k][0] - 2.0*u[i][j][k][0] +
u[i][j-1][k][0]) -
ty2 * (u[i][j+1][k][2] - u[i][j-1][k][2]);
rhs[i][j][k][1] = rhs[i][j][k][1] + dy2ty1 *
(u[i][j+1][k][1] - 2.0*u[i][j][k][1] +
u[i][j-1][k][1]) +
yycon2 * (us[i][j+1][k] - 2.0*us[i][j][k] +
us[i][j-1][k]) -
ty2 * (u[i][j+1][k][1]*vp1 -
u[i][j-1][k][1]*vm1);
rhs[i][j][k][2] = rhs[i][j][k][2] + dy3ty1 *
(u[i][j+1][k][2] - 2.0*u[i][j][k][2] +
u[i][j-1][k][2]) +
yycon2*con43 * (vp1 - 2.0*vijk + vm1) -
ty2 * (u[i][j+1][k][2]*vp1 -
u[i][j-1][k][2]*vm1 +
(u[i][j+1][k][4] - square[i][j+1][k] -
u[i][j-1][k][4] + square[i][j-1][k])
*c2);
rhs[i][j][k][3] = rhs[i][j][k][3] + dy4ty1 *
(u[i][j+1][k][3] - 2.0*u[i][j][k][3] +
u[i][j-1][k][3]) +
yycon2 * (ws[i][j+1][k] - 2.0*ws[i][j][k] +
ws[i][j-1][k]) -
ty2 * (u[i][j+1][k][3]*vp1 -
u[i][j-1][k][3]*vm1);
rhs[i][j][k][4] = rhs[i][j][k][4] + dy5ty1 *
(u[i][j+1][k][4] - 2.0*u[i][j][k][4] +
u[i][j-1][k][4]) +
yycon3 * (qs[i][j+1][k] - 2.0*qs[i][j][k] +
qs[i][j-1][k]) +
yycon4 * (vp1*vp1 - 2.0*vijk*vijk +
vm1*vm1) +
yycon5 * (u[i][j+1][k][4]*rho_i[i][j+1][k] -
2.0*u[i][j][k][4]*rho_i[i][j][k] +
u[i][j-1][k][4]*rho_i[i][j-1][k]) -
ty2 * ((c1*u[i][j+1][k][4] -
c2*square[i][j+1][k]) * vp1 -
(c1*u[i][j-1][k][4] -
c2*square[i][j-1][k]) * vm1);
}
}
}
/*--------------------------------------------------------------------
c add fourth order eta-direction dissipation
c-------------------------------------------------------------------*/
/* Boundary-adjusted dissipation in j; disjoint j-planes make the
   nowait clauses safe, and the last loop's implied barrier precedes
   the zeta pass. */
j = 1;
#pragma omp for private(k,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m]- dssp *
( 5.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] +
u[i][j+2][k][m]);
}
}
}
j = 2;
#pragma omp for private(k,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
(-4.0*u[i][j-1][k][m] + 6.0*u[i][j][k][m] -
4.0*u[i][j+1][k][m] + u[i][j+2][k][m]);
}
}
}
#pragma omp for private(j,k,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 3; j < grid_points[1]-3; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] +
6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] +
u[i][j+2][k][m] );
}
}
}
}
j = grid_points[1]-3;
#pragma omp for private(k,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] +
6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] );
}
}
}
j = grid_points[1]-2;
#pragma omp for private(k,m)
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j-2][k][m] - 4.*u[i][j-1][k][m] +
5.*u[i][j][k][m] );
}
}
}
/*--------------------------------------------------------------------
c compute zeta-direction fluxes
c-------------------------------------------------------------------*/
/* Same structure again, differencing in k with the tz/zz constants;
   con43 now multiplies the w-momentum diffusive term. */
#pragma omp for private(j,k)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
wijk = ws[i][j][k];
wp1 = ws[i][j][k+1];
wm1 = ws[i][j][k-1];
rhs[i][j][k][0] = rhs[i][j][k][0] + dz1tz1 *
(u[i][j][k+1][0] - 2.0*u[i][j][k][0] +
u[i][j][k-1][0]) -
tz2 * (u[i][j][k+1][3] - u[i][j][k-1][3]);
rhs[i][j][k][1] = rhs[i][j][k][1] + dz2tz1 *
(u[i][j][k+1][1] - 2.0*u[i][j][k][1] +
u[i][j][k-1][1]) +
zzcon2 * (us[i][j][k+1] - 2.0*us[i][j][k] +
us[i][j][k-1]) -
tz2 * (u[i][j][k+1][1]*wp1 -
u[i][j][k-1][1]*wm1);
rhs[i][j][k][2] = rhs[i][j][k][2] + dz3tz1 *
(u[i][j][k+1][2] - 2.0*u[i][j][k][2] +
u[i][j][k-1][2]) +
zzcon2 * (vs[i][j][k+1] - 2.0*vs[i][j][k] +
vs[i][j][k-1]) -
tz2 * (u[i][j][k+1][2]*wp1 -
u[i][j][k-1][2]*wm1);
rhs[i][j][k][3] = rhs[i][j][k][3] + dz4tz1 *
(u[i][j][k+1][3] - 2.0*u[i][j][k][3] +
u[i][j][k-1][3]) +
zzcon2*con43 * (wp1 - 2.0*wijk + wm1) -
tz2 * (u[i][j][k+1][3]*wp1 -
u[i][j][k-1][3]*wm1 +
(u[i][j][k+1][4] - square[i][j][k+1] -
u[i][j][k-1][4] + square[i][j][k-1])
*c2);
rhs[i][j][k][4] = rhs[i][j][k][4] + dz5tz1 *
(u[i][j][k+1][4] - 2.0*u[i][j][k][4] +
u[i][j][k-1][4]) +
zzcon3 * (qs[i][j][k+1] - 2.0*qs[i][j][k] +
qs[i][j][k-1]) +
zzcon4 * (wp1*wp1 - 2.0*wijk*wijk +
wm1*wm1) +
zzcon5 * (u[i][j][k+1][4]*rho_i[i][j][k+1] -
2.0*u[i][j][k][4]*rho_i[i][j][k] +
u[i][j][k-1][4]*rho_i[i][j][k-1]) -
tz2 * ( (c1*u[i][j][k+1][4] -
c2*square[i][j][k+1])*wp1 -
(c1*u[i][j][k-1][4] -
c2*square[i][j][k-1])*wm1);
}
}
}
/*--------------------------------------------------------------------
c add fourth order zeta-direction dissipation
c-------------------------------------------------------------------*/
/* Boundary-adjusted dissipation in k; disjoint k-planes make the
   nowait clauses safe, and the last loop's implied barrier precedes
   the final dt scaling. */
k = 1;
#pragma omp for private(j,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m]- dssp *
( 5.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] +
u[i][j][k+2][m]);
}
}
}
k = 2;
#pragma omp for private(j,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
(-4.0*u[i][j][k-1][m] + 6.0*u[i][j][k][m] -
4.0*u[i][j][k+1][m] + u[i][j][k+2][m]);
}
}
}
#pragma omp for private(j,k,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 3; k < grid_points[2]-3; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +
6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] +
u[i][j][k+2][m] );
}
}
}
}
k = grid_points[2]-3;
#pragma omp for private(j,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +
6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] );
}
}
}
k = grid_points[2]-2;
#pragma omp for private(j,m)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +
5.0*u[i][j][k][m] );
}
}
}
/* Final step: scale the assembled residual by the time step dt. */
#pragma omp for private(k,m,i)
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
for (i = 1; i < grid_points[0]-1; i++) {
rhs[i][j][k][m] = rhs[i][j][k][m] * dt;
}
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void set_constants(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
ce[0][0] = 2.0;
ce[0][1] = 0.0;
ce[0][2] = 0.0;
ce[0][3] = 4.0;
ce[0][4] = 5.0;
ce[0][5] = 3.0;
ce[0][6] = 0.5;
ce[0][7] = 0.02;
ce[0][8] = 0.01;
ce[0][9] = 0.03;
ce[0][10] = 0.5;
ce[0][11] = 0.4;
ce[0][12] = 0.3;
ce[1][0] = 1.0;
ce[1][1] = 0.0;
ce[1][2] = 0.0;
ce[1][3] = 0.0;
ce[1][4] = 1.0;
ce[1][5] = 2.0;
ce[1][6] = 3.0;
ce[1][7] = 0.01;
ce[1][8] = 0.03;
ce[1][9] = 0.02;
ce[1][10] = 0.4;
ce[1][11] = 0.3;
ce[1][12] = 0.5;
ce[2][0] = 2.0;
ce[2][1] = 2.0;
ce[2][2] = 0.0;
ce[2][3] = 0.0;
ce[2][4] = 0.0;
ce[2][5] = 2.0;
ce[2][6] = 3.0;
ce[2][7] = 0.04;
ce[2][8] = 0.03;
ce[2][9] = 0.05;
ce[2][10] = 0.3;
ce[2][11] = 0.5;
ce[2][12] = 0.4;
ce[3][0] = 2.0;
ce[3][1] = 2.0;
ce[3][2] = 0.0;
ce[3][3] = 0.0;
ce[3][4] = 0.0;
ce[3][5] = 2.0;
ce[3][6] = 3.0;
ce[3][7] = 0.03;
ce[3][8] = 0.05;
ce[3][9] = 0.04;
ce[3][10] = 0.2;
ce[3][11] = 0.1;
ce[3][12] = 0.3;
ce[4][0] = 5.0;
ce[4][1] = 4.0;
ce[4][2] = 3.0;
ce[4][3] = 2.0;
ce[4][4] = 0.1;
ce[4][5] = 0.4;
ce[4][6] = 0.3;
ce[4][7] = 0.05;
ce[4][8] = 0.04;
ce[4][9] = 0.03;
ce[4][10] = 0.1;
ce[4][11] = 0.3;
ce[4][12] = 0.2;
c1 = 1.4;
c2 = 0.4;
c3 = 0.1;
c4 = 1.0;
c5 = 1.4;
dnxm1 = 1.0 / (double)(grid_points[0]-1);
dnym1 = 1.0 / (double)(grid_points[1]-1);
dnzm1 = 1.0 / (double)(grid_points[2]-1);
c1c2 = c1 * c2;
c1c5 = c1 * c5;
c3c4 = c3 * c4;
c1345 = c1c5 * c3c4;
conz1 = (1.0-c1c5);
tx1 = 1.0 / (dnxm1 * dnxm1);
tx2 = 1.0 / (2.0 * dnxm1);
tx3 = 1.0 / dnxm1;
ty1 = 1.0 / (dnym1 * dnym1);
ty2 = 1.0 / (2.0 * dnym1);
ty3 = 1.0 / dnym1;
tz1 = 1.0 / (dnzm1 * dnzm1);
tz2 = 1.0 / (2.0 * dnzm1);
tz3 = 1.0 / dnzm1;
dx1 = 0.75;
dx2 = 0.75;
dx3 = 0.75;
dx4 = 0.75;
dx5 = 0.75;
dy1 = 0.75;
dy2 = 0.75;
dy3 = 0.75;
dy4 = 0.75;
dy5 = 0.75;
dz1 = 1.0;
dz2 = 1.0;
dz3 = 1.0;
dz4 = 1.0;
dz5 = 1.0;
dxmax = max(dx3, dx4);
dymax = max(dy2, dy4);
dzmax = max(dz2, dz3);
dssp = 0.25 * max(dx1, max(dy1, dz1) );
c4dssp = 4.0 * dssp;
c5dssp = 5.0 * dssp;
dttx1 = dt*tx1;
dttx2 = dt*tx2;
dtty1 = dt*ty1;
dtty2 = dt*ty2;
dttz1 = dt*tz1;
dttz2 = dt*tz2;
c2dttx1 = 2.0*dttx1;
c2dtty1 = 2.0*dtty1;
c2dttz1 = 2.0*dttz1;
dtdssp = dt*dssp;
comz1 = dtdssp;
comz4 = 4.0*dtdssp;
comz5 = 5.0*dtdssp;
comz6 = 6.0*dtdssp;
c3c4tx3 = c3c4*tx3;
c3c4ty3 = c3c4*ty3;
c3c4tz3 = c3c4*tz3;
dx1tx1 = dx1*tx1;
dx2tx1 = dx2*tx1;
dx3tx1 = dx3*tx1;
dx4tx1 = dx4*tx1;
dx5tx1 = dx5*tx1;
dy1ty1 = dy1*ty1;
dy2ty1 = dy2*ty1;
dy3ty1 = dy3*ty1;
dy4ty1 = dy4*ty1;
dy5ty1 = dy5*ty1;
dz1tz1 = dz1*tz1;
dz2tz1 = dz2*tz1;
dz3tz1 = dz3*tz1;
dz4tz1 = dz4*tz1;
dz5tz1 = dz5*tz1;
c2iv = 2.5;
con43 = 4.0/3.0;
con16 = 1.0/6.0;
xxcon1 = c3c4tx3*con43*tx3;
xxcon2 = c3c4tx3*tx3;
xxcon3 = c3c4tx3*conz1*tx3;
xxcon4 = c3c4tx3*con16*tx3;
xxcon5 = c3c4tx3*c1c5*tx3;
yycon1 = c3c4ty3*con43*ty3;
yycon2 = c3c4ty3*ty3;
yycon3 = c3c4ty3*conz1*ty3;
yycon4 = c3c4ty3*con16*ty3;
yycon5 = c3c4ty3*c1c5*ty3;
zzcon1 = c3c4tz3*con43*tz3;
zzcon2 = c3c4tz3*tz3;
zzcon3 = c3c4tz3*conz1*tz3;
zzcon4 = c3c4tz3*con16*tz3;
zzcon5 = c3c4tz3*c1c5*tz3;
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void verify(int no_time_steps, char *cclass, boolean *verified) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c verification routine: compares the computed residual (xcr) and
c solution-error (xce) RMS norms against stored reference values for
c the known problem classes.  On return, *cclass is 'S', 'W', 'A',
c 'B', 'C', or 'U' (unknown grid/step combination), and *verified is
c TRUE only when every relative difference is within epsilon and dt
c matches the reference time step.
c-------------------------------------------------------------------*/
double xcrref[5],xceref[5],xcrdif[5],xcedif[5],
epsilon, xce[5], xcr[5], dtref;
int m;
/*--------------------------------------------------------------------
c tolerance level
c-------------------------------------------------------------------*/
epsilon = 1.0e-08;
/*--------------------------------------------------------------------
c compute the error norm and the residual norm
c (the residual norm is scaled by 1/dt so it is comparable across dt)
c-------------------------------------------------------------------*/
error_norm(xce);
compute_rhs();
rhs_norm(xcr);
for (m = 0; m < 5; m++) {
xcr[m] = xcr[m] / dt;
}
/* default to "unknown class" until a reference case matches */
*cclass = 'U';
*verified = TRUE;
for (m = 0; m < 5; m++) {
xcrref[m] = 1.0;
xceref[m] = 1.0;
}
/*--------------------------------------------------------------------
c reference data for 12X12X12 grids after 60 time steps, with DT = 1.0d-02
c (class S; note the original comment said 100 steps but the check below
c requires no_time_steps == 60)
c-------------------------------------------------------------------*/
if (grid_points[0] == 12 &&
grid_points[1] == 12 &&
grid_points[2] == 12 &&
no_time_steps == 60) {
*cclass = 'S';
dtref = 1.0e-2;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
xcrref[0] = 1.7034283709541311e-01;
xcrref[1] = 1.2975252070034097e-02;
xcrref[2] = 3.2527926989486055e-02;
xcrref[3] = 2.6436421275166801e-02;
xcrref[4] = 1.9211784131744430e-01;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
xceref[0] = 4.9976913345811579e-04;
xceref[1] = 4.5195666782961927e-05;
xceref[2] = 7.3973765172921357e-05;
xceref[3] = 7.3821238632439731e-05;
xceref[4] = 8.9269630987491446e-04;
/*--------------------------------------------------------------------
c reference data for 24X24X24 grids after 200 time steps, with DT = 0.8d-3
c (class W)
c-------------------------------------------------------------------*/
} else if (grid_points[0] == 24 &&
grid_points[1] == 24 &&
grid_points[2] == 24 &&
no_time_steps == 200) {
*cclass = 'W';
dtref = 0.8e-3;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
xcrref[0] = 0.1125590409344e+03;
xcrref[1] = 0.1180007595731e+02;
xcrref[2] = 0.2710329767846e+02;
xcrref[3] = 0.2469174937669e+02;
xcrref[4] = 0.2638427874317e+03;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
xceref[0] = 0.4419655736008e+01;
xceref[1] = 0.4638531260002e+00;
xceref[2] = 0.1011551749967e+01;
xceref[3] = 0.9235878729944e+00;
xceref[4] = 0.1018045837718e+02;
/*--------------------------------------------------------------------
c reference data for 64X64X64 grids after 200 time steps, with DT = 0.8d-3
c (class A)
c-------------------------------------------------------------------*/
} else if (grid_points[0] == 64 &&
grid_points[1] == 64 &&
grid_points[2] == 64 &&
no_time_steps == 200) {
*cclass = 'A';
dtref = 0.8e-3;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
xcrref[0] = 1.0806346714637264e+02;
xcrref[1] = 1.1319730901220813e+01;
xcrref[2] = 2.5974354511582465e+01;
xcrref[3] = 2.3665622544678910e+01;
xcrref[4] = 2.5278963211748344e+02;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
xceref[0] = 4.2348416040525025e+00;
xceref[1] = 4.4390282496995698e-01;
xceref[2] = 9.6692480136345650e-01;
xceref[3] = 8.8302063039765474e-01;
xceref[4] = 9.7379901770829278e+00;
/*--------------------------------------------------------------------
c reference data for 102X102X102 grids after 200 time steps,
c with DT = 3.0d-04 (class B)
c-------------------------------------------------------------------*/
} else if (grid_points[0] == 102 &&
grid_points[1] == 102 &&
grid_points[2] == 102 &&
no_time_steps == 200) {
*cclass = 'B';
dtref = 3.0e-4;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
xcrref[0] = 1.4233597229287254e+03;
xcrref[1] = 9.9330522590150238e+01;
xcrref[2] = 3.5646025644535285e+02;
xcrref[3] = 3.2485447959084092e+02;
xcrref[4] = 3.2707541254659363e+03;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
xceref[0] = 5.2969847140936856e+01;
xceref[1] = 4.4632896115670668e+00;
xceref[2] = 1.3122573342210174e+01;
xceref[3] = 1.2006925323559144e+01;
xceref[4] = 1.2459576151035986e+02;
/*--------------------------------------------------------------------
c reference data for 162X162X162 grids after 200 time steps,
c with DT = 1.0d-04 (class C)
c-------------------------------------------------------------------*/
} else if (grid_points[0] == 162 &&
grid_points[1] == 162 &&
grid_points[2] == 162 &&
no_time_steps == 200) {
*cclass = 'C';
dtref = 1.0e-4;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
xcrref[0] = 0.62398116551764615e+04;
xcrref[1] = 0.50793239190423964e+03;
xcrref[2] = 0.15423530093013596e+04;
xcrref[3] = 0.13302387929291190e+04;
xcrref[4] = 0.11604087428436455e+05;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
xceref[0] = 0.16462008369091265e+03;
xceref[1] = 0.11497107903824313e+02;
xceref[2] = 0.41207446207461508e+02;
xceref[3] = 0.37087651059694167e+02;
xceref[4] = 0.36211053051841265e+03;
} else {
/* grid/step combination matches no stored reference case */
*verified = FALSE;
}
/*--------------------------------------------------------------------
c verification test for residuals if gridsize is either 12X12X12 or
c 24X24X24 or 64X64X64 or 102X102X102 or 162X162X162
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Compute the relative difference of solution values and the known
c reference values (references were preset to 1.0 for the unknown case,
c so the divisions below are always safe).
c-------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]);
xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]);
}
/*--------------------------------------------------------------------
c Output the comparison of computed results to known cases.
c A dt mismatch demotes the run back to the unknown class.
c-------------------------------------------------------------------*/
if (*cclass != 'U') {
printf(" Verification being performed for class %1c\n", *cclass);
printf(" accuracy setting for epsilon = %20.13e\n", epsilon);
if (fabs(dt-dtref) > epsilon) {
*verified = FALSE;
*cclass = 'U';
printf(" DT does not match the reference value of %15.8e\n", dtref);
}
} else {
printf(" Unknown class\n");
}
if (*cclass != 'U') {
printf(" Comparison of RMS-norms of residual\n");
} else {
printf(" RMS-norms of residual\n");
}
for (m = 0; m < 5; m++) {
if (*cclass == 'U') {
printf(" %2d%20.13e\n", m, xcr[m]);
} else if (xcrdif[m] > epsilon) {
*verified = FALSE;
printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n",
m, xcr[m], xcrref[m], xcrdif[m]);
} else {
printf(" %2d%20.13e%20.13e%20.13e\n",
m, xcr[m], xcrref[m], xcrdif[m]);
}
}
if (*cclass != 'U') {
printf(" Comparison of RMS-norms of solution error\n");
} else {
printf(" RMS-norms of solution error\n");
}
for (m = 0; m < 5; m++) {
if (*cclass == 'U') {
printf(" %2d%20.13e\n", m, xce[m]);
} else if (xcedif[m] > epsilon) {
*verified = FALSE;
printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n",
m, xce[m], xceref[m], xcedif[m]);
} else {
printf(" %2d%20.13e%20.13e%20.13e\n",
m, xce[m], xceref[m], xcedif[m]);
}
}
if (*cclass == 'U') {
printf(" No reference values provided\n");
printf(" No verification performed\n");
} else if (*verified == TRUE) {
printf(" Verification Successful\n");
} else {
printf(" Verification failed\n");
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void x_solve(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c
c Performs line solves in X direction by first factoring
c the block-tridiagonal matrix into an upper triangular matrix,
c and then performing back substitution to solve for the unknown
c vectors of each line.
c
c Make sure we treat elements zero to cell_size in the direction
c of the sweep.
c
c Driver only: lhsx builds the block-tridiagonal system, x_solve_cell
c does the forward elimination, x_backsubstitute recovers rhs.
c-------------------------------------------------------------------*/
lhsx();
x_solve_cell();
x_backsubstitute();
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void x_backsubstitute(void) {
/*--------------------------------------------------------------------
c back solve: if last cell, then generate U(isize)=rhs[isize)
c else assume U(isize) is loaded in un pack backsub_info
c so just use it
c after call u(istart) will be sent to next cell
c-------------------------------------------------------------------*/
  int i, j, k, m, n;
  double acc;

  /* sweep backwards through the interior planes; each plane uses the
     already-solved plane i+1 through the CC coupling block */
  for (i = grid_points[0]-2; i >= 0; i--) {
#pragma omp for private(k,m,n)
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        for (m = 0; m < BLOCK_SIZE; m++) {
          acc = rhs[i][j][k][m];
          for (n = 0; n < BLOCK_SIZE; n++) {
            acc = acc - lhs[i][j][k][CC][m][n]*rhs[i+1][j][k][n];
          }
          rhs[i][j][k][m] = acc;
        }
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void x_solve_cell(void) {
/*--------------------------------------------------------------------
c performs Gaussian elimination on this cell.
c
c assumes that unpacking routines for non-first cells
c preload C' and rhs' from previous cell.
c
c assumed send happens outside this routine, but that
c c'(IMAX) and rhs'(IMAX) will be sent to next cell
c
c Fix: the final binvrhs call previously indexed with the loop
c variable i, which only worked because i == isize after the middle
c loop exits; it now uses isize explicitly, matching the adjacent
c matvec_sub/matmul_sub calls.
c-------------------------------------------------------------------*/
  int i,j,k,isize;

  isize = grid_points[0]-1;

/*--------------------------------------------------------------------
c outer most do loops - sweeping in i direction
c-------------------------------------------------------------------*/
#pragma omp for private(k)
  for (j = 1; j < grid_points[1]-1; j++) {
    for (k = 1; k < grid_points[2]-1; k++) {

/*--------------------------------------------------------------------
c multiply c(0,j,k) by b_inverse and copy back to c
c multiply rhs(0) by b_inverse(0) and copy to rhs
c-------------------------------------------------------------------*/
      binvcrhs( lhs[0][j][k][BB],
                lhs[0][j][k][CC],
                rhs[0][j][k] );
    }
  }

/*--------------------------------------------------------------------
c begin inner most do loop
c do all the elements of the cell unless last
c-------------------------------------------------------------------*/
  for (i = 1; i < isize; i++) {
#pragma omp for private(k)
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {

/*--------------------------------------------------------------------
c rhs(i) = rhs(i) - A*rhs(i-1)
c-------------------------------------------------------------------*/
        matvec_sub(lhs[i][j][k][AA],
                   rhs[i-1][j][k], rhs[i][j][k]);

/*--------------------------------------------------------------------
c B(i) = B(i) - C(i-1)*A(i)
c-------------------------------------------------------------------*/
        matmul_sub(lhs[i][j][k][AA],
                   lhs[i-1][j][k][CC],
                   lhs[i][j][k][BB]);

/*--------------------------------------------------------------------
c multiply c(i,j,k) by b_inverse and copy back to c
c multiply rhs(1,j,k) by b_inverse(1,j,k) and copy to rhs
c-------------------------------------------------------------------*/
        binvcrhs( lhs[i][j][k][BB],
                  lhs[i][j][k][CC],
                  rhs[i][j][k] );
      }
    }
  }

#pragma omp for private(k)
  for (j = 1; j < grid_points[1]-1; j++) {
    for (k = 1; k < grid_points[2]-1; k++) {

/*--------------------------------------------------------------------
c rhs(isize) = rhs(isize) - A*rhs(isize-1)
c-------------------------------------------------------------------*/
      matvec_sub(lhs[isize][j][k][AA],
                 rhs[isize-1][j][k], rhs[isize][j][k]);

/*--------------------------------------------------------------------
c B(isize) = B(isize) - C(isize-1)*A(isize)
c-------------------------------------------------------------------*/
      matmul_sub(lhs[isize][j][k][AA],
                 lhs[isize-1][j][k][CC],
                 lhs[isize][j][k][BB]);

/*--------------------------------------------------------------------
c multiply rhs() by b_inverse() and copy to rhs
c (index with isize explicitly, not the stale loop variable i)
c-------------------------------------------------------------------*/
      binvrhs( lhs[isize][j][k][BB],
               rhs[isize][j][k] );
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]) {
/*--------------------------------------------------------------------
c subtracts bvec=bvec - ablock*avec
c (5x5 block times 5-vector, subtracted in place from bvec; the
c subtractions run left-to-right per row, matching the unrolled form)
c-------------------------------------------------------------------*/
  int i, j;
  double acc;

  for (i = 0; i < 5; i++) {
    acc = bvec[i];
    for (j = 0; j < 5; j++) {
      acc = acc - ablock[i][j]*avec[j];
    }
    bvec[i] = acc;
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void matmul_sub(double ablock[5][5], double bblock[5][5],
		       double cblock[5][5]) {
/*--------------------------------------------------------------------
c subtracts a(i,j,k) X b(i,j,k) from c(i,j,k)
c (5x5 block product subtracted in place from cblock; column-major
c outer loop and left-to-right accumulation match the unrolled form)
c-------------------------------------------------------------------*/
  int i, j, k;
  double acc;

  for (j = 0; j < 5; j++) {
    for (i = 0; i < 5; i++) {
      acc = cblock[i][j];
      for (k = 0; k < 5; k++) {
        acc = acc - ablock[i][k]*bblock[k][j];
      }
      cblock[i][j] = acc;
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void binvcrhs(double lhs[5][5], double c[5][5], double r[5]) {
/*--------------------------------------------------------------------
c Gauss-Jordan elimination without pivot search on the 5x5 block lhs,
c applying the same row operations to the coupling block c and the
c right-hand side r.  On exit c holds lhs_inverse * c and r holds
c lhs_inverse * r.  The loops reproduce the exact operation order of
c the original fully-unrolled code (pivot rows 0..4; for each pivot,
c every other row is eliminated in ascending order), so the
c floating-point results are bit-identical.
c-------------------------------------------------------------------*/
  int p, q, n;
  double pivot, coeff;

  for (p = 0; p < 5; p++) {
    /* scale pivot row p so that lhs[p][p] becomes 1 (only the columns
       to the right of the diagonal still matter in lhs) */
    pivot = 1.00/lhs[p][p];
    for (n = p+1; n < 5; n++) {
      lhs[p][n] = lhs[p][n]*pivot;
    }
    for (n = 0; n < 5; n++) {
      c[p][n] = c[p][n]*pivot;
    }
    r[p] = r[p]*pivot;

    /* eliminate column p from every other row */
    for (q = 0; q < 5; q++) {
      if (q == p) continue;
      coeff = lhs[q][p];
      for (n = p+1; n < 5; n++) {
        lhs[q][n] = lhs[q][n] - coeff*lhs[p][n];
      }
      for (n = 0; n < 5; n++) {
        c[q][n] = c[q][n] - coeff*c[p][n];
      }
      r[q] = r[q] - coeff*r[p];
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void binvrhs( double lhs[5][5], double r[5] ) {
/*--------------------------------------------------------------------
c Gauss-Jordan elimination without pivot search on the 5x5 block lhs,
c applying the same row operations to the right-hand side r; on exit
c r holds lhs_inverse * r.  The loops reproduce the exact operation
c order of the original fully-unrolled code (pivot rows 0..4; for
c each pivot, every other row is eliminated in ascending order), so
c the floating-point results are bit-identical.
c-------------------------------------------------------------------*/
  int p, q, n;
  double pivot, coeff;

  for (p = 0; p < 5; p++) {
    /* scale pivot row p so that lhs[p][p] becomes 1 */
    pivot = 1.00/lhs[p][p];
    for (n = p+1; n < 5; n++) {
      lhs[p][n] = lhs[p][n]*pivot;
    }
    r[p] = r[p]*pivot;

    /* eliminate column p from every other row */
    for (q = 0; q < 5; q++) {
      if (q == p) continue;
      coeff = lhs[q][p];
      for (n = p+1; n < 5; n++) {
        lhs[q][n] = lhs[q][n] - coeff*lhs[p][n];
      }
      r[q] = r[q] - coeff*r[p];
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void y_solve(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Performs line solves in Y direction by first factoring
c the block-tridiagonal matrix into an upper triangular matrix,
c and then performing back substitution to solve for the unknown
c vectors of each line.
c
c Make sure we treat elements zero to cell_size in the direction
c of the sweep.
c
c Driver only: lhsy builds the block-tridiagonal system, y_solve_cell
c does the forward elimination, y_backsubstitute recovers rhs.
c-------------------------------------------------------------------*/
lhsy();
y_solve_cell();
y_backsubstitute();
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void y_backsubstitute(void) {
/*--------------------------------------------------------------------
c back solve: if last cell, then generate U(jsize)=rhs(jsize)
c else assume U(jsize) is loaded in un pack backsub_info
c so just use it
c after call u(jstart) will be sent to next cell
c-------------------------------------------------------------------*/
  int i, j, k, m, n;
  double acc;

  /* sweep backwards through the interior planes; each plane uses the
     already-solved plane j+1 through the CC coupling block */
  for (j = grid_points[1]-2; j >= 0; j--) {
#pragma omp for private(k,m,n)
    for (i = 1; i < grid_points[0]-1; i++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        for (m = 0; m < BLOCK_SIZE; m++) {
          acc = rhs[i][j][k][m];
          for (n = 0; n < BLOCK_SIZE; n++) {
            acc = acc - lhs[i][j][k][CC][m][n]*rhs[i][j+1][k][n];
          }
          rhs[i][j][k][m] = acc;
        }
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void y_solve_cell(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c performs guaussian elimination on this cell.
c
c assumes that unpacking routines for non-first cells
c preload C' and rhs' from previous cell.
c
c assumed send happens outside this routine, but that
c c'(JMAX) and rhs'(JMAX) will be sent to next cell
c-------------------------------------------------------------------*/
int i, j, k, jsize;
jsize = grid_points[1]-1;
/* First plane (j = 0): normalize the diagonal block in place.
The i/k points are independent, so the loop is workshared. */
#pragma omp for private(k)
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
/*--------------------------------------------------------------------
c multiply c(i,0,k) by b_inverse and copy back to c
c multiply rhs(0) by b_inverse(0) and copy to rhs
c-------------------------------------------------------------------*/
binvcrhs( lhs[i][0][k][BB],
lhs[i][0][k][CC],
rhs[i][0][k] );
}
}
/*--------------------------------------------------------------------
c begin inner most do loop
c do all the elements of the cell unless last
c-------------------------------------------------------------------*/
/* The j loop is sequential: elimination at plane j consumes the
C' and rhs' produced at plane j-1. */
for (j = 1; j < jsize; j++) {
#pragma omp for private(k)
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
/*--------------------------------------------------------------------
c subtract A*lhs_vector(j-1) from lhs_vector(j)
c
c rhs(j) = rhs(j) - A*rhs(j-1)
c-------------------------------------------------------------------*/
matvec_sub(lhs[i][j][k][AA],
rhs[i][j-1][k], rhs[i][j][k]);
/*--------------------------------------------------------------------
c B(j) = B(j) - C(j-1)*A(j)
c-------------------------------------------------------------------*/
matmul_sub(lhs[i][j][k][AA],
lhs[i][j-1][k][CC],
lhs[i][j][k][BB]);
/*--------------------------------------------------------------------
c multiply c(i,j,k) by b_inverse and copy back to c
c multiply rhs(i,1,k) by b_inverse(i,1,k) and copy to rhs
c-------------------------------------------------------------------*/
binvcrhs( lhs[i][j][k][BB],
lhs[i][j][k][CC],
rhs[i][j][k] );
}
}
}
/* Last plane (j = jsize): same elimination but no C block to carry
forward, so binvrhs (rhs only) replaces binvcrhs. */
#pragma omp for private(k)
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
/*--------------------------------------------------------------------
c rhs(jsize) = rhs(jsize) - A*rhs(jsize-1)
c-------------------------------------------------------------------*/
matvec_sub(lhs[i][jsize][k][AA],
rhs[i][jsize-1][k], rhs[i][jsize][k]);
/*--------------------------------------------------------------------
c B(jsize) = B(jsize) - C(jsize-1)*A(jsize)
c call matmul_sub(aa,i,jsize,k,c,
c $ cc,i,jsize-1,k,c,BB,i,jsize,k)
c-------------------------------------------------------------------*/
matmul_sub(lhs[i][jsize][k][AA],
lhs[i][jsize-1][k][CC],
lhs[i][jsize][k][BB]);
/*--------------------------------------------------------------------
c multiply rhs(jsize) by b_inverse(jsize) and copy to rhs
c-------------------------------------------------------------------*/
binvrhs( lhs[i][jsize][k][BB],
rhs[i][jsize][k] );
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void z_solve(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Performs line solves in Z direction by first factoring
c the block-tridiagonal matrix into an upper triangular matrix,
c and then performing back substitution to solve for the unknow
c vectors of each line.
c
c Make sure we treat elements zero to cell_size in the direction
c of the sweep.
c-------------------------------------------------------------------*/
/* Step 1: assemble the block-tridiagonal left-hand side for the z sweep. */
lhsz();
/* Step 2: forward elimination -> upper block-triangular system. */
z_solve_cell();
/* Step 3: back substitution from k = ksize-1 down to k = 0.
The three calls must run in exactly this order. */
z_backsubstitute();
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void z_backsubstitute(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c back solve: if last cell, then generate U(ksize)=rhs(ksize)
c else assume U(ksize) is loaded in un pack backsub_info
c so just use it
c after call u(kstart) will be sent to next cell
c-------------------------------------------------------------------*/
int i, j, k, m, n;
/* Unlike the y sweep, the sequential dependence here is along k, which
is an inner loop; the independent i columns can be workshared in one
pass. Within a column, k must still run high-to-low because plane k
reads the already-solved plane k+1. */
#pragma omp for private(j,k,m,n)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = grid_points[2]-2; k >= 0; k--) {
for (m = 0; m < BLOCK_SIZE; m++) {
/* rhs(k) -= C(k) * rhs(k+1), row m of the 5x5 block. */
for (n = 0; n < BLOCK_SIZE; n++) {
rhs[i][j][k][m] = rhs[i][j][k][m]
- lhs[i][j][k][CC][m][n]*rhs[i][j][k+1][n];
}
}
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void z_solve_cell(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c performs guaussian elimination on this cell.
c
c assumes that unpacking routines for non-first cells
c preload C' and rhs' from previous cell.
c
c assumed send happens outside this routine, but that
c c'(KMAX) and rhs'(KMAX) will be sent to next cell.
c-------------------------------------------------------------------*/
int i,j,k,ksize;
ksize = grid_points[2]-1;
/*--------------------------------------------------------------------
c outer most do loops - sweeping in i direction
c-------------------------------------------------------------------*/
/* First plane (k = 0): normalize the diagonal block in place.
The i/j points are independent, so the loop is workshared. */
#pragma omp for private(j)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
/*--------------------------------------------------------------------
c multiply c(i,j,0) by b_inverse and copy back to c
c multiply rhs(0) by b_inverse(0) and copy to rhs
c-------------------------------------------------------------------*/
binvcrhs( lhs[i][j][0][BB],
lhs[i][j][0][CC],
rhs[i][j][0] );
}
}
/*--------------------------------------------------------------------
c begin inner most do loop
c do all the elements of the cell unless last
c-------------------------------------------------------------------*/
/* The k loop is sequential: elimination at plane k consumes the
C' and rhs' produced at plane k-1. */
for (k = 1; k < ksize; k++) {
#pragma omp for private(j)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
/*--------------------------------------------------------------------
c subtract A*lhs_vector(k-1) from lhs_vector(k)
c
c rhs(k) = rhs(k) - A*rhs(k-1)
c-------------------------------------------------------------------*/
matvec_sub(lhs[i][j][k][AA],
rhs[i][j][k-1], rhs[i][j][k]);
/*--------------------------------------------------------------------
c B(k) = B(k) - C(k-1)*A(k)
c call matmul_sub(aa,i,j,k,c,cc,i,j,k-1,c,BB,i,j,k)
c-------------------------------------------------------------------*/
matmul_sub(lhs[i][j][k][AA],
lhs[i][j][k-1][CC],
lhs[i][j][k][BB]);
/*--------------------------------------------------------------------
c multiply c(i,j,k) by b_inverse and copy back to c
c multiply rhs(i,j,1) by b_inverse(i,j,1) and copy to rhs
c-------------------------------------------------------------------*/
binvcrhs( lhs[i][j][k][BB],
lhs[i][j][k][CC],
rhs[i][j][k] );
}
}
}
/*--------------------------------------------------------------------
c Now finish up special cases for last cell
c-------------------------------------------------------------------*/
/* Last plane (k = ksize): no C block to carry forward, so
binvrhs (rhs only) replaces binvcrhs. */
#pragma omp for private(j)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
/*--------------------------------------------------------------------
c rhs(ksize) = rhs(ksize) - A*rhs(ksize-1)
c-------------------------------------------------------------------*/
matvec_sub(lhs[i][j][ksize][AA],
rhs[i][j][ksize-1], rhs[i][j][ksize]);
/*--------------------------------------------------------------------
c B(ksize) = B(ksize) - C(ksize-1)*A(ksize)
c call matmul_sub(aa,i,j,ksize,c,
c $ cc,i,j,ksize-1,c,BB,i,j,ksize)
c-------------------------------------------------------------------*/
matmul_sub(lhs[i][j][ksize][AA],
lhs[i][j][ksize-1][CC],
lhs[i][j][ksize][BB]);
/*--------------------------------------------------------------------
c multiply rhs(ksize) by b_inverse(ksize) and copy to rhs
c-------------------------------------------------------------------*/
binvrhs( lhs[i][j][ksize][BB],
rhs[i][j][ksize] );
}
}
}
/* cat ./common/c_print_results.c */
/*****************************************************************/
/****** C _ P R I N T _ R E S U L T S ******/
/*****************************************************************/
/*
 * Print the standard NPB result report to stdout: problem size, timing,
 * Mop/s rate, verification status, and the build configuration strings.
 *
 * Fix: getenv("MP_SET_NUMTHREADS") returns NULL when the variable is
 * unset; passing NULL to printf("%s") is undefined behavior. The result
 * is now checked and the "1000" default is kept when the variable is
 * absent.
 */
void c_print_results( char *name,
        char cclass,
        int n1,
        int n2,
        int n3,
        int niter,
        int nthreads,
        double t,
        double mops,
        char *optype,
        int passed_verification,
        char *npbversion,
        char *compiletime,
        char *cc,
        char *clink,
        char *c_lib,
        char *c_inc,
        char *cflags,
        char *clinkflags,
        char *rand)
{
    char *evalue="1000"; /* default shown when MP_SET_NUMTHREADS is unset */
    printf( "\n\n %s Benchmark Completed\n", name );
    printf( " Class = %c\n", cclass );
    /* IS reports a 1-D size; the other benchmarks report a 3-D grid. */
    if( n2 == 0 && n3 == 0 )
        printf( " Size = %12d\n", n1 ); /* as in IS */
    else
        printf( " Size = %3dx%3dx%3d\n", n1,n2,n3 );
    printf( " Iterations = %12d\n", niter );
    printf( " Threads = %12d\n", nthreads );
    printf( " Time in seconds = %12.2f\n", t );
    printf( " Mop/s total = %12.2f\n", mops );
    printf( " Operation type = %24s\n", optype);
    if( passed_verification )
        printf( " Verification = SUCCESSFUL\n" );
    else
        printf( " Verification = UNSUCCESSFUL\n" );
    printf( " Version = %12s\n", npbversion );
    printf( " Compile date = %12s\n", compiletime );
    printf( "\n Compile options:\n" );
    printf( " CC = %s\n", cc );
    printf( " CLINK = %s\n", clink );
    printf( " C_LIB = %s\n", c_lib );
    printf( " C_INC = %s\n", c_inc );
    printf( " CFLAGS = %s\n", cflags );
    printf( " CLINKFLAGS = %s\n", clinkflags );
    printf( " RAND = %s\n", rand );
#ifdef SMP
    {
        /* getenv() yields NULL for an unset variable; guard before %s. */
        char *env_threads = getenv("MP_SET_NUMTHREADS");
        if (env_threads != NULL) evalue = env_threads;
    }
    printf( " MULTICPUS = %s\n", evalue );
#endif
/* printf( "\n\n" );
printf( " Please send the results of this run to:\n\n" );
printf( " NPB Development Team\n" );
printf( " Internet: npb@nas.nasa.gov\n \n" );
printf( " If email is not available, send this to:\n\n" );
printf( " MS T27A-1\n" );
printf( " NASA Ames Research Center\n" );
printf( " Moffett Field, CA 94035-1000\n\n" );
printf( " Fax: 415-604-3957\n\n" );*/
}
/*
cat ./common/c_timers.c
*/
/*
#include "wtime.h"
#if defined(IBM)
#define wtime wtime
#elif defined(CRAY)
#define wtime WTIME
#else
#define wtime wtime_
#endif
*/
/* Prototype */
void wtime( double * );
/*****************************************************************/
/****** E L A P S E D _ T I M E ******/
/*****************************************************************/
/* Current wall-clock reading in seconds, as reported by wtime(). */
double elapsed_time( void )
{
    double now;

    wtime( &now );
    return now;
}
/* Per-timer state for up to 64 timers: start[n] holds the wall-clock
stamp of the last timer_start(n); elapsed[n] accumulates the total
measured seconds across start/stop pairs. */
double start[64], elapsed[64];
/*****************************************************************/
/****** T I M E R _ C L E A R ******/
/*****************************************************************/
void timer_clear( int n )
{
/* Zero the accumulated time of timer n. start[n] is left alone;
it is overwritten by the next timer_start(n). */
elapsed[n] = 0.0;
}
/*****************************************************************/
/****** T I M E R _ S T A R T ******/
/*****************************************************************/
void timer_start( int n )
{
/* Record the wall-clock stamp that the matching timer_stop(n)
will subtract from. */
start[n] = elapsed_time();
}
/*****************************************************************/
/****** T I M E R _ S T O P ******/
/*****************************************************************/
/* Stop timer n: add the interval since the matching timer_start(n)
to the running total in elapsed[n]. */
void timer_stop( int n )
{
    double finish;

    finish = elapsed_time();
    elapsed[n] += finish - start[n];
}
/*****************************************************************/
/****** T I M E R _ R E A D ******/
/*****************************************************************/
double timer_read( int n )
{
/* Total seconds accumulated by timer n since the last timer_clear(n). */
return( elapsed[n] );
}
/*
 * Store the wall-clock time in seconds (microsecond resolution) into *t,
 * measured relative to the first call. The first invocation latches
 * tv_sec as an epoch offset so the double result keeps precision even
 * for long-running processes.
 *
 * Fix: the offset is kept in a time_t instead of an int -- tv.tv_sec is
 * a time_t, and narrowing it to int truncates where time_t is 64-bit
 * (and fails after 2038 everywhere else). time_t comes from
 * <sys/time.h>, which this file already needs for gettimeofday().
 */
void wtime(double *t)
{
    static time_t sec = -1;
    struct timeval tv;
    gettimeofday(&tv, NULL);
    if (sec < 0) sec = tv.tv_sec;
    *t = (tv.tv_sec - sec) + 1.0e-6*tv.tv_usec;
}
|
PolynomialFunctor3D.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
// Ken Esler, kpesler@gmail.com, University of Illinois at Urbana-Champaign
// Jeremy McMinnis, jmcminis@gmail.com, University of Illinois at Urbana-Champaign
// Mark A. Berrill, berrillma@ornl.gov, Oak Ridge National Laboratory
// Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//
// File created by: Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
//////////////////////////////////////////////////////////////////////////////////////
#ifndef QMCPLUSPLUS_POLYNOMIAL3D_FUNCTOR_H
#define QMCPLUSPLUS_POLYNOMIAL3D_FUNCTOR_H
#include "Numerics/OptimizableFunctorBase.h"
#include "Numerics/HDFNumericAttrib.h"
#include "Utilities/ProgressReportEngine.h"
#include "OhmmsData/AttributeSet.h"
#include "Numerics/LinearFit.h"
#include "Numerics/DeterminantOperators.h"
#include <cstdio>
#include <algorithm>
namespace qmcplusplus
{
struct PolynomialFunctor3D: public OptimizableFunctorBase
{
typedef real_type value_type;
// Polynomial orders: N_eI for the two electron-ion coordinates,
// N_ee for the electron-electron coordinate.
int N_eI, N_ee;
// Expansion coefficients gamma(l,m,n) multiplying r_1I^l r_2I^m r_12^n;
// symmetric in (l,m) -- see resize()/reset_gamma().
Array<real_type,3> gamma;
// Permutation vector, used when we need to pivot
// columns
std::vector<int> GammaPerm;
// Maps (l,m,n) to a flat position in GammaVec (symmetric in l,m).
Array<int,3> index;
// IndepVar[i] is true when flat coefficient i is a free (optimizable)
// variable rather than one fixed by the cusp constraints.
std::vector<bool> IndepVar;
// GammaVec: flattened coefficient vector; dval_Vec and the two below
// are per-coefficient derivative workspaces (see evaluateDerivatives).
std::vector<real_type> GammaVec, dval_Vec;
std::vector<TinyVector<real_type,3> > dgrad_Vec;
std::vector<Tensor<real_type,3> > dhess_Vec;
// NumGamma = total coefficients; NumConstraints = cusp-condition rows.
int NumConstraints, NumGamma;
// Row-reduced linear constraints tying dependent coefficients to the
// independent ones (built in resize(), applied in reset_gamma()).
Matrix<real_type> ConstraintMatrix;
// Parameters: the NumGamma - NumConstraints independent values exposed
// to the optimizer; *FD members are finite-difference scratch space.
std::vector<real_type> Parameters, d_valsFD;
std::vector<TinyVector<real_type,3> > d_gradsFD;
std::vector<Tensor<real_type,3> > d_hessFD;
std::vector<std::string> ParameterNames;
std::string iSpecies, eSpecies1, eSpecies2;
int ResetCount;
// Multiplies Parameters when filling GammaVec (see reset_gamma()).
real_type scale;
// Order of continuity
const int C;
// presumably "not optimizable" flag; set false in the constructor --
// NOTE(review): semantics defined outside this chunk, confirm.
bool notOpt;
/// Constructor.
/// @param ee_cusp electron-electron cusp value; must be zero (unsupported)
/// @param eI_cusp electron-ion cusp value; must be zero (unsupported)
///
/// Fix: the member-initializer list is reordered to match the member
/// declaration order (ResetCount, scale, C, notOpt). Members are always
/// initialized in declaration order regardless of list order, so this is
/// behavior-neutral, but the mismatch triggered -Wreorder and misled
/// readers about the actual initialization sequence.
PolynomialFunctor3D(real_type ee_cusp=0.0, real_type eI_cusp=0.0) :
  N_eI(0), N_ee(0), ResetCount(0), scale(1.0), C(3), notOpt(false)
{
  // Nonzero cusps cannot be represented by this functor; fail loudly
  // rather than silently produce a wrong wavefunction.
  if (std::abs(ee_cusp) > 0.0 || std::abs(eI_cusp) > 0.0)
  {
    app_error() << "PolynomialFunctor3D does not support nonzero cusp.\n";
    abort();
  }
  cutoff_radius = 0.0;
}
/** Polymorphic copy: heap-allocated duplicate of this functor.
 *  The caller takes ownership of the returned pointer. */
OptimizableFunctorBase* makeClone() const
{
return new PolynomialFunctor3D(*this);
}
/** Allocate storage for polynomial orders (neI, nee) and rebuild the
 *  cusp-constraint matrix.
 *
 *  @param neI polynomial order in each electron-ion distance
 *  @param nee polynomial order in the electron-electron distance
 *
 *  NumGamma counts the (l,m)-symmetric coefficients; the e-e and e-I
 *  no-cusp conditions supply NumConstraints linear relations among
 *  them, leaving NumGamma - NumConstraints free optimizable Parameters.
 *  The constraint matrix is row-reduced here (Gauss-Jordan with partial
 *  pivoting) so reset_gamma() can later solve for the dependent
 *  coefficients by simple substitution.
 */
void resize(int neI, int nee)
{
N_eI = neI;
N_ee = nee;
const double L = 0.5 * cutoff_radius;
gamma.resize(N_eI+1, N_eI+1, N_ee+1);
index.resize(N_eI+1, N_eI+1, N_ee+1);
NumGamma = ((N_eI+1)*(N_eI+2)/2 * (N_ee+1));
NumConstraints = (2*N_eI+1) + (N_eI+N_ee+1);
int numParams = NumGamma - NumConstraints;
Parameters.resize(numParams);
d_valsFD.resize(numParams);
d_gradsFD.resize(numParams);
d_hessFD.resize(numParams);
GammaVec.resize(NumGamma);
dval_Vec.resize(NumGamma);
dgrad_Vec.resize(NumGamma);
dhess_Vec.resize(NumGamma);
ConstraintMatrix.resize(NumConstraints, NumGamma);
// Assign indices
// Flat numbering of the symmetric (l,m,n) triples; both (l,m,n) and
// (m,l,n) map to the same slot.
int num=0;
for (int m=0; m<=N_eI; m++)
for (int l=m; l<=N_eI; l++)
for (int n=0; n<=N_ee; n++)
index(l,m,n) = index(m,l,n) = num++;
assert (num == NumGamma);
// std::cerr << "NumGamma = " << NumGamma << std::endl;
// Fill up contraint matrix
// For 3 constraints and 2 parameters, we would have
// |A00 A01 A02 A03 A04| |g0| |0 |
// |A11 A11 A12 A13 A14| |g1| |0 |
// |A22 A21 A22 A23 A24| |g2| = |0 |
// | 0 0 0 1 0 | |g3| |p0|
// | 0 0 0 0 1 | |g4| |p1|
ConstraintMatrix = 0.0;
// std::cerr << "ConstraintMatrix.size = " << ConstraintMatrix.size(0)
// << " by " << ConstraintMatrix.size(1) << std::endl;
// std::cerr << "index.size() = (" << index.size(0) << ", "
// << index.size(1) << ", " << index.size(2) << ").\n";
int k;
// e-e no-cusp constraint
// One row per total e-I power k = l + m; the factor 2 accounts for the
// (l,m)/(m,l) symmetric pair sharing one stored coefficient.
for (k=0; k<=2*N_eI; k++)
{
for (int m=0; m<=k; m++)
{
int l = k - m;
if (l<=N_eI && m <=N_eI)
{
int i = index(l,m,1);
if (l > m)
ConstraintMatrix(k,i) = 2.0;
else
if (l == m)
ConstraintMatrix(k,i) = 1.0;
}
}
}
// e-I no-cusp constraint
// Rows continue after the e-e block (k still holds 2*N_eI+1 here).
for (int kp=0; kp<=N_eI+N_ee; kp++)
{
if (kp <= N_ee)
{
ConstraintMatrix(k+kp,index(0,0,kp)) = (real_type) C;
ConstraintMatrix(k+kp,index(0,1,kp)) = -L;
}
for (int l=1; l<=kp; l++)
{
int n = kp - l;
if (n >= 0 && n <= N_ee && l <= N_eI)
{
ConstraintMatrix(k+kp,index(l,0,n)) = (real_type)C;
ConstraintMatrix(k+kp,index(l,1,n)) = -L;
}
}
}
// {
// fprintf (stderr, "Constraint matrix:\n");
// for (int i=0; i<NumConstraints; i++) {
// for (int j=0; j<NumGamma; j++)
// fprintf (stderr, "%5.2f ", ConstraintMatrix(i,j));
// fprintf(stderr, "\n");
// }
// }
// Now, row-reduce constraint matrix
GammaPerm.resize(NumGamma);
IndepVar.resize(NumGamma, false);
// Set identity permutation
for (int i=0; i<NumGamma; i++)
GammaPerm[i] = i;
int col=-1;
for (int row=0; row<NumConstraints; row++)
{
int max_loc;
real_type max_abs;
// Scan columns until one has a usable (non-negligible) pivot;
// columns skipped along the way correspond to free variables.
do
{
col++;
max_loc = row;
max_abs = std::abs(ConstraintMatrix(row,col));
for (int ri=row+1; ri<NumConstraints; ri++)
{
real_type abs_val = std::abs(ConstraintMatrix(ri,col));
if (abs_val > max_abs)
{
max_loc = ri;
max_abs = abs_val;
}
}
if (max_abs < 1.0e-6)
IndepVar[col] = true;
}
while (max_abs < 1.0e-6);
#if ( ( __INTEL_COMPILER == 1700 ) && ( __cplusplus < 201103L ) )
// the swap_rows is sick with Intel compiler 17 update 1, c++11 off
// manually swap the rows
for(int ind_col=0; ind_col<ConstraintMatrix.size2(); ind_col++)
{
real_type temp = ConstraintMatrix(row,ind_col);
ConstraintMatrix(row,ind_col) = ConstraintMatrix(max_loc,ind_col);
ConstraintMatrix(max_loc,ind_col) = temp;
}
#else
ConstraintMatrix.swap_rows(row,max_loc);
#endif
// Normalize the pivot row, then eliminate the pivot column from
// every other row (full Gauss-Jordan, not just forward elimination).
real_type lead_inv = 1.0/ConstraintMatrix(row,col);
for (int c=0; c<NumGamma; c++)
ConstraintMatrix(row,c) *= lead_inv;
// Now, eliminate column entries
for (int ri=0; ri<NumConstraints; ri++)
{
if (ri != row)
{
real_type val = ConstraintMatrix(ri,col);
for (int c=0; c < NumGamma; c++)
ConstraintMatrix(ri,c) -= val * ConstraintMatrix(row,c);
}
}
}
// Any column beyond the last pivot is also a free variable.
for (int c=col+1; c<NumGamma; c++)
IndepVar[c] = true;
// fprintf (stderr, "Reduced Constraint matrix:\n");
// for (int i=0; i<NumConstraints; i++) {
// for (int j=0; j<NumGamma; j++)
// fprintf (stderr, "%5.2f ", ConstraintMatrix(i,j));
// fprintf(stderr, "\n");
// }
// fprintf (stderr, "Independent vars = \n");
// for (int i=0; i<NumGamma; i++)
// if (IndepVar[i])
// fprintf (stderr, "%d ", i);
// fprintf (stderr, "\n");
// fprintf (stderr, "Inverse matrix:\n");
// // Now, invert constraint matrix
// Invert(ConstraintMatrix.data(), NumGamma, NumGamma);
// for (int i=0; i<NumGamma; i++) {
// for (int j=0; j<NumGamma; j++)
// fprintf (stderr, "%5.2f ", ConstraintMatrix(i,j));
// fprintf(stderr, "\n");
// }
}
/** Rebuild all storage and constraints for the current orders, then
 *  recompute the gamma coefficients from Parameters. */
void reset()
{
resize(N_eI, N_ee);
reset_gamma();
}
/** Recompute GammaVec and the gamma(l,m,n) tensor from the optimizable
 *  Parameters.
 *
 *  Independent coefficients are set to scale*Parameters; each dependent
 *  coefficient is then solved from its row of the reduced
 *  ConstraintMatrix (whose diagonal entry for that variable is 1 after
 *  the Gauss-Jordan pass in resize()). Afterwards both cusp-condition
 *  families are re-verified and the program aborts on violation.
 */
void reset_gamma()
{
// fprintf (stderr, "Parameters:\n");
// for (int i=0; i<Parameters.size(); i++)
// fprintf (stderr, " %16.10e\n", Parameters[i]);
const double L = 0.5 * cutoff_radius;
std::fill(GammaVec.begin(), GammaVec.end(), 0.0);
// First, set all independent variables
int var=0;
for (int i=0; i<NumGamma; i++)
if (IndepVar[i])
GammaVec[i] = scale*Parameters[var++];
assert (var == Parameters.size());
// Now, set dependent variables
var = 0;
// std::cerr << "NumConstraints = " << NumConstraints << std::endl;
for (int i=0; i<NumGamma; i++)
if (!IndepVar[i])
{
// fprintf (stderr, "constraintMatrix(%d,%d) = %1.10f\n",
// var, i, ConstraintMatrix(var,i));
// Row `var` must have a unit pivot on variable i (guaranteed by
// the row reduction in resize()).
assert (std::abs(ConstraintMatrix(var,i) -1.0) < 1.0e-6);
for (int j=0; j<NumGamma; j++)
if (i != j)
GammaVec[i] -= ConstraintMatrix(var,j) * GammaVec[j];
var++;
}
// Scatter the flat vector into the symmetric 3-index tensor.
int num=0;
for (int m=0; m<=N_eI; m++)
for (int l=m; l<=N_eI; l++)
for (int n=0; n<=N_ee; n++)
// gamma(m,l,n) = gamma(l,m,n) = unpermuted[num++];
gamma(m,l,n) = gamma(l,m,n) = GammaVec[num++];
// Now check that constraints have been satisfied
// e-e constraints
for (int k=0; k<=2*N_eI; k++)
{
real_type sum = 0.0;
for (int m=0; m<=k; m++)
{
int l = k - m;
if (l<=N_eI && m <=N_eI)
{
int i = index(l,m,1);
if (l > m)
sum += 2.0*GammaVec[i];
else
if (l == m)
sum += GammaVec[i];
}
}
// Loose diagnostic only; the hard check with abort() follows below.
if (std::abs(sum) > 1.0e-9)
std::cerr << "error in k = " << k << " sum = " << sum << std::endl;
}
for (int k=0; k<=2*N_eI; k++)
{
real_type sum=0.0;
for (int l=0; l<=k; l++)
{
int m = k - l;
if (m <= N_eI && l <= N_eI)
{
// fprintf (stderr, "k = %d gamma(%d, %d, 1) = %1.8f\n", k, l, m,
// gamma(l,m,1));
sum += gamma(l,m,1);
}
}
if (std::abs(sum) > 1.0e-6)
{
app_error() << "e-e constraint not satisfied in PolynomialFunctor3D: k="
<< k << " sum=" << sum << std::endl;
abort();
}
}
// e-I constraints
for (int k=0; k<=N_eI+N_ee; k++)
{
real_type sum = 0.0;
for (int m=0; m<=k; m++)
{
int n = k - m;
if (m <= N_eI && n <= N_ee)
{
sum += (real_type)C*gamma(0,m,n) - L*gamma(1,m,n);
// fprintf (stderr,
// "k = %d gamma(0,%d,%d) = %1.8f gamma(1,%d,%d)=%1.8f\n",
// k, m, n, gamma(0,m,n), m, n, gamma(1,m,n));
}
}
if (std::abs(sum) > 1.0e-6)
{
app_error() << "e-I constraint not satisfied in PolynomialFunctor3D: k="
<< k << " sum=" << sum << std::endl;
abort();
}
}
}
/** Value of the three-body polynomial at (r_12, r_1I, r_2I).
 *  Returns 0 once either electron-ion distance reaches L = cutoff/2;
 *  otherwise the triple polynomial sum times the (r-L)^C envelope in
 *  both e-I coordinates. */
inline real_type evaluate(real_type r_12,
real_type r_1I,
real_type r_2I) const
{
  const real_type L = static_cast<real_type>(0.5) * cutoff_radius;
  if (r_1I >= L || r_2I >= L)
    return real_type(0);
  // Accumulate sum_{l,m,n} gamma(l,m,n) * r_1I^l * r_2I^m * r_12^n,
  // with n innermost (same summation order as before).
  real_type total(0);
  real_type pow_1I(1);
  for (int l = 0; l <= N_eI; ++l)
  {
    real_type pow_2I(pow_1I);
    for (int m = 0; m <= N_eI; ++m)
    {
      real_type pow_12(pow_2I);
      for (int n = 0; n <= N_ee; ++n)
      {
        total += gamma(l,m,n) * pow_12;
        pow_12 *= r_12;
      }
      pow_2I *= r_2I;
    }
    pow_1I *= r_1I;
  }
  // Apply the smoothing envelope (r_1I - L)(r_2I - L) C times.
  const real_type envelope = (r_1I - L) * (r_2I - L);
  for (int i = 0; i < C; ++i)
    total *= envelope;
  return total;
}
// assume r_1I < L && r_2I < L, compression and screening is handled outside
/** Vectorized sum of the functor over Nptcl particle triplets.
 *  @param Nptcl      number of entries in the three distance arrays
 *  @param r_12_array e-e distances (aligned, no aliasing: restrict)
 *  @param r_1I_array first e-I distances
 *  @param r_2I_array second e-I distances
 *  @return the sum of evaluate(r_12, r_1I, r_2I) over all entries
 *  (no cutoff test here -- callers guarantee r_1I, r_2I < L). */
inline real_type evaluateV(int Nptcl,
const real_type* restrict r_12_array,
const real_type* restrict r_1I_array,
const real_type* restrict r_2I_array) const
{
constexpr real_type czero(0);
constexpr real_type cone(1);
constexpr real_type chalf(0.5);
const real_type L = chalf*cutoff_radius;
real_type val_tot = czero;
/* One SIMD lane per particle; val_tot is a reduction variable. */
#pragma omp simd aligned(r_12_array,r_1I_array,r_2I_array) reduction(+:val_tot)
for(int ptcl=0; ptcl<Nptcl; ptcl++)
{
const real_type r_12 = r_12_array[ptcl];
const real_type r_1I = r_1I_array[ptcl];
const real_type r_2I = r_2I_array[ptcl];
real_type val = czero;
real_type r2l(cone);
for (int l=0; l<=N_eI; l++)
{
real_type r2m(r2l);
for (int m=0; m<=N_eI; m++)
{
real_type r2n(r2m);
for (int n=0; n<=N_ee; n++)
{
val += gamma(l,m,n)*r2n;
r2n *= r_12;
}
r2m *= r_2I;
}
r2l *= r_1I;
}
/* (r-L)^C envelope in both e-I coordinates. */
const real_type both_minus_L = (r_2I - L) * (r_1I - L);
for (int i=0; i<C; i++)
val *= both_minus_L;
val_tot += val;
}
return val_tot;
}
/** Value, gradient, and Hessian of the functor.
 *  @param r_12,r_1I,r_2I distances; coordinates are ordered
 *         (0: r_12, 1: r_1I, 2: r_2I) in grad and hess
 *  @param[out] grad gradient w.r.t. the three distances
 *  @param[out] hess symmetric 3x3 second-derivative matrix
 *  @return the functor value (0, with zeroed grad/hess, past the cutoff)
 *
 *  The r2*_1 / r2*_2 recurrences carry d/dr and d2/dr2 of the running
 *  power (e.g. r2n_1 = n * r_12^(n-1)), updated after use each
 *  iteration; the final loop applies the product rule for the
 *  (r_1I-L)(r_2I-L) envelope C times. */
inline real_type evaluate(real_type r_12, real_type r_1I, real_type r_2I,
TinyVector<real_type,3> &grad,
Tensor<real_type,3> &hess) const
{
constexpr real_type czero(0);
constexpr real_type cone(1);
constexpr real_type chalf(0.5);
constexpr real_type ctwo(2);
const real_type L = chalf*cutoff_radius;
if (r_1I >= L || r_2I >= L)
{
grad = czero;
hess = czero;
return czero;
}
real_type val = czero;
grad = czero;
hess = czero;
real_type r2l(cone), r2l_1(czero), r2l_2(czero), lf(czero);
for (int l=0; l<=N_eI; l++)
{
real_type r2m(cone), r2m_1(czero), r2m_2(czero), mf(czero);
for (int m=0; m<=N_eI; m++)
{
real_type r2n(cone), r2n_1(czero), r2n_2(czero), nf(czero);
for (int n=0; n<=N_ee; n++)
{
/* Shared partial products for this (l,m,n) term. */
const real_type g = gamma(l,m,n);
const real_type g00x = g * r2l * r2m ;
const real_type g10x = g * r2l_1 * r2m ;
const real_type g01x = g * r2l * r2m_1;
const real_type gxx0 = g * r2n;
val += g00x * r2n;
grad[0] += g00x * r2n_1;
grad[1] += g10x * r2n ;
grad[2] += g01x * r2n ;
/* Only the upper triangle is accumulated; mirrored at the end. */
hess(0,0) += g00x * r2n_2;
hess(0,1) += g10x * r2n_1;
hess(0,2) += g01x * r2n_1;
hess(1,1) += gxx0 * r2l_2 * r2m ;
hess(1,2) += gxx0 * r2l_1 * r2m_1;
hess(2,2) += gxx0 * r2l * r2m_2;
nf += cone;
r2n_2 = r2n_1 * nf;
r2n_1 = r2n * nf;
r2n *= r_12;
}
mf += cone;
r2m_2 = r2m_1 * mf;
r2m_1 = r2m * mf;
r2m *= r_2I;
}
lf += cone;
r2l_2 = r2l_1 * lf;
r2l_1 = r2l * lf;
r2l *= r_1I;
}
/* Fold in the envelope: each pass multiplies by (r_1I-L)(r_2I-L) and
updates grad/hess via the product rule (order of updates matters --
hess uses pre-update grad, grad uses pre-update val). */
const real_type r_2I_minus_L = r_2I - L;
const real_type r_1I_minus_L = r_1I - L;
const real_type both_minus_L = r_2I_minus_L * r_1I_minus_L;
for (int i=0; i<C; i++)
{
hess(0,0)=both_minus_L*hess(0,0);
hess(0,1)=both_minus_L*hess(0,1)+ r_2I_minus_L*grad[0];
hess(0,2)=both_minus_L*hess(0,2)+ r_1I_minus_L*grad[0];
hess(1,1)=both_minus_L*hess(1,1)+ ctwo*r_2I_minus_L*grad[1];
hess(1,2)=both_minus_L*hess(1,2)+ r_1I_minus_L*grad[1] + r_2I_minus_L*grad[2] + val;
hess(2,2)=both_minus_L*hess(2,2)+ ctwo*r_1I_minus_L*grad[2];
grad[0] = both_minus_L*grad[0];
grad[1] = both_minus_L*grad[1] + r_2I_minus_L * val;
grad[2] = both_minus_L*grad[2] + r_1I_minus_L * val;
val *= both_minus_L;
}
/* Symmetrize the Hessian. */
hess(1,0) = hess(0,1);
hess(2,0) = hess(0,2);
hess(2,1) = hess(1,2);
return val;
}
// assume r_1I < L && r_2I < L, compression and screening is handled outside
/** SIMD value/gradient/Laplacian-component evaluation over Nptcl
 *  particle triplets (structure-of-arrays form of the grad/hess
 *  evaluate above; hess12 is not needed by callers of this path).
 *  Gradients are divided by their distance and the mixed Hessian
 *  entries by the distance product before being stored -- the caller
 *  works with r-scaled quantities. No cutoff test (callers guarantee
 *  r_1I, r_2I < L). */
inline void evaluateVGL(int Nptcl, const real_type* restrict r_12_array,
const real_type* restrict r_1I_array,
const real_type* restrict r_2I_array,
real_type* restrict val_array,
real_type* restrict grad0_array,
real_type* restrict grad1_array,
real_type* restrict grad2_array,
real_type* restrict hess00_array,
real_type* restrict hess11_array,
real_type* restrict hess22_array,
real_type* restrict hess01_array,
real_type* restrict hess02_array) const
{
constexpr real_type czero(0);
constexpr real_type cone(1);
constexpr real_type chalf(0.5);
constexpr real_type ctwo(2);
const real_type L = chalf*cutoff_radius;
#pragma omp simd aligned(r_12_array,r_1I_array,r_2I_array,val_array, \
grad0_array,grad1_array,grad2_array, \
hess00_array,hess11_array,hess22_array,hess01_array,hess02_array)
for(int ptcl=0; ptcl<Nptcl; ptcl++)
{
const real_type r_12 = r_12_array[ptcl];
const real_type r_1I = r_1I_array[ptcl];
const real_type r_2I = r_2I_array[ptcl];
real_type val(czero);
real_type grad0(czero);
real_type grad1(czero);
real_type grad2(czero);
real_type hess00(czero);
real_type hess11(czero);
real_type hess22(czero);
real_type hess01(czero);
real_type hess02(czero);
/* Power/derivative recurrences, identical to the scalar
grad/hess evaluate (r2x_1 = d/dr, r2x_2 = d2/dr2). */
real_type r2l(cone), r2l_1(czero), r2l_2(czero), lf(czero);
for (int l=0; l<=N_eI; l++)
{
real_type r2m(cone), r2m_1(czero), r2m_2(czero), mf(czero);
for (int m=0; m<=N_eI; m++)
{
real_type r2n(cone), r2n_1(czero), r2n_2(czero), nf(czero);
for (int n=0; n<=N_ee; n++)
{
const real_type g = gamma(l,m,n);
const real_type g00x = g * r2l * r2m ;
const real_type g10x = g * r2l_1 * r2m ;
const real_type g01x = g * r2l * r2m_1;
const real_type gxx0 = g * r2n;
val += g00x * r2n;
grad0 += g00x * r2n_1;
grad1 += g10x * r2n ;
grad2 += g01x * r2n ;
hess00 += g00x * r2n_2;
hess01 += g10x * r2n_1;
hess02 += g01x * r2n_1;
hess11 += gxx0 * r2l_2 * r2m ;
hess22 += gxx0 * r2l * r2m_2;
nf += cone;
r2n_2 = r2n_1 * nf;
r2n_1 = r2n * nf;
r2n *= r_12;
}
mf += cone;
r2m_2 = r2m_1 * mf;
r2m_1 = r2m * mf;
r2m *= r_2I;
}
lf += cone;
r2l_2 = r2l_1 * lf;
r2l_1 = r2l * lf;
r2l *= r_1I;
}
/* Product rule for the (r_1I-L)(r_2I-L) envelope, applied C times;
update order matters (hess before grad before val). */
const real_type r_2I_minus_L = r_2I - L;
const real_type r_1I_minus_L = r_1I - L;
const real_type both_minus_L = r_2I_minus_L * r_1I_minus_L;
for (int i=0; i<C; i++)
{
hess00=both_minus_L*hess00;
hess01=both_minus_L*hess01 + r_2I_minus_L*grad0;
hess02=both_minus_L*hess02 + r_1I_minus_L*grad0;
hess11=both_minus_L*hess11 + ctwo*r_2I_minus_L*grad1;
hess22=both_minus_L*hess22 + ctwo*r_1I_minus_L*grad2;
grad0 = both_minus_L*grad0;
grad1 = both_minus_L*grad1 + r_2I_minus_L * val;
grad2 = both_minus_L*grad2 + r_1I_minus_L * val;
val *= both_minus_L;
}
/* Store r-scaled results (see doc comment above). */
val_array[ptcl] = val;
grad0_array[ptcl] = grad0/r_12;
grad1_array[ptcl] = grad1/r_1I;
grad2_array[ptcl] = grad2/r_2I;
hess00_array[ptcl] = hess00;
hess11_array[ptcl] = hess11;
hess22_array[ptcl] = hess22;
hess01_array[ptcl] = hess01/(r_12*r_1I);
hess02_array[ptcl] = hess02/(r_12*r_2I);
}
}
/** Value, gradient, Hessian, and third derivatives of the functor.
 *  @param[out] grad gradient w.r.t. (r_12, r_1I, r_2I)
 *  @param[out] hess symmetric second-derivative matrix
 *  @param[out] d3   third derivatives: d3[i](j,k) is symmetric under
 *                   any permutation of (i,j,k); only a canonical set is
 *                   accumulated and the rest is mirrored at the end
 *  @return the functor value (0, with zeroed outputs, past the cutoff)
 *
 *  Here nf/lf/mf hold the current exponents and the derivative factors
 *  (n, n(n-1), ...) are applied explicitly per term, unlike the
 *  pre-multiplied recurrences used in the grad/hess overload. */
inline real_type evaluate(const real_type r_12, const real_type r_1I, const real_type r_2I,
TinyVector<real_type,3> &grad,
Tensor<real_type,3> &hess,
TinyVector<Tensor<real_type,3>,3> &d3)
{
grad = 0.0;
hess = 0.0;
d3 = grad;
const real_type L = 0.5*cutoff_radius;
if (r_1I >= L || r_2I >= L)
return 0.0;
real_type val = 0.0;
/* r2x_k holds the power from k iterations ago, i.e. r^(x-k). */
real_type r2l(1.0), r2l_1(0.0), r2l_2(0.0), r2l_3(0.0), lf(0.0);
for (int l=0; l<=N_eI; l++)
{
real_type r2m(1.0), r2m_1(0.0), r2m_2(0.0), r2m_3(0.0), mf(0.0);
for (int m=0; m<=N_eI; m++)
{
real_type r2n(1.0), r2n_1(0.0), r2n_2(0.0), r2n_3(0.0), nf(0.0);
for (int n=0; n<=N_ee; n++)
{
real_type g = gamma(l,m,n);
val += g*r2l*r2m*r2n;
grad[0] += nf * g *r2l * r2m * r2n_1;
grad[1] += lf * g *r2l_1 * r2m * r2n ;
grad[2] += mf * g *r2l * r2m_1 * r2n ;
hess(0,0) += nf*(nf-1.0) * g * r2l * r2m * r2n_2 ;
hess(0,1) += nf*lf * g * r2l_1 * r2m * r2n_1 ;
hess(0,2) += nf*mf * g * r2l * r2m_1 * r2n_1 ;
hess(1,1) += lf*(lf-1.0) * g * r2l_2 * r2m * r2n ;
hess(1,2) += lf*mf * g * r2l_1 * r2m_1 * r2n ;
hess(2,2) += mf*(mf-1.0) * g * r2l * r2m_2 * r2n ;
d3[0](0,0) += nf*(nf-1.0)*(nf-2.0) * g * r2l * r2m * r2n_3 ;
d3[0](0,1) += nf*(nf-1.0)*lf * g * r2l_1 * r2m * r2n_2 ;
d3[0](0,2) += nf*(nf-1.0)*mf * g * r2l * r2m_1 * r2n_2 ;
d3[0](1,1) += nf*lf*(lf-1.0) * g * r2l_2 * r2m * r2n_1 ;
d3[0](1,2) += nf*lf*mf * g * r2l_1 * r2m_1 * r2n_1 ;
d3[0](2,2) += nf*mf*(mf-1.0) * g * r2l * r2m_2 * r2n_1 ;
d3[1](1,1) += lf*(lf-1.0)*(lf-2.0) * g * r2l_3 * r2m * r2n ;
d3[1](1,2) += lf*(lf-1.0)*mf * g * r2l_2 * r2m_1 * r2n ;
d3[1](2,2) += lf*mf*(mf-1.0) * g * r2l_1 * r2m_2 * r2n ;
d3[2](2,2) += mf*(mf-1.0)*(mf-2.0) * g * r2l * r2m_3 * r2n ;
r2n_3 = r2n_2;
r2n_2 = r2n_1;
r2n_1 = r2n;
r2n *= r_12;
nf += 1.0;
}
r2m_3 = r2m_2;
r2m_2 = r2m_1;
r2m_1 = r2m;
r2m *= r_2I;
mf += 1.0;
}
r2l_3 = r2l_2;
r2l_2 = r2l_1;
r2l_1 = r2l;
r2l *= r_1I;
lf += 1.0;
}
/* Product rule for the (r_1I-L)(r_2I-L) envelope, applied C times.
Update order matters: d3 uses pre-update hess/grad, hess uses
pre-update grad/val, and so on. */
for (int i=0; i<C; i++)
{
d3[0](0,0) = (r_1I - L)*(r_2I - L)*d3[0](0,0);
d3[0](0,1) = (r_1I - L)*(r_2I - L)*d3[0](0,1) + (r_2I - L)*hess(0,0);
d3[0](0,2) = (r_1I - L)*(r_2I - L)*d3[0](0,2) + (r_1I - L)*hess(0,0);
d3[0](1,1) = (r_1I - L)*(r_2I - L)*d3[0](1,1) + 2.0*(r_2I - L)*hess(0,1);
d3[0](1,2) = (r_1I - L)*(r_2I - L)*d3[0](1,2) + (r_1I - L)*hess(0,1) + (r_2I - L)*hess(0,2) + grad[0];
d3[0](2,2) = (r_1I - L)*(r_2I - L)*d3[0](2,2) + 2.0*(r_1I-L)*hess(0,2);
d3[1](1,1) = (r_1I - L)*(r_2I - L)*d3[1](1,1) + 3.0*(r_2I-L)*hess(1,1);
d3[1](1,2) = (r_1I - L)*(r_2I - L)*d3[1](1,2) +
2.0*(r_2I - L)*hess(1,2) + 2.0*grad[1] + (r_1I - L)*hess(1,1);
d3[1](2,2) = (r_1I - L)*(r_2I - L)*d3[1](2,2) +
2.0*(r_1I - L)*hess(1,2) + 2.0*grad[2] + (r_2I - L)*hess(2,2);
d3[2](2,2) = (r_1I - L)*(r_2I - L)*d3[2](2,2) + 3.0*(r_1I - L)*hess(2,2);
hess(0,0)=(r_1I - L)*(r_2I - L)*hess(0,0);
hess(0,1)=(r_1I - L)*(r_2I - L)*hess(0,1)+ (r_2I - L)*grad[0];
hess(0,2)=(r_1I - L)*(r_2I - L)*hess(0,2)+ (r_1I - L)*grad[0];
hess(1,1)=(r_1I - L)*(r_2I - L)*hess(1,1)+ 2.0*(r_2I - L)*grad[1];
hess(1,2)=(r_1I - L)*(r_2I - L)*hess(1,2)+ (r_1I - L)*grad[1] + (r_2I - L)*grad[2] + val;
hess(2,2)=(r_1I - L)*(r_2I - L)*hess(2,2)+ 2.0*(r_1I - L)*grad[2];
grad[0] = (r_1I - L)*(r_2I - L)*grad[0];
grad[1] = (r_1I - L)*(r_2I - L)*grad[1] + (r_2I - L) * val;
grad[2] = (r_1I - L)*(r_2I - L)*grad[2] + (r_1I - L) * val;
val *= (r_1I - L)*(r_2I - L);
}
/* Mirror the canonical entries into the remaining symmetric slots. */
hess(1,0) = hess(0,1);
hess(2,0) = hess(0,2);
hess(2,1) = hess(1,2);
d3[0](1,0) = d3[0](0,1);
d3[0](2,0) = d3[0](0,2);
d3[0](2,1) = d3[0](1,2);
d3[1](0,0) = d3[0](1,1);
d3[1](0,1) = d3[0](0,1);
d3[1](0,2) = d3[0](1,2);
d3[1](1,0) = d3[0](0,1);
d3[1](2,0) = d3[0](1,2);
d3[1](2,1) = d3[1](1,2);
d3[2](0,0) = d3[0](0,2);
d3[2](0,1) = d3[0](1,2);
d3[2](0,2) = d3[0](2,2);
d3[2](1,0) = d3[0](1,2);
d3[2](1,1) = d3[1](1,2);
d3[2](1,2) = d3[1](2,2);
d3[2](2,0) = d3[0](2,2);
d3[2](2,1) = d3[1](2,2);
return val;
}
/** One-argument interface inherited from the functor base class; this
 *  three-body functor has no meaningful single-distance value, so it
 *  always returns 0. NOTE(review): confirm no caller relies on this
 *  overload for anything but a placeholder. */
inline real_type evaluate(const real_type r, const real_type rinv)
{
return 0.0;
}
/** Parameter derivatives of (value, gradient, Hessian) by central finite
 *  differences: each parameter is displaced by +/-eps, the coefficients
 *  are rebuilt via reset_gamma(), and the symmetric difference is taken.
 *
 *  @param r_12,r_1I,r_2I evaluation point
 *  @param[out] d_vals  d(value)/d(parameter), one entry per parameter
 *  @param[out] d_grads d(gradient)/d(parameter)
 *  @param[out] d_hess  d(Hessian)/d(parameter)
 *  @return always true
 *
 *  Mutates Parameters temporarily but restores the original value (and
 *  calls reset_gamma() a final time) before moving to the next one.
 *  Intended as a check for the analytic evaluateDerivatives. */
inline bool
evaluateDerivativesFD (const real_type r_12, const real_type r_1I, const real_type r_2I,
std::vector<double> &d_vals,
std::vector<TinyVector<real_type,3> >& d_grads,
std::vector<Tensor<real_type,3> > &d_hess)
{
const real_type eps = 1.0e-6;
assert (d_vals.size() == Parameters.size());
assert (d_grads.size() == Parameters.size());
assert (d_hess.size() == Parameters.size());
for (int ip=0; ip < Parameters.size(); ip++)
{
real_type v_plus, v_minus;
TinyVector<real_type,3> g_plus, g_minus;
Tensor<real_type,3> h_plus, h_minus;
real_type save_p = Parameters[ip];
/* Forward displacement. */
Parameters[ip] = save_p + eps;
reset_gamma();
v_plus = evaluate (r_12, r_1I, r_2I, g_plus, h_plus);
/* Backward displacement. */
Parameters[ip] = save_p - eps;
reset_gamma();
v_minus = evaluate (r_12, r_1I, r_2I, g_minus, h_minus);
/* Restore the parameter and the coefficient tables. */
Parameters[ip] = save_p;
reset_gamma();
/* Central difference: (f(+eps) - f(-eps)) / (2 eps). */
real_type dp_inv = 0.5/eps;
d_vals[ip] = dp_inv * (v_plus - v_minus);
d_grads[ip] = dp_inv * (g_plus - g_minus);
d_hess[ip] = dp_inv * (h_plus - h_minus);
}
return true;
}
/** Analytic derivatives of the three-body polynomial with respect to the
 *  independent variational parameters.
 *
 *  @param r_12   electron-electron distance
 *  @param r_1I   distance of electron 1 from the ion
 *  @param r_2I   distance of electron 2 from the ion
 *  @param d_vals  [out] d(value)/d(param), one entry per independent parameter
 *  @param d_grads [out] d(gradient)/d(param)
 *  @param d_hess  [out] d(Hessian)/d(param)
 *  @return false when either electron-ion distance lies at/beyond the
 *          half-cutoff (the functor vanishes there); true otherwise.
 *
 *  BUG FIX: the DEBUG_DERIVS finite-difference cross-check used to be
 *  placed AFTER the return statement and was therefore unreachable even
 *  with DEBUG_DERIVS defined; it now runs before returning.
 */
inline bool
evaluateDerivatives (const real_type r_12, const real_type r_1I, const real_type r_2I,
                     std::vector<real_type> &d_vals,
                     std::vector<TinyVector<real_type,3> >& d_grads,
                     std::vector<Tensor<real_type,3> > &d_hess)
{
  const real_type L = 0.5*cutoff_radius;
  if (r_1I >= L || r_2I >= L)
    return false;
  constexpr real_type czero(0);
  constexpr real_type cone(1);
  constexpr real_type ctwo(2);
  real_type dval_dgamma;
  TinyVector<real_type,3> dgrad_dgamma;
  Tensor<real_type,3> dhess_dgamma;
  // Zero the accumulators over the FULL gamma set (dependent + independent).
  for (int i=0; i<dval_Vec.size(); i++)
  {
    dval_Vec[i] = czero;
    dgrad_Vec[i] = czero;
    dhess_Vec[i] = czero;
  }
  const real_type r_2I_minus_L = r_2I - L;
  const real_type r_1I_minus_L = r_1I - L;
  const real_type both_minus_L = r_2I_minus_L * r_1I_minus_L;
  // Powers and their radial derivatives, built recursively:
  // r2l = r_1I^l, r2l_1 = l*r_1I^(l-1), r2l_2 = l*(l-1)*r_1I^(l-2);
  // the m-variables track r_2I and the n-variables track r_12 the same way.
  real_type r2l(cone), r2l_1(czero), r2l_2(czero), lf(czero);
  for (int l=0; l<=N_eI; l++)
  {
    real_type r2m(cone), r2m_1(czero), r2m_2(czero), mf(czero);
    for (int m=0; m<=N_eI; m++)
    {
      // Flattened index into gamma storage; only the ordered (l,m) pair
      // is stored because the coefficients are symmetric in l and m.
      int num;
      if(m>l)
        num = ((2*N_eI-l+3)*l/2 + m-l)*(N_ee+1);
      else
        num = ((2*N_eI-m+3)*m/2 + l-m)*(N_ee+1);
      real_type r2n(cone), r2n_1(czero), r2n_2(czero), nf(czero);
      for (int n=0; n<=N_ee; n++, num++)
      {
        // Bare monomial value, gradient (d/dr_12, d/dr_1I, d/dr_2I),
        // and upper-triangular Hessian entries.
        dval_dgamma = r2l*r2m*r2n;
        dgrad_dgamma[0] = r2l * r2m * r2n_1;
        dgrad_dgamma[1] = r2l_1 * r2m * r2n ;
        dgrad_dgamma[2] = r2l * r2m_1 * r2n ;
        dhess_dgamma(0,0) = r2l * r2m * r2n_2;
        dhess_dgamma(0,1) = r2l_1 * r2m * r2n_1;
        dhess_dgamma(0,2) = r2l * r2m_1 * r2n_1;
        dhess_dgamma(1,1) = r2l_2 * r2m * r2n ;
        dhess_dgamma(1,2) = r2l_1 * r2m_1 * r2n ;
        dhess_dgamma(2,2) = r2l * r2m_2 * r2n ;
        // Multiply in the cutoff factor (r_1I-L)^C (r_2I-L)^C one power of
        // (r_1I-L)(r_2I-L) per iteration, applying the product rule to keep
        // value/gradient/Hessian consistent.
        for (int i=0; i<C; i++)
        {
          dhess_dgamma(0,0) = both_minus_L*dhess_dgamma(0,0);
          dhess_dgamma(0,1) = both_minus_L*dhess_dgamma(0,1)+ r_2I_minus_L*dgrad_dgamma[0];
          dhess_dgamma(0,2) = both_minus_L*dhess_dgamma(0,2)+ r_1I_minus_L*dgrad_dgamma[0];
          dhess_dgamma(1,1) = both_minus_L*dhess_dgamma(1,1)+ ctwo*r_2I_minus_L*dgrad_dgamma[1];
          dhess_dgamma(1,2) = both_minus_L*dhess_dgamma(1,2)+ r_1I_minus_L*dgrad_dgamma[1]
                              + r_2I_minus_L*dgrad_dgamma[2] + dval_dgamma;
          dhess_dgamma(2,2) = both_minus_L*dhess_dgamma(2,2)+ ctwo*r_1I_minus_L*dgrad_dgamma[2];
          dgrad_dgamma[0] = both_minus_L*dgrad_dgamma[0];
          dgrad_dgamma[1] = both_minus_L*dgrad_dgamma[1] + r_2I_minus_L * dval_dgamma;
          dgrad_dgamma[2] = both_minus_L*dgrad_dgamma[2] + r_1I_minus_L * dval_dgamma;
          dval_dgamma *= both_minus_L;
        }
        // Pack into the per-gamma vectors, keeping the Hessian symmetric.
        dval_Vec[num] += scale*dval_dgamma;
        for (int i=0; i<3; i++)
        {
          dgrad_Vec[num][i] += scale*dgrad_dgamma[i];
          dhess_Vec[num](i,i) += scale*dhess_dgamma(i,i);
          for (int j=i+1; j<3; j++)
          {
            dhess_Vec[num](i,j) += scale*dhess_dgamma(i,j);
            dhess_Vec[num](j,i) = dhess_Vec[num](i,j);
          }
        }
        // Advance r_12 powers and their derivatives.
        nf += cone;
        r2n_2 = r2n_1 * nf;
        r2n_1 = r2n * nf;
        r2n *= r_12;
      }
      // Advance r_2I powers and their derivatives.
      mf += cone;
      r2m_2 = r2m_1 * mf;
      r2m_1 = r2m * mf;
      r2m *= r_2I;
    }
    // Advance r_1I powers and their derivatives.
    lf += cone;
    r2l_2 = r2l_1 * lf;
    r2l_1 = r2l * lf;
    r2l *= r_1I;
  }
  ///////////////////////////////////////////
  // Now, compensate for constraint matrix //
  ///////////////////////////////////////////
  // Map derivatives w.r.t. the full gamma set onto the independent
  // parameters: copy the independent entries, then subtract the
  // contribution each dependent gamma makes through its constraint row.
  std::fill (d_vals.begin(), d_vals.end(), 0.0);
  int var = 0;
  for (int i=0; i<NumGamma; i++)
    if (IndepVar[i])
    {
      d_vals[var] = dval_Vec[i];
      d_grads[var] = dgrad_Vec[i];
      d_hess[var] = dhess_Vec[i];
      var++;
    }
  int constraint = 0;
  for (int i=0; i<NumGamma; i++)
  {
    if (!IndepVar[i])
    {
      int indep_var = 0;
      for (int j=0; j<NumGamma; j++)
        if (IndepVar[j])
        {
          d_vals[indep_var] -= ConstraintMatrix(constraint,j) * dval_Vec[i];
          d_grads[indep_var] -= ConstraintMatrix(constraint,j) * dgrad_Vec[i];
          d_hess[indep_var] -= ConstraintMatrix(constraint,j) * dhess_Vec[i];
          indep_var++;
        }
        else
          // Off-diagonal entries for dependent columns must vanish;
          // anything else means the constraint matrix is malformed.
          if (i != j)
            assert (std::abs(ConstraintMatrix(constraint,j)) < 1.0e-10);
      constraint++;
    }
  }
#ifdef DEBUG_DERIVS
  // Cross-check analytic derivatives against finite differences.
  // NOTE(review): d_valsFD/d_gradsFD/d_hessFD appear to be class members
  // pre-sized to Parameters.size() -- confirm before enabling this build.
  evaluateDerivativesFD(r_12, r_1I, r_2I, d_valsFD, d_gradsFD, d_hessFD);
  fprintf (stderr, "Param Analytic Finite diffference\n");
  for (int ip=0; ip<Parameters.size(); ip++)
    fprintf (stderr, " %3d %12.6e %12.6e\n", ip, d_vals[ip], d_valsFD[ip]);
  fprintf (stderr, "Param Analytic Finite diffference\n");
  for (int ip=0; ip<Parameters.size(); ip++)
    fprintf (stderr, " %3d %12.6e %12.6e %12.6e %12.6e %12.6e %12.6e\n", ip,
             d_grads[ip][0], d_gradsFD[ip][0],
             d_grads[ip][1], d_gradsFD[ip][1],
             d_grads[ip][2], d_gradsFD[ip][2] );
  fprintf (stderr, "Param Analytic Finite diffference\n");
  for (int ip=0; ip<Parameters.size(); ip++)
    for (int dim=0; dim<3; dim++)
      fprintf (stderr, " %3d %12.6e %12.6e %12.6e %12.6e %12.6e %12.6e\n", ip,
               d_hess[ip](0,dim), d_hessFD[ip](0,dim),
               d_hess[ip](1,dim), d_hessFD[ip](1,dim),
               d_hess[ip](2,dim), d_hessFD[ip](2,dim) );
#endif
  return true;
}
/// One-dimensional radial value; unused for the 3D functor, always zero.
inline real_type f(real_type r)
{
  return real_type(0);
}
/// One-dimensional radial derivative; unused for the 3D functor, always zero.
inline real_type df(real_type r)
{
  return real_type(0);
}
/** Read this functor's configuration from an XML node.
 *
 *  Expects "esize" (N_ee), "isize" (N_eI), and "rcut" attributes on
 *  @p cur, followed by a <coefficients> child holding the parameter
 *  array.  Resizes internal storage, loads the coefficients, registers
 *  the optimizable variables, and rebuilds the gamma matrix.
 *  @param cur  XML node for this correlation term
 *  @return true (fatal configuration errors abort via ReportEngine)
 */
bool put(xmlNodePtr cur)
{
ReportEngine PRE("PolynomialFunctor3D","put(xmlNodePtr)");
// //CuspValue = -1.0e10;
// NumParams_eI = NumParams_ee = 0;
cutoff_radius = 0.0;
// Pull the basis sizes and cutoff from the node attributes.
OhmmsAttributeSet rAttrib;
rAttrib.add(N_ee, "esize");
rAttrib.add(N_eI, "isize");
rAttrib.add(cutoff_radius, "rcut");
rAttrib.put(cur);
// Both sizes are mandatory; error(...,true) aborts.
if (N_eI == 0)
PRE.error("You must specify a positive number for \"isize\"",true);
if (N_ee == 0)
PRE.error("You must specify a positive number for \"esize\"",true);
// app_log() << " esize = " << NumParams_ee << " parameters " << std::endl;
// app_log() << " isize = " << NumParams_eI << " parameters " << std::endl;
// app_log() << " rcut = " << cutoff_radius << std::endl;
resize (N_eI, N_ee);
// Now read coefficents
xmlNodePtr xmlCoefs = cur->xmlChildrenNode;
while (xmlCoefs != NULL)
{
std::string cname((const char*)xmlCoefs->name);
if (cname == "coefficients")
{
std::string type("0"), id("0"), opt("yes");
OhmmsAttributeSet cAttrib;
cAttrib.add(id, "id");
cAttrib.add(type, "type");
cAttrib.add(opt, "optimize");
cAttrib.put(xmlCoefs);
notOpt = (opt=="no");
// Only "Array" coefficients are supported; warn and rewrite the
// attribute in place so downstream consumers see a valid type.
if (type != "Array")
{
PRE.error( "Unknown correlation type " + type +
" in PolynomialFunctor3D." + "Resetting to \"Array\"");
xmlNewProp (xmlCoefs, (const xmlChar*) "type",
(const xmlChar*) "Array");
}
std::vector<real_type> params;
putContent(params, xmlCoefs);
// An empty list keeps the defaults from resize(); a list of the
// wrong (nonzero) size is a hard error.
if (params.size() == Parameters.size())
Parameters = params;
else
{
app_log() << "Expected " << Parameters.size() << " parameters,"
<< " but found " << params.size()
<< " in PolynomialFunctor3D.\n";
if (params.size()!=0)
abort(); //you think you know what they should be but don't.
}
// Setup parameter names
for (int i=0; i<Parameters.size(); i++)
{
std::stringstream sstr;
sstr << id << "_" << i;;
if(!notOpt)
myVars.insert(sstr.str(),Parameters[i],optimize::LOGLINEAR_P,true);
}
// for (int i=0; i< N_ee; i++)
// for (int j=0; j < N_eI; j++)
// for (int k=0; k<=j; k++) {
// std::stringstream sstr;
// sstr << id << "_" << i << "_" << j << "_" << k;
// myVars.insert(sstr.str(),Parameters[index],true);
// ParamArray(i,j,k) = ParamArray(i,k,j) = Parameters[index];
// index++;
// }
if(!notOpt)
{
app_log() << "Parameter Name Value\n";
myVars.print(app_log());
}
}
xmlCoefs = xmlCoefs->next;
}
// Derive the constrained gamma matrix from the freshly loaded parameters.
reset_gamma();
//print();
return true;
}
void resetParameters(const opt_variables_type& active)
{
if (notOpt)
return;
for(int i=0; i<Parameters.size(); ++i)
{
int loc=myVars.where(i);
if(loc>=0)
Parameters[i]=myVars[i]=active[loc];
}
if (ResetCount++ == 100)
{
ResetCount = 0;
//print();
}
reset_gamma();
}
/// Register this functor's optimizable parameters with the global set.
void checkInVariables(opt_variables_type& active)
{
active.insertFrom(myVars);
}
/// Cache the indices of this functor's parameters within the global set.
void checkOutVariables(const opt_variables_type& active)
{
myVars.getIndex(active);
}
void print()
{
const int N = 100;
std::string fname = iSpecies + ".J3.h5";
hid_t hid = H5Fcreate (fname.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
Array<real_type,3> val (N,N,N);
for (int i=0; i<N; i++)
{
double r_12 = (real_type)i/(real_type)(N-1);
for (int j=0; j<N; j++)
{
double r_1I = (real_type)j/(real_type)(N-1) * 0.5*cutoff_radius;
for (int k=0; k<N; k++)
{
double r_2I = (real_type)k/(real_type)(N-1) * 0.5*cutoff_radius;
double rmin = std::abs(r_1I - r_2I);
double rmax = std::abs(r_1I + r_2I);
double r = rmin + r_12*(rmax-rmin);
val(i,j,k) = evaluate (r, r_1I, r_2I);
}
}
}
// HDFAttribIO<Array<real_type,3> > coefs_attrib (SplineCoefs);
// HDFAttribIO<Array<real_type,3> > param_attrib (ParamArray);
Array<double,3> val_DP(N,N,N);
val_DP = val;
HDFAttribIO<Array<double,3> > val_attrib(val_DP);
val_attrib.write(hid, "val");
// coefs_attrib.write (hid, "coefs");
// param_attrib.write (hid, "params");
H5Fclose(hid);
// std::string fname = (elementType != "") ? elementType : pairType;
// fname = fname + ".dat";
// //cerr << "Writing " << fname << " file.\n";
// FILE *fout = fopen (fname.c_str(), "w");
// for (double r=0.0; r<cutoff_radius; r+=0.001)
// fprintf (fout, "%8.3f %16.10f\n", r, evaluate(r));
// fclose(fout);
}
/// Stream dump of the functor.  Intentionally a no-op: the old radial
/// table below assumed a 1D evaluate(r,du,d2du) signature that no longer
/// matches this 3D functor (see the note by Ye Luo).
void print(std::ostream& os)
{
/* no longer correct. Ye Luo
int n=100;
real_type d=cutoff_radius/100.,r=0;
real_type u,du,d2du;
for(int i=0; i<n; ++i)
{
u=evaluate(r,du,d2du);
os << std::setw(22) << r << std::setw(22) << u << std::setw(22) << du
<< std::setw(22) << d2du << std::endl;
r+=d;
}
*/
}
/// Number of independent variational parameters held by this functor.
inline int getNumParameters()
{
return Parameters.size();
}
};
}
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.